feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,67 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_DEFORMABLE_SURFACE_MATERIAL_CORE_H
#define PXS_DEFORMABLE_SURFACE_MATERIAL_CORE_H
#include "PxDeformableSurfaceMaterial.h"
#include "PxsMaterialShared.h"
namespace physx
{
PX_ALIGN_PREFIX(16)
// POD describing a deformable-surface (cloth-like) material. Kept at exactly
// 32 bytes / 16-byte aligned; the trailing comments give the running byte offset.
struct PxsDeformableSurfaceMaterialData
{
	PxReal youngs;            //4
	PxReal poissons;          //8
	PxReal dynamicFriction;   //12
	PxReal thickness;         //16
	PxReal bendingStiffness;  //20
	PxReal elasticityDamping; //24
	PxReal bendingDamping;    //28
	PxReal padding[1];        //32, 4 bytes padding to make the total size 32 bytes

	// Default ctor: stiff (1 MPa), nearly incompressible, frictionless surface
	// with zero thickness, bending stiffness and damping.
	PX_CUDA_CALLABLE PxsDeformableSurfaceMaterialData()
		: youngs(1.e+6f)
		, poissons(0.45f)
		, dynamicFriction(0.0f)
		, thickness(0.0f)
		, bendingStiffness(0.0f)
		, elasticityDamping(0.0f)
		, bendingDamping(0.0f)
		, padding() // zero the explicit padding too, so all 32 bytes are deterministic (safe to memcmp/upload)
	{}

	// Serialization ctor: deliberately leaves all members uninitialized.
	PxsDeformableSurfaceMaterialData(const PxEMPTY) {}
}
PX_ALIGN_SUFFIX(16);
typedef MaterialCoreT<PxsDeformableSurfaceMaterialData, PxDeformableSurfaceMaterial> PxsDeformableSurfaceMaterialCore;
} //namespace physx
#endif // PXS_DEFORMABLE_SURFACE_MATERIAL_CORE_H

View File

@@ -0,0 +1,82 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_DEFORMABLE_VOLUME_MATERIAL_CORE_H
#define PXS_DEFORMABLE_VOLUME_MATERIAL_CORE_H
#include "PxDeformableVolumeMaterial.h"
#include "PxsMaterialShared.h"
namespace physx
{
// Maps a PxReal in [0, 1] to the full PxU16 range [0, 65535]; the input is
// clamped to [0, 1] first. Rounds to nearest rather than truncating so that
// toUniformU16(toUniformReal(v)) == v for every PxU16 v (truncation can lose
// one unit on the round trip when v/65535*65535 lands just below v in float).
PX_FORCE_INLINE PX_CUDA_CALLABLE PxU16 toUniformU16(PxReal f)
{
	f = PxClamp(f, 0.0f, 1.0f);
	return PxU16(f * 65535.0f + 0.5f); // f==1 gives 65535.5 -> truncates back to 65535, no overflow
}
// Inverse of toUniformU16: maps a PxU16 in [0, 65535] back to a PxReal in [0, 1].
PX_FORCE_INLINE PX_CUDA_CALLABLE PxReal toUniformReal(PxU16 v)
{
return PxReal(v) * (1.0f / 65535.0f);
}
// POD describing a deformable-volume (soft-body) material. 32 bytes,
// 16-byte aligned; trailing comments give the running byte offset.
PX_ALIGN_PREFIX(16) struct PxsDeformableVolumeMaterialData
{
	PxReal youngs;               //4
	PxReal poissons;             //8
	PxReal dynamicFriction;      //12
	PxReal elasticityDamping;    // 16
	PxU16 dampingScale;          //20, known to be in the range of 0...1. Mapped to integer range 0...65535
	PxU16 materialModel;         //22
	PxReal deformThreshold;      //24
	PxReal deformLowLimitRatio;  //28
	PxReal deformHighLimitRatio; //32

	// Default ctor: stiff (1 MPa), nearly incompressible, frictionless material
	// using the co-rotational model, with deformation limits effectively disabled
	// (threshold at PX_MAX_F32).
	PX_CUDA_CALLABLE PxsDeformableVolumeMaterialData() :
		youngs              (1.e+6f),
		poissons            (0.45f),
		dynamicFriction     (0.0f),
		elasticityDamping   (0.0f),
		dampingScale        (0), // was commented out, leaving the member uninitialized - zero it explicitly
		materialModel       (PxDeformableVolumeMaterialModel::eCO_ROTATIONAL),
		deformThreshold     (PX_MAX_F32),
		deformLowLimitRatio (1.0f),
		deformHighLimitRatio(1.0f)
	{}

	// Serialization ctor: deliberately leaves all members uninitialized.
	PxsDeformableVolumeMaterialData(const PxEMPTY) {}
}PX_ALIGN_SUFFIX(16);
typedef MaterialCoreT<PxsDeformableVolumeMaterialData, PxDeformableVolumeMaterial> PxsDeformableVolumeMaterialCore;
} //namespace physx
#endif // PXS_DEFORMABLE_VOLUME_MATERIAL_CORE_H

View File

@@ -0,0 +1,74 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MATERIAL_CORE_H
#define PXS_MATERIAL_CORE_H
#include "PxMaterial.h"
#include "foundation/PxUtilities.h"
#include "PxsMaterialShared.h"
namespace physx
{
// POD description of a rigid-body (PxMaterial) material: friction, restitution
// and damping coefficients plus the PxCombineMode used to merge each property
// when two materials touch. Combine modes are stored as PxU8 to keep the struct
// compact; use the typed get/set accessors below rather than the raw bytes.
struct PxsMaterialData
{
PxReal dynamicFriction;
PxReal staticFriction;
PxReal restitution;
PxReal damping;
PxMaterialFlags flags;
PxU8 fricCombineMode; // PxCombineMode::Enum
PxU8 restCombineMode; // PxCombineMode::Enum
PxU8 dampingCombineMode; // PxCombineMode::Enum
// Default ctor: zero friction/restitution/damping, eAVERAGE combine modes.
// NOTE(review): 'flags' relies on PxMaterialFlags' own default construction here.
PxsMaterialData() :
dynamicFriction (0.0f),
staticFriction (0.0f),
restitution (0.0f),
damping (0.0f),
fricCombineMode (PxCombineMode::eAVERAGE),
restCombineMode (PxCombineMode::eAVERAGE),
dampingCombineMode(PxCombineMode::eAVERAGE)
{}
// Serialization ctor: deliberately leaves all members uninitialized.
PxsMaterialData(const PxEMPTY) {}
// Typed accessors for the PxU8-backed combine-mode fields.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxCombineMode::Enum getFrictionCombineMode() const { return PxCombineMode::Enum(fricCombineMode); }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxCombineMode::Enum getRestitutionCombineMode() const { return PxCombineMode::Enum(restCombineMode); }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxCombineMode::Enum getDampingCombineMode() const { return PxCombineMode::Enum(dampingCombineMode); }
// PxTo8 narrows the enum to 8 bits (presumably with a range check - see foundation/PxUtilities.h).
PX_FORCE_INLINE void setFrictionCombineMode(PxCombineMode::Enum combineMode) { fricCombineMode = PxTo8(combineMode); }
PX_FORCE_INLINE void setRestitutionCombineMode(PxCombineMode::Enum combineMode) { restCombineMode = PxTo8(combineMode); }
PX_FORCE_INLINE void setDampingCombineMode(PxCombineMode::Enum combineMode) { dampingCombineMode = PxTo8(combineMode); }
};
typedef MaterialCoreT<PxsMaterialData, PxMaterial> PxsMaterialCore;
} //namespace physx
#endif

View File

@@ -0,0 +1,169 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MATERIAL_MANAGER_H
#define PXS_MATERIAL_MANAGER_H
#include "PxsMaterialCore.h"
#include "PxsDeformableSurfaceMaterialCore.h"
#include "PxsDeformableVolumeMaterialCore.h"
#include "PxsPBDMaterialCore.h"
#include "foundation/PxAlignedMalloc.h"
namespace physx
{
// Pair of material handles (indices into the material table) for the two
// shapes involved in a contact.
struct PxsMaterialInfo
{
PxU16 mMaterialIndex0;
PxU16 mMaterialIndex1;
};
// Grow-only table of material cores indexed by material handle.
// Storage is raw 16-byte-aligned memory: elements are never constructed or
// destroyed, only copy-assigned, so MaterialCore is assumed to be trivially
// copyable/destructible. A slot is unused while its mMaterialIndex equals
// MATERIAL_INVALID_HANDLE.
template<class MaterialCore>
class PxsMaterialManagerT
{
public:
PxsMaterialManagerT()
{
// Start with 128 slots; resize() grows the table on demand.
const PxU32 matCount = 128;
materials = reinterpret_cast<MaterialCore*>(physx::PxAlignedAllocator<16>().allocate(sizeof(MaterialCore)*matCount, PX_FL));
maxMaterials = matCount;
// Mark every slot as free.
for(PxU32 i=0; i<matCount; ++i)
{
materials[i].mMaterialIndex = MATERIAL_INVALID_HANDLE;
}
}
~PxsMaterialManagerT()
{
// Raw deallocation: element destructors are intentionally not run (see class note above).
physx::PxAlignedAllocator<16>().deallocate(materials);
}
// Copies *mat into the slot given by mat->mMaterialIndex, growing the table if needed.
void setMaterial(MaterialCore* mat)
{
const PxU16 materialIndex = mat->mMaterialIndex;
resize(PxU32(materialIndex) + 1);
materials[materialIndex] = *mat;
}
// Overwrites an existing slot; the index must already be within the table.
void updateMaterial(MaterialCore* mat)
{
materials[mat->mMaterialIndex] =*mat;
}
// Frees the slot by invalidating its handle; the storage itself is never reclaimed.
void removeMaterial(MaterialCore* mat)
{
mat->mMaterialIndex = MATERIAL_INVALID_HANDLE;
}
PX_FORCE_INLINE MaterialCore* getMaterial(const PxU32 index)const
{
PX_ASSERT(index < maxMaterials);
return &materials[index];
}
PxU32 getMaxSize()const
{
return maxMaterials;
}
// Grows the table (never shrinks) to at least minValueForMax slots, rounded up
// to a multiple of 32. Existing entries are copied over, new tail slots are
// marked free, and the old buffer is released.
void resize(PxU32 minValueForMax)
{
if(maxMaterials>=minValueForMax)
return;
const PxU32 numMaterials = maxMaterials;
maxMaterials = (minValueForMax+31)&~31;
MaterialCore* mat = reinterpret_cast<MaterialCore*>(physx::PxAlignedAllocator<16>().allocate(sizeof(MaterialCore)*maxMaterials, PX_FL));
for(PxU32 i=0; i<numMaterials; ++i)
mat[i] = materials[i];
for(PxU32 i = numMaterials; i < maxMaterials; ++i)
mat[i].mMaterialIndex = MATERIAL_INVALID_HANDLE;
physx::PxAlignedAllocator<16>().deallocate(materials);
materials = mat;
}
MaterialCore* materials;//make sure materials's start address is 16 bytes align
PxU32 maxMaterials;
// Explicit padding so the class has the same size on 32-bit and 64-bit builds.
PxU32 mPad;
#if !PX_P64_FAMILY
PxU32 mPad2;
#endif
};
// These concrete subclasses exist so that the templated manager can be
// forward-declared (e.g. "class PxsMaterialManager;") in other headers
// without pulling in the template.
class PxsMaterialManager : public PxsMaterialManagerT<PxsMaterialCore>
{
};
class PxsDeformableSurfaceMaterialManager : public PxsMaterialManagerT<PxsDeformableSurfaceMaterialCore>
{
};
class PxsDeformableVolumeMaterialManager : public PxsMaterialManagerT<PxsDeformableVolumeMaterialCore>
{
};
class PxsPBDMaterialManager : public PxsMaterialManagerT<PxsPBDMaterialCore>
{
};
template<class MaterialCore>
class PxsMaterialManagerIterator
{
public:
PxsMaterialManagerIterator(PxsMaterialManagerT<MaterialCore>& manager) : mManager(manager), mIndex(0)
{
}
bool getNextMaterial(MaterialCore*& materialCore)
{
const PxU32 maxSize = mManager.getMaxSize();
PxU32 index = mIndex;
while(index < maxSize && mManager.getMaterial(index)->mMaterialIndex == MATERIAL_INVALID_HANDLE)
index++;
materialCore = NULL;
if(index < maxSize)
materialCore = mManager.getMaterial(index++);
mIndex = index;
return materialCore!=NULL;
}
private:
PxsMaterialManagerIterator& operator=(const PxsMaterialManagerIterator&);
PxsMaterialManagerT<MaterialCore>& mManager;
PxU32 mIndex;
};
}
#endif

View File

@@ -0,0 +1,56 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MATERIAL_SHARED_H
#define PXS_MATERIAL_SHARED_H
#include "foundation/PxSimpleTypes.h"
namespace physx
{
#define MATERIAL_INVALID_HANDLE 0xffff
class PxOutputStream;
// Couples the low-level material data (MaterialDataT) with a back-pointer to
// the API-level material object and the handle-manager index of this material.
template<class MaterialDataT, class PxMaterialT>
class MaterialCoreT : public MaterialDataT
{
public:
MaterialCoreT(const MaterialDataT& desc) : MaterialDataT(desc), mMaterial(NULL), mMaterialIndex(MATERIAL_INVALID_HANDLE) {}
MaterialCoreT() : mMaterial(NULL), mMaterialIndex(MATERIAL_INVALID_HANDLE) {}
// Serialization ctor: mMaterial/mMaterialIndex are left to be patched by the deserializer.
MaterialCoreT(const PxEMPTY) : MaterialDataT(PxEmpty) {}
~MaterialCoreT() {}
PxMaterialT* mMaterial; // PT: TODO: eventually this could just be a base PxBaseMaterial class instead of a templated param
PxU16 mMaterialIndex; //handle assigned by the handle manager
};
} //namespace physx
#endif

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_PBD_MATERIAL_CORE_H
#define PXS_PBD_MATERIAL_CORE_H
#include "PxParticleGpu.h"
#include "PxsMaterialShared.h"
namespace physx
{
// PBD (position-based dynamics) particle material: extends the common particle
// material data with fluid-related coefficients (viscosity, surface tension,
// cohesion, lift/drag, ...).
struct PxsPBDMaterialData : public PxsParticleMaterialData
{
PxsPBDMaterialData() {} // PT: TODO: ctor leaves things uninitialized, is that by design?
PxsPBDMaterialData(const PxEMPTY) {}
// Trailing comments look like running byte offsets including the base-class
// members (base presumably 20 bytes) - TODO confirm against PxsParticleMaterialData.
PxU32 flags; //24
PxReal viscosity; //28
PxReal vorticityConfinement; //32
PxReal surfaceTension; //36
PxReal cohesion; //40
PxReal lift; //44
PxReal drag; //48
PxReal cflCoefficient; //52
PxReal particleFrictionScale; //56
PxReal particleAdhesionScale; //60
};
typedef MaterialCoreT<PxsPBDMaterialData, PxPBDMaterial> PxsPBDMaterialCore;
} //namespace physx
#endif

View File

@@ -0,0 +1,44 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_CONFIG_H
#define PXV_CONFIG_H
/*! \file internal top level include file for lowlevel. */
#include "PxPhysXConfig.h"
/************************************************************************/
/* Compiler workarounds */
/************************************************************************/
#if PX_VC
#pragma warning(disable: 4355 ) // "this" used in base member initializer list
#pragma warning(disable: 4146 ) // unary minus operator applied to unsigned type.
#endif
#endif

View File

@@ -0,0 +1,172 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_DYNAMICS_H
#define PXV_DYNAMICS_H
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxIntrinsics.h"
#include "PxRigidDynamic.h"
namespace physx
{
/*!
\file
Dynamics interface.
*/
// Minimal rigid-body state shared by the low-level pipeline: world pose, API
// body flags and the packed solver iteration counts. Size is locked to
// 32 bytes by the compile-time assert following this struct.
struct PxsRigidCore
{
PxsRigidCore() : mFlags(0), solverIterationCounts(0) {}
// Serialization ctor: members are left to be patched by the deserializer.
PxsRigidCore(const PxEMPTY) : mFlags(PxEmpty) {}
PX_ALIGN_PREFIX(16)
PxTransform body2World PX_ALIGN_SUFFIX(16);
PxRigidBodyFlags mFlags; // API body flags
PxU16 solverIterationCounts; // vel iters are in low word and pos iters in high word.
// Flag queries return non-zero (not necessarily 1) when the flag is set.
PX_FORCE_INLINE PxU32 isKinematic() const { return mFlags & PxRigidBodyFlag::eKINEMATIC; }
PX_FORCE_INLINE PxU32 hasCCD() const { return mFlags & PxRigidBodyFlag::eENABLE_CCD; }
PX_FORCE_INLINE PxU32 hasCCDFriction() const { return mFlags & PxRigidBodyFlag::eENABLE_CCD_FRICTION; }
// eRESERVED is reused internally to cache "body2Actor is identity" (see PxsBodyCore::setBody2Actor).
PX_FORCE_INLINE PxU32 hasIdtBody2Actor() const { return mFlags & PxRigidBodyFlag::eRESERVED; }
};
PX_COMPILE_TIME_ASSERT(sizeof(PxsRigidCore) == 32);
#define PXV_CONTACT_REPORT_DISABLED PX_MAX_F32
// Full dynamic-body state used by the solver. Layout is carefully packed to
// exactly 160 bytes (see the compile-time assert after the struct); the
// trailing comments track the running byte offset. The default ctor only sets
// fixedBaseLink - call init() to get a fully initialized body.
struct PxsBodyCore : public PxsRigidCore
{
PxsBodyCore() : PxsRigidCore() { fixedBaseLink = PxU8(0); }
// Serialization ctor: members are left to be patched by the deserializer.
PxsBodyCore(const PxEMPTY) : PxsRigidCore(PxEmpty) {}
PX_FORCE_INLINE const PxTransform& getBody2Actor() const { return body2Actor; }
// Stores the body->actor transform and caches whether it is the identity in
// the eRESERVED flag (queried via PxsRigidCore::hasIdtBody2Actor()).
PX_FORCE_INLINE void setBody2Actor(const PxTransform& t)
{
if(t.p.isZero() && t.q.isIdentity())
mFlags.raise(PxRigidBodyFlag::eRESERVED);
else
mFlags.clear(PxRigidBodyFlag::eRESERVED);
body2Actor = t;
}
protected:
// Written only through setBody2Actor() so the identity flag stays in sync.
PxTransform body2Actor;
public:
PxReal ccdAdvanceCoefficient; //64
PxVec3 linearVelocity;
PxReal maxPenBias;
PxVec3 angularVelocity;
PxReal contactReportThreshold; //96
PxReal maxAngularVelocitySq;
PxReal maxLinearVelocitySq;
PxReal linearDamping;
PxReal angularDamping; //112
PxVec3 inverseInertia;
PxReal inverseMass; //128
PxReal maxContactImpulse;
PxReal sleepThreshold;
// Articulation links use cfmScale, other bodies use freezeThreshold (see init()).
union
{
PxReal freezeThreshold;
PxReal cfmScale;
};
PxReal wakeCounter; //144 this is authoritative wakeCounter
PxReal solverWakeCounter; //this is calculated by the solver when it performs sleepCheck. It is committed to wakeCounter in ScAfterIntegrationTask if the body is still awake.
PxU32 numCountedInteractions;
PxReal offsetSlop; //Slop value used to snap contact line of action back in-line with the COM
PxU8 isFastMoving; //This could be a single bit but it's a u8 at the moment for simplicity's sake
PxU8 disableGravity; //This could be a single bit but it's a u8 at the moment for simplicity's sake
PxRigidDynamicLockFlags lockFlags; //This is u8.
PxU8 fixedBaseLink; //160 This indicates whether the articulation link has PxArticulationFlag::eFIX_BASE. All fits into 16 byte alignment
// PT: moved from Sc::BodyCore ctor - we don't want to duplicate all this in immediate mode
// Initializes the body to its default dynamic state at the given pose.
// 'scaleSpeed' scales the speed-squared based sleep/freeze thresholds;
// 'type' selects which member of the freezeThreshold/cfmScale union is set.
// solverWakeCounter and the interaction counters are intentionally NOT
// initialized here (see the notes at the bottom).
PX_FORCE_INLINE void init( const PxTransform& bodyPose,
const PxVec3& inverseInertia_, PxReal inverseMass_,
PxReal wakeCounter_, PxReal scaleSpeed,
PxReal linearDamping_, PxReal angularDamping_,
PxReal maxLinearVelocitySq_, PxReal maxAngularVelocitySq_,
PxActorType::Enum type)
{
PX_ASSERT(bodyPose.p.isFinite());
PX_ASSERT(bodyPose.q.isFinite());
// PT: TODO: unify naming convention
// From PxsRigidCore
body2World = bodyPose;
mFlags = PxRigidBodyFlags();
// Default iteration counts: 4 position iterations (low byte), 1 velocity iteration (high byte).
solverIterationCounts = (1 << 8) | 4;
// Also raises the identity-body2Actor flag (eRESERVED).
setBody2Actor(PxTransform(PxIdentity));
ccdAdvanceCoefficient = 0.15f;
linearVelocity = PxVec3(0.0f);
maxPenBias = -1e32f;//-PX_MAX_F32;
angularVelocity = PxVec3(0.0f);
contactReportThreshold = PXV_CONTACT_REPORT_DISABLED;
maxAngularVelocitySq = maxAngularVelocitySq_;
maxLinearVelocitySq = maxLinearVelocitySq_;
linearDamping = linearDamping_;
angularDamping = angularDamping_;
inverseInertia = inverseInertia_;
inverseMass = inverseMass_;
maxContactImpulse = 1e32f;// PX_MAX_F32;
sleepThreshold = 5e-5f * scaleSpeed * scaleSpeed;
// Only one union member is meaningful per body type.
if(type == PxActorType::eARTICULATION_LINK)
cfmScale = 0.025f;
else
freezeThreshold = 2.5e-5f * scaleSpeed * scaleSpeed;
wakeCounter = wakeCounter_;
offsetSlop = 0.f;
// PT: this one is not initialized?
//solverWakeCounter
// PT: these are initialized in BodySim ctor
//numCountedInteractions;
//numBodyInteractions;
isFastMoving = false;
disableGravity = false;
lockFlags = PxRigidDynamicLockFlags(0);
}
};
PX_COMPILE_TIME_ASSERT(sizeof(PxsBodyCore) == 160);
}
#endif

View File

@@ -0,0 +1,260 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_GEOMETRY_H
#define PXV_GEOMETRY_H
#include "foundation/PxTransform.h"
#include "PxvConfig.h"
/*!
\file
Geometry interface
*/
/************************************************************************/
/* Shapes */
/************************************************************************/
#include "GuGeometryChecks.h"
#include "CmUtils.h"
namespace physx
{
//
// Summary of our material approach:
//
// On the API level, materials are accessed via pointer. Internally we store indices into the material table.
// The material table is stored in the SDK and the materials are shared among scenes. To make this threadsafe,
// we have the following approach:
//
// - Every scene has a copy of the SDK master material table
// - At the beginning of a simulation step, the scene material table gets synced to the master material table.
// - While the simulation is running, the scene table does not get touched.
// - Each shape stores the indices of its material(s). When the simulation is not running and a user requests the
// materials of the shape, the indices are used to fetch the material from the master material table. When the
// the simulation is running then the same indices are used internally to fetch the materials from the scene
// material table.
// - This whole scheme only works as long as the position of a material in the material table does not change
// when other materials get deleted/inserted. The data structure of the material table makes sure that is the case.
//
// Maps a shape's local material slots to indices into the scene/master
// material table (see the "Summary of our material approach" comment above).
struct MaterialIndicesStruct
{
// PX_SERIALIZATION
MaterialIndicesStruct(const PxEMPTY) {}
//~PX_SERIALIZATION
MaterialIndicesStruct()
: indices(NULL)
, numIndices(0)
, pad(PX_PADDING_16)
, gpuRemapId(0)
{
}
// NOTE: does not free 'indices' - the owner must call deallocate() explicitly
// (ownership is tracked externally, see PxShapeCoreFlag::eOWNS_MATERIAL_IDX_MEMORY).
~MaterialIndicesStruct()
{
}
// Allocates a remap table of 'size' entries. Does not free a previous
// allocation - callers must call deallocate() first if one exists.
void allocate(PxU16 size)
{
indices = PX_ALLOCATE(PxU16, size, "MaterialIndicesStruct::allocate");
numIndices = size;
}
// Frees the table and resets the count; 'indices' is left pointing at freed memory.
void deallocate()
{
PX_FREE(indices);
numIndices = 0;
}
PxU16* indices; // the remap table for material index
PxU16 numIndices; // the size of the remap table
PxU16 pad; // pad for serialization
PxU32 gpuRemapId; // PT: using padding bytes on x64
};
// Low-level ("LL") geometry mirrors of the public geometry classes, extended
// with the per-shape material remap table (or, for convex meshes, a
// GPU-compatibility flag).
struct PxConvexMeshGeometryLL: public PxConvexMeshGeometry
{
bool gpuCompatible; // PT: TODO: remove?
};
struct PxTriangleMeshGeometryLL: public PxTriangleMeshGeometry
{
MaterialIndicesStruct materialsLL;
};
struct PxParticleSystemGeometryLL : public PxParticleSystemGeometry
{
MaterialIndicesStruct materialsLL;
};
struct PxTetrahedronMeshGeometryLL : public PxTetrahedronMeshGeometry
{
MaterialIndicesStruct materialsLL;
};
struct PxHeightFieldGeometryLL : public PxHeightFieldGeometry
{
MaterialIndicesStruct materialsLL;
};
// Register the LL geometry types with the geometry type-check machinery
// (GuGeometryChecks.h), reusing the type IDs of their public base classes.
template <> struct PxcGeometryTraits<PxParticleSystemGeometryLL> { enum { TypeID = PxGeometryType::ePARTICLESYSTEM}; };
template <> struct PxcGeometryTraits<PxConvexMeshGeometryLL> { enum { TypeID = PxGeometryType::eCONVEXMESH }; };
template <> struct PxcGeometryTraits<PxTriangleMeshGeometryLL> { enum { TypeID = PxGeometryType::eTRIANGLEMESH }; };
template <> struct PxcGeometryTraits<PxTetrahedronMeshGeometryLL> { enum { TypeID = PxGeometryType::eTETRAHEDRONMESH }; };
template <> struct PxcGeometryTraits<PxHeightFieldGeometryLL> { enum { TypeID = PxGeometryType::eHEIGHTFIELD }; };
// Sentinel geometry used as GeometryUnion's default state (type tag eINVALID).
class InvalidGeometry : public PxGeometry
{
public:
    PX_CUDA_CALLABLE PX_FORCE_INLINE InvalidGeometry() : PxGeometry(PxGeometryType::eINVALID) {}
};
// Type-erased in-place storage for any concrete geometry. A geometry object lives inside
// the raw byte union below and is accessed through reinterpret_cast; the type tag stored
// in the PxGeometry base identifies which geometry is currently held (see getType()).
class GeometryUnion
{
public:
// PX_SERIALIZATION
    GeometryUnion(const PxEMPTY) {}
//~PX_SERIALIZATION
    // Default-constructs to the eINVALID sentinel geometry.
    PX_CUDA_CALLABLE PX_FORCE_INLINE GeometryUnion() { reinterpret_cast<InvalidGeometry&>(mGeometry) = InvalidGeometry(); }
    PX_CUDA_CALLABLE PX_FORCE_INLINE GeometryUnion(const PxGeometry& g) { set(g); }
    PX_CUDA_CALLABLE PX_FORCE_INLINE const PxGeometry& getGeometry() const { return reinterpret_cast<const PxGeometry&>(mGeometry); }
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxGeometryType::Enum getType() const { return reinterpret_cast<const PxGeometry&>(mGeometry).getType(); }
    // Copies 'g' into the internal storage. Defined elsewhere - presumably dispatches on
    // g.getType() to copy the right concrete type; confirm in the implementation.
    PX_CUDA_CALLABLE void set(const PxGeometry& g);
    // Typed accessors. checkType presumably validates that the stored type matches Geom
    // (likely an assert in debug builds) - see its definition.
    template<class Geom> PX_CUDA_CALLABLE PX_FORCE_INLINE Geom& get()
    {
        checkType<Geom>(getGeometry());
        return reinterpret_cast<Geom&>(mGeometry);
    }
    template<class Geom> PX_CUDA_CALLABLE PX_FORCE_INLINE const Geom& get() const
    {
        checkType<Geom>(getGeometry());
        return reinterpret_cast<const Geom&>(mGeometry);
    }
private:
    // Raw storage sized for the largest geometry type; overall size is capped at 64 bytes
    // by the compile-time assert after the class.
    union {
        void* alignment; // PT: Makes sure the class is at least aligned to pointer size. See DE6803.
        PxU8 box[sizeof(PxBoxGeometry)];
        PxU8 sphere[sizeof(PxSphereGeometry)];
        PxU8 capsule[sizeof(PxCapsuleGeometry)];
        PxU8 plane[sizeof(PxPlaneGeometry)];
        PxU8 convexCore[sizeof(PxConvexCoreGeometry)];
        PxU8 convex[sizeof(PxConvexMeshGeometryLL)];
        PxU8 particleSystem[sizeof(PxParticleSystemGeometryLL)];
        PxU8 mesh[sizeof(PxTriangleMeshGeometryLL)];
        PxU8 tetMesh[sizeof(PxTetrahedronMeshGeometryLL)];
        PxU8 heightfield[sizeof(PxHeightFieldGeometryLL)];
        PxU8 custom[sizeof(PxCustomGeometry)];
        PxU8 invalid[sizeof(InvalidGeometry)];
    } mGeometry;
};
// Internal per-shape state flags, stored in one byte (PxsShapeCore::mShapeCoreFlags).
struct PxShapeCoreFlag
{
    enum Enum
    {
        eOWNS_MATERIAL_IDX_MEMORY = (1<<0), // PT: for de-serialization to avoid deallocating material index list. Moved there from Sc::ShapeCore (since one byte was free).
        eIS_EXCLUSIVE = (1<<1), // PT: shape's exclusive flag
        eIDT_TRANSFORM = (1<<2), // PT: true if PxsShapeCore::transform is identity
        eDEFORMABLE_SURFACE_SHAPE = (1<<3), // True if this shape is a deformable surface shape
        eDEFORMABLE_VOLUME_SHAPE = (1<<4) // True if this shape is a deformable volume shape
    };
};
// One-byte flag container for PxShapeCoreFlag (fits the offset-33 slot in PxsShapeCore).
typedef PxFlags<PxShapeCoreFlag::Enum,PxU8> PxShapeCoreFlags;
PX_FLAGS_OPERATORS(PxShapeCoreFlag::Enum,PxU8)
// Low-level shape data. Member offsets are documented in the trailing comments and the
// struct size is kept a multiple of 16 bytes (see the compile-time assert below), so be
// careful when adding/reordering members.
struct PxsShapeCore
{
    PxsShapeCore()
    {
        setDensityForFluid(800.0f); // default fluid density, stashed in the geometry's padding field
    }
// PX_SERIALIZATION
    PxsShapeCore(const PxEMPTY) : mShapeCoreFlags(PxEmpty), mGeometry(PxEmpty) {}
//~PX_SERIALIZATION
#if PX_WINDOWS_FAMILY // PT: to avoid "error: offset of on non-standard-layout type" on Linux
    protected:
#endif
    PX_ALIGN_PREFIX(16)
    PxTransform mTransform PX_ALIGN_SUFFIX(16); // PT: Offset 0
#if PX_WINDOWS_FAMILY // PT: to avoid "error: offset of on non-standard-layout type" on Linux
    public:
#endif
    PX_FORCE_INLINE const PxTransform& getTransform() const
    {
        return mTransform;
    }
    // Stores the shape-local transform and caches whether it is the identity in the
    // eIDT_TRANSFORM flag, so downstream code can test a bit instead of the transform.
    PX_FORCE_INLINE void setTransform(const PxTransform& t)
    {
        mTransform = t;
        if(t.p.isZero() && t.q.isIdentity())
            mShapeCoreFlags.raise(PxShapeCoreFlag::eIDT_TRANSFORM);
        else
            mShapeCoreFlags.clear(PxShapeCoreFlag::eIDT_TRANSFORM);
    }
    PxReal mContactOffset; // PT: Offset 28
    PxU8 mShapeFlags; // PT: Offset 32 !< API shape flags // PT: TODO: use PxShapeFlags here. Needs to move flags to separate file.
    PxShapeCoreFlags mShapeCoreFlags; // PT: Offset 33
    PxU16 mMaterialIndex; // PT: Offset 34
    PxReal mRestOffset; // PT: Offset 36 - same as the API property of the same name - PT: moved from Sc::ShapeCore to fill padding bytes
    GeometryUnion mGeometry; // PT: Offset 40
    PxReal mTorsionalRadius; // PT: Offset 104 - PT: moved from Sc::ShapeCore to fill padding bytes
    PxReal mMinTorsionalPatchRadius; // PT: Offset 108 - PT: moved from Sc::ShapeCore to fill padding bytes
    // The fluid density is stored in PxGeometry::mTypePadding to avoid growing the struct.
    PX_FORCE_INLINE float getDensityForFluid() const
    {
        return mGeometry.getGeometry().mTypePadding;
    }
    PX_FORCE_INLINE void setDensityForFluid(float density)
    {
        const_cast<PxGeometry&>(mGeometry.getGeometry()).mTypePadding = density;
    }
};
// Layout guards: GeometryUnion must stay within its 64-byte budget, and PxsShapeCore's
// size must remain a multiple of 16 (the documented offsets above depend on both).
PX_COMPILE_TIME_ASSERT( sizeof(GeometryUnion) <= 64); // PT: if you break this one I will not be happy
PX_COMPILE_TIME_ASSERT( (sizeof(PxsShapeCore)&0xf) == 0);
}
#endif

View File

@@ -0,0 +1,107 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_GLOBALS_H
#define PXV_GLOBALS_H
#include "PxvConfig.h"
#include "foundation/PxBasicTemplates.h"
namespace physx
{
/*!
\file
PhysX Low-level, Memory management
*/
/************************************************************************/
/* Error Handling */
/************************************************************************/
// Error codes returned by the low-level implementation.
enum PxvErrorCode
{
    PXD_ERROR_NO_ERROR = 0, // success
    PXD_ERROR_INVALID_PARAMETER,
    PXD_ERROR_INVALID_PARAMETER_SIZE,
    PXD_ERROR_INTERNAL_ERROR,
    PXD_ERROR_NOT_IMPLEMENTED,
    PXD_ERROR_NO_CONTEXT,
    PXD_ERROR_NO_TASK_MANAGER,
    PXD_ERROR_WARNING // non-fatal condition
};
class PxShape;
class PxRigidActor;
struct PxsShapeCore;
struct PxsRigidCore;
// Conversion table between low-level core structures and the high-level API objects that
// embed them. Each entry holds the pointer offset from the core struct to its owning
// object, so a conversion is a single offset add via PxPointerOffset.
struct PxvOffsetTable
{
    PX_FORCE_INLINE PxvOffsetTable() {}

    // Low-level shape core -> owning PxShape.
    PX_FORCE_INLINE const PxShape* convertPxsShape2Px(const PxsShapeCore* core) const
    {
        return PxPointerOffset<const PxShape*>(core, pxsShapeCore2PxShape);
    }

    // Low-level rigid core -> owning dynamic rigid actor.
    PX_FORCE_INLINE const PxRigidActor* convertPxsRigidCore2PxRigidBody(const PxsRigidCore* core) const
    {
        return PxPointerOffset<const PxRigidActor*>(core, pxsRigidCore2PxRigidBody);
    }

    // Low-level rigid core -> owning static rigid actor.
    PX_FORCE_INLINE const PxRigidActor* convertPxsRigidCore2PxRigidStatic(const PxsRigidCore* core) const
    {
        return PxPointerOffset<const PxRigidActor*>(core, pxsRigidCore2PxRigidStatic);
    }

    ptrdiff_t pxsShapeCore2PxShape;
    ptrdiff_t pxsRigidCore2PxRigidBody;
    ptrdiff_t pxsRigidCore2PxRigidStatic;
};
// Global offset table, populated by PxvInit().
extern PxvOffsetTable gPxvOffsetTable;
/*!
Initialize low-level implementation.
*/
void PxvInit(const PxvOffsetTable& offsetTable);
/*!
Shut down low-level implementation.
*/
void PxvTerm();
#if PX_SUPPORT_GPU_PHYSX
// Returns the global PxPhysXGpu instance, creating it on demand when 'createIfNeeded' is set.
class PxPhysXGpu* PxvGetPhysXGpu(bool createIfNeeded);
// Releases the instance previously obtained from PxvGetPhysXGpu().
void PxvReleasePhysXGpu(PxPhysXGpu*);
#endif
}
#endif

View File

@@ -0,0 +1,214 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_MANAGER_H
#define PXV_MANAGER_H
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMemory.h"
#include "PxvConfig.h"
#include "PxvGeometry.h"
namespace physx
{
/*!
\file
Manager interface
*/
/************************************************************************/
/* Managers */
/************************************************************************/
class PxsContactManager;
struct PxsRigidCore;
struct PxsShapeCore;
class PxsRigidBody;
/*!
Type of PXD_MANAGER_CCD_MODE property
*/
enum PxvContactManagerCCDMode
{
    PXD_MANAGER_CCD_NONE,  // no continuous collision detection for this pair
    PXD_MANAGER_CCD_LINEAR // linear CCD enabled for this pair
};
/*!
Manager descriptor
*/
// Descriptor for creating a rigid-rigid contact manager. Plain aggregate: the default
// constructor zeroes the whole struct and then applies non-zero defaults.
struct PxvManagerDescRigidRigid
{
    /*!
    Manager user data
    \sa PXD_MANAGER_USER_DATA
    */
    //void* userData;
    /*!
    Dominance setting for one way interactions.
    A dominance of 0 means the corresp. body will
    not be pushable by the other body in the constraint.
    \sa PXD_MANAGER_DOMINANCE0
    */
    PxU8 dominance0;
    /*!
    Dominance setting for one way interactions.
    A dominance of 0 means the corresp. body will
    not be pushable by the other body in the constraint.
    \sa PXD_MANAGER_DOMINANCE1
    */
    PxU8 dominance1;
    /*!
    PxsRigidBodies
    */
    PxsRigidBody* rigidBody0;
    PxsRigidBody* rigidBody1;
    /*!
    Shape Core structures
    */
    const PxsShapeCore* shapeCore0;
    const PxsShapeCore* shapeCore1;
    /*!
    Body Core structures
    */
    PxsRigidCore* rigidCore0;
    PxsRigidCore* rigidCore1;
    /*!
    Enable contact information reporting.
    */
    int reportContactInfo;
    /*!
    Enable contact impulse threshold reporting.
    */
    int hasForceThreshold;
    /*!
    Enable generated contacts to be changeable
    */
    int contactChangeable;
    /*!
    Disable strong friction
    */
    //int disableStrongFriction;
    /*!
    Contact resolution rest distance.
    */
    PxReal restDistance;
    /*!
    Disable contact response
    */
    int disableResponse;
    /*!
    Disable discrete contact generation
    */
    int disableDiscreteContact;
    /*!
    Disable CCD contact generation
    */
    int disableCCDContact;
    /*!
    Is connected to an articulation (1 - first body, 2 - second body)
    */
    int hasArticulations;
    /*!
    is connected to a dynamic (1 - first body, 2 - second body)
    */
    int hasDynamics;
    /*!
    Is the pair touching? Use when re-creating the manager with prior knowledge about touch status.
    positive: pair is touching
    0: touch state unknown (this is a new pair)
    negative: pair is not touching
    Default is 0
    */
    int hasTouch;
    /*!
    Identifies whether body 1 is kinematic. We can treat kinematics as statics and embed velocity into constraint
    because kinematic bodies' velocities will not change
    */
    bool body1Kinematic;
    /*
    Index entries into the transform cache for shape 0
    */
    PxU32 transformCache0;
    /*
    Index entries into the transform cache for shape 1
    */
    PxU32 transformCache1;
    // Zero-initializes every member, then sets both dominances to 1 (fully pushable).
    // NOTE: the PxMemSet over 'this' is only valid while the struct stays trivially copyable.
    PxvManagerDescRigidRigid()
    {
        PxMemSet(this, 0, sizeof(PxvManagerDescRigidRigid));
        dominance0 = 1u;
        dominance1 = 1u;
    }
};
/*!
Report struct for contact manager touch reports
*/
/*!
Report struct for contact manager touch reports
*/
struct PxvContactManagerTouchEvent
{
    void* userData; // opaque to this layer; meaning is defined by whoever fills the event
    // PT: only useful to search for places where we get/set this specific user data
    PX_FORCE_INLINE void setCMTouchEventUserData(void* ud) { userData = ud; }
    PX_FORCE_INLINE void* getCMTouchEventUserData() const { return userData; }
};
}
#endif

View File

@@ -0,0 +1,124 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_SIM_STATS_H
#define PXV_SIM_STATS_H
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "foundation/PxSimpleTypes.h"
#include "geometry/PxGeometry.h"
namespace physx
{
/*!
\file
Context handling
*/
/************************************************************************/
/* Context handling, types */
/************************************************************************/
/*!
Description: contains statistics for the simulation.
*/
struct PxvSimStats
{
PxvSimStats() { clearAll(); }
void clearAll() { PxMemZero(this, sizeof(PxvSimStats)); } // set counters to zero
PX_FORCE_INLINE void incCCDPairs(PxGeometryType::Enum g0, PxGeometryType::Enum g1)
{
PX_ASSERT(g0 <= g1); // That's how they should be sorted
mNbCCDPairs[g0][g1]++;
}
PX_FORCE_INLINE void decCCDPairs(PxGeometryType::Enum g0, PxGeometryType::Enum g1)
{
PX_ASSERT(g0 <= g1); // That's how they should be sorted
PX_ASSERT(mNbCCDPairs[g0][g1]);
mNbCCDPairs[g0][g1]--;
}
PX_FORCE_INLINE void incModifiedContactPairs(PxGeometryType::Enum g0, PxGeometryType::Enum g1)
{
PX_ASSERT(g0 <= g1); // That's how they should be sorted
mNbModifiedContactPairs[g0][g1]++;
}
PX_FORCE_INLINE void decModifiedContactPairs(PxGeometryType::Enum g0, PxGeometryType::Enum g1)
{
PX_ASSERT(g0 <= g1); // That's how they should be sorted
PX_ASSERT(mNbModifiedContactPairs[g0][g1]);
mNbModifiedContactPairs[g0][g1]--;
}
// PT: those guys are now persistent and shouldn't be cleared each frame
PxU32 mNbDiscreteContactPairs [PxGeometryType::eGEOMETRY_COUNT][PxGeometryType::eGEOMETRY_COUNT];
PxU32 mNbCCDPairs [PxGeometryType::eGEOMETRY_COUNT][PxGeometryType::eGEOMETRY_COUNT];
PxU32 mNbModifiedContactPairs [PxGeometryType::eGEOMETRY_COUNT][PxGeometryType::eGEOMETRY_COUNT];
PxU32 mNbDiscreteContactPairsTotal; // PT: sum of mNbDiscreteContactPairs, i.e. number of pairs reaching narrow phase
PxU32 mNbDiscreteContactPairsWithCacheHits;
PxU32 mNbDiscreteContactPairsWithContacts;
PxU32 mNbActiveConstraints;
PxU32 mNbActiveDynamicBodies;
PxU32 mNbActiveKinematicBodies;
PxU32 mNbAxisSolverConstraints;
PxU32 mTotalCompressedContactSize;
PxU32 mTotalConstraintSize;
PxU32 mPeakConstraintBlockAllocations;
PxU32 mNbNewPairs;
PxU32 mNbLostPairs;
PxU32 mNbNewTouches;
PxU32 mNbLostTouches;
PxU32 mNbPartitions;
PxU64 mGpuDynamicsTempBufferCapacity;
PxU32 mGpuDynamicsRigidContactCount;
PxU32 mGpuDynamicsRigidPatchCount;
PxU32 mGpuDynamicsFoundLostPairs;
PxU32 mGpuDynamicsFoundLostAggregatePairs;
PxU32 mGpuDynamicsTotalAggregatePairs;
PxU32 mGpuDynamicsDeformableSurfaceContacts;
PxU32 mGpuDynamicsDeformableVolumeContacts;
PxU32 mGpuDynamicsParticleContacts; // not implemented
PxU32 mGpuDynamicsCollisionStackSize;
};
}
#endif

View File

@@ -0,0 +1,102 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxvGlobals.h"
#include "PxsContext.h"
#include "PxcContactMethodImpl.h"
#include "GuContactMethodImpl.h"
#if PX_SUPPORT_GPU_PHYSX
#include "PxPhysXGpu.h"
static physx::PxPhysXGpu* gPxPhysXGpu = NULL;
#endif
namespace physx
{
PxvOffsetTable gPxvOffsetTable; // definition of the global declared in PxvGlobals.h
// Initializes the low-level implementation: resets the GPU module pointer (when GPU
// support is compiled in) and stores the core-to-API offset table.
void PxvInit(const PxvOffsetTable& offsetTable)
{
#if PX_SUPPORT_GPU_PHYSX
    gPxPhysXGpu = NULL;
#endif
    gPxvOffsetTable = offsetTable;
}
// Shuts down the low-level implementation; releases the GPU instance if one was created.
void PxvTerm()
{
#if PX_SUPPORT_GPU_PHYSX
    PX_RELEASE(gPxPhysXGpu);
#endif
}
}
#if PX_SUPPORT_GPU_PHYSX
namespace physx
{
//forward declare stuff from PxPhysXGpuModuleLoader.cpp
void PxLoadPhysxGPUModule(const char* appGUID);
void PxUnloadPhysxGPUModule();
// Factory function pointer resolved by the GPU module loader.
typedef physx::PxPhysXGpu* (PxCreatePhysXGpu_FUNC)();
extern PxCreatePhysXGpu_FUNC* g_PxCreatePhysXGpu_Func;
// Returns the global PxPhysXGpu instance. When 'createIfNeeded' is set and no instance
// exists yet, it is created either directly (static GPU build) or by loading the GPU
// module and calling its factory function. May return NULL if the module fails to load.
PxPhysXGpu* PxvGetPhysXGpu(bool createIfNeeded)
{
    if (!gPxPhysXGpu && createIfNeeded)
    {
#ifdef PX_PHYSX_GPU_STATIC
        gPxPhysXGpu = PxCreatePhysXGpu();
#else
        PxLoadPhysxGPUModule(NULL);
        if (g_PxCreatePhysXGpu_Func)
        {
            gPxPhysXGpu = g_PxCreatePhysXGpu_Func();
        }
#endif
    }
    return gPxPhysXGpu;
}
// PT: added for the standalone GPU BP but we may want to revisit this
// Releases the global GPU instance previously returned by PxvGetPhysXGpu().
// NOTE(review): the module is unloaded before PX_RELEASE(gpu) runs - verify the release
// path does not call into the unloaded module before changing anything here.
void PxvReleasePhysXGpu(PxPhysXGpu* gpu)
{
    PX_ASSERT(gpu==gPxPhysXGpu);
    PxUnloadPhysxGPUModule();
    PX_RELEASE(gpu);
    gPxPhysXGpu = NULL;
}
}
#endif
#include "PxsDeformableSurfaceMaterialCore.h"
#include "PxsDeformableVolumeMaterialCore.h"
#include "PxsPBDMaterialCore.h"
#include "PxsMaterialCore.h"

View File

@@ -0,0 +1,50 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_CONTACT_METHOD_IMPL_H
#define PXC_CONTACT_METHOD_IMPL_H
#include "GuContactMethodImpl.h"
namespace physx
{
/*!
Method prototype for contact generation routines
*/
typedef bool (*PxcContactMethod) (GU_CONTACT_METHOD_ARGS);
// Matrix of types
// Dispatch tables indexed by the geometry types of a pair (legacy and PCM pipelines),
// plus per-pair-type booleans controlling contact caching.
extern PxcContactMethod g_ContactMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
extern const bool g_CanUseContactCache[][PxGeometryType::eGEOMETRY_COUNT];
extern PxcContactMethod g_PCMContactMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
extern const bool gEnablePCMCaching[][PxGeometryType::eGEOMETRY_COUNT];
}
#endif

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_CONSTRAINT_BLOCK_POOL_H
#define PXC_CONSTRAINT_BLOCK_POOL_H
#include "PxvConfig.h"
#include "foundation/PxArray.h"
#include "foundation/PxMutex.h"
#include "PxcNpMemBlockPool.h"
namespace physx
{
// Tracks the constraint memory blocks acquired through this manager so they can all be
// returned to the pool in one batch via reset().
class PxsConstraintBlockManager
{
public:
    PxsConstraintBlockManager(PxcNpMemBlockPool & blockPool):
        mBlockPool(blockPool)
    {
    }
    // Returns every tracked block to the pool.
    PX_FORCE_INLINE void reset()
    {
        mBlockPool.releaseConstraintBlocks(mTrackingArray);
    }
    PxcNpMemBlockArray mTrackingArray; // blocks handed out via PxcConstraintBlockStream::reserve()
    PxcNpMemBlockPool& mBlockPool;
private:
    // Non-copyable (reference member): assignment declared but intentionally not defined.
    PxsConstraintBlockManager& operator=(const PxsConstraintBlockManager&);
};
// Hands out 16-byte-aligned chunks of constraint memory from PxcNpMemBlock blocks,
// falling back to the pool's exceptional allocator for requests larger than one block.
class PxcConstraintBlockStream
{
    PX_NOCOPY(PxcConstraintBlockStream)
public:
    PxcConstraintBlockStream(PxcNpMemBlockPool & blockPool) :
        mBlockPool (blockPool),
        mBlock (NULL),
        mUsed (0),
        mPeakUsed (0) // FIX: was left uninitialized; zero so peak tracking starts defined
    {
    }
    // Returns a 16-byte-aligned region of at least 'size' bytes. Oversized requests go to
    // the pool's exceptional allocator; otherwise the current block is used, acquiring a
    // fresh one (tracked by 'manager') when the request does not fit. May return NULL if
    // the pool cannot provide a block.
    PX_FORCE_INLINE PxU8* reserve(PxU32 size, PxsConstraintBlockManager& manager)
    {
        size = (size+15)&~15; // round up to 16-byte granularity
        if(size>PxcNpMemBlock::SIZE)
            return mBlockPool.acquireExceptionalConstraintMemory(size);
        if(mBlock == NULL || size+mUsed>PxcNpMemBlock::SIZE)
        {
            mBlock = mBlockPool.acquireConstraintBlock(manager.mTrackingArray);
            PX_ASSERT(0==mBlock || mBlock->data == reinterpret_cast<PxU8*>(mBlock));
            mUsed = size;
            if(mUsed > mPeakUsed) // FIX: maintain the peak the member was declared to track
                mPeakUsed = mUsed;
            return reinterpret_cast<PxU8*>(mBlock);
        }
        PX_ASSERT(mBlock && mBlock->data == reinterpret_cast<PxU8*>(mBlock));
        PxU8* PX_RESTRICT result = mBlock->data+mUsed;
        mUsed += size;
        if(mUsed > mPeakUsed)
            mPeakUsed = mUsed;
        return result;
    }
    // Forgets the current block (does NOT return memory; that is the manager's job).
    PX_FORCE_INLINE void reset()
    {
        mBlock = NULL;
        mUsed = 0;
    }
    PX_FORCE_INLINE PxcNpMemBlockPool& getMemBlockPool() { return mBlockPool; }
private:
    PxcNpMemBlockPool& mBlockPool;
    PxcNpMemBlock* mBlock; // current constraint block
    PxU32 mUsed; // number of bytes used in constraint block
    //Tracking peak allocations
    PxU32 mPeakUsed; // high-water mark of bytes used within a single block
};
// Hands out 16-byte-aligned chunks of contact memory from PxcNpMemBlock blocks, falling
// back to the pool's exceptional allocator for requests larger than one block.
class PxcContactBlockStream
{
    PX_NOCOPY(PxcContactBlockStream)
public:
    PxcContactBlockStream(PxcNpMemBlockPool & blockPool):
        mBlockPool(blockPool),
        mBlock(NULL),
        mUsed(0)
    {
    }

    // Returns a 16-byte-aligned region of at least 'size' bytes, pulling a fresh contact
    // block from the pool whenever the current one cannot satisfy the request.
    PX_FORCE_INLINE PxU8* reserve(PxU32 size)
    {
        const PxU32 alignedSize = (size+15)&~15; // round up to 16-byte granularity
        if(alignedSize>PxcNpMemBlock::SIZE)
            return mBlockPool.acquireExceptionalConstraintMemory(alignedSize);
        PX_ASSERT(alignedSize <= PxcNpMemBlock::SIZE);
        const bool needFreshBlock = (mBlock == NULL) || (alignedSize+mUsed>PxcNpMemBlock::SIZE);
        if(needFreshBlock)
        {
            mBlock = mBlockPool.acquireContactBlock();
            PX_ASSERT(0==mBlock || mBlock->data == reinterpret_cast<PxU8*>(mBlock));
            mUsed = alignedSize;
            return reinterpret_cast<PxU8*>(mBlock);
        }
        PX_ASSERT(mBlock && mBlock->data == reinterpret_cast<PxU8*>(mBlock));
        PxU8* PX_RESTRICT chunk = mBlock->data+mUsed;
        mUsed += alignedSize;
        return chunk;
    }

    // Forgets the current block (memory itself is owned/recycled by the pool).
    PX_FORCE_INLINE void reset()
    {
        mBlock = NULL;
        mUsed = 0;
    }

    PX_FORCE_INLINE PxcNpMemBlockPool& getMemBlockPool() { return mBlockPool; }

private:
    PxcNpMemBlockPool& mBlockPool;
    PxcNpMemBlock* mBlock; // current constraint block
    PxU32 mUsed; // number of bytes used in constraint block
};
}
#endif

View File

@@ -0,0 +1,65 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_CONTACT_CACHE_H
#define PXC_CONTACT_CACHE_H
#include "foundation/PxTransform.h"
#include "PxvConfig.h"
#include "PxcContactMethodImpl.h"
namespace physx
{
class PxcNpThreadContext;
// Contact-generation wrapper that runs 'conMethod' for the pair and manages the cached
// contacts in 'pairContactCache'. Cache-hit criteria live in the implementation.
bool PxcCacheLocalContacts( PxcNpThreadContext& context, Gu::Cache& pairContactCache,
    const PxTransform32& tm0, const PxTransform32& tm1,
    const PxcContactMethod conMethod,
    const PxGeometry& shape0, const PxGeometry& shape1);
// Snapshot of a pair's cached-contact state: the transforms at cache time plus a few
// flags describing the cached contact set.
struct PxcLocalContactsCache
{
    PxTransform mTransform0;
    PxTransform mTransform1;
    PxU16 mNbCachedContacts;
    bool mUseFaceIndices;
    bool mSameNormal;

    // Member-wise copy. Kept as an explicit, void-returning operator to match existing
    // call sites.
    PX_FORCE_INLINE void operator = (const PxcLocalContactsCache& src)
    {
        mNbCachedContacts = src.mNbCachedContacts;
        mTransform0 = src.mTransform0;
        mTransform1 = src.mTransform1;
        mSameNormal = src.mSameNormal;
        mUseFaceIndices = src.mUseFaceIndices;
    }
};
}
#endif // PXC_CONTACT_CACHE_H

View File

@@ -0,0 +1,65 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_MATERIAL_METHOD_H
#define PXC_MATERIAL_METHOD_H
#include "geometry/PxGeometry.h"
namespace physx
{
struct PxsShapeCore;
struct PxsMaterialInfo;
class PxContactBuffer;
// Argument list for fetching materials for both shapes of a contact pair.
#define MATERIAL_METHOD_ARGS \
    const PxsShapeCore* shape0, \
    const PxsShapeCore* shape1, \
    const PxContactBuffer& contactBuffer, \
    PxsMaterialInfo* materialInfo
// Argument list for fetching one shape's materials; the exact meaning of 'index' is
// defined by the implementations - confirm there before relying on it.
#define SINGLE_MATERIAL_METHOD_ARGS \
    const PxsShapeCore* shape, \
    PxU32 index, \
    const PxContactBuffer& contactBuffer, \
    PxsMaterialInfo* materialInfo
/*!
Method prototype for fetch material routines
*/
typedef void (*PxcGetMaterialMethod) (MATERIAL_METHOD_ARGS);
typedef void (*PxcGetSingleMaterialMethod) (SINGLE_MATERIAL_METHOD_ARGS);
// Dispatch tables indexed by geometry type (pairwise and single-shape variants).
extern PxcGetMaterialMethod g_GetMaterialMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
extern PxcGetSingleMaterialMethod g_GetSingleMaterialMethodTable[PxGeometryType::eGEOMETRY_COUNT];
}
#endif

View File

@@ -0,0 +1,49 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_BATCH_H
#define PXC_NP_BATCH_H
#include "PxvConfig.h"
namespace physx
{
struct PxcNpWorkUnit;
class PxcNpThreadContext;
struct PxsContactManagerOutput;
namespace Gu
{
struct Cache;
}
// Discrete narrow-phase entry points for a single pair (work unit). "cache"
// carries pairwise data across frames; results are written to "output".
// The PCM variant presumably uses persistent contact manifolds (selection
// happens elsewhere, e.g. via PxcNpThreadContext::mPCM) - confirm in caller.
void PxcDiscreteNarrowPhase(PxcNpThreadContext& context, const PxcNpWorkUnit& cmInput, Gu::Cache& cache, PxsContactManagerOutput& output, PxU64 contextID);
void PxcDiscreteNarrowPhasePCM(PxcNpThreadContext& context, const PxcNpWorkUnit& cmInput, Gu::Cache& cache, PxsContactManagerOutput& output, PxU64 contextID);
}
#endif

View File

@@ -0,0 +1,132 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_CACHE_H
#define PXC_NP_CACHE_H
#include "foundation/PxMemory.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxPool.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxUtilities.h"
#include "PxcNpCacheStreamPair.h"
#include "GuContactMethodImpl.h"
namespace physx
{
// Reserve "bytes" of cache memory from the pairwise stream and point the
// cache at it. On failure cache.mCachedData is left NULL and a one-time
// warning explains which limit was hit.
PX_FORCE_INLINE void PxcNpCacheReserve(PxcNpCacheStreamPair& streams, Gu::Cache& cache, PxU32 bytes)
{
	bool requestExceedsBlockSize;
	PxU8* reserved = streams.reserve(bytes, requestExceedsBlockSize);
	cache.mCachedData = reserved;
	if(requestExceedsBlockSize)
	{
		// PT: TODO: consider changing the error message, it will silently become obsolete if we change the value of PxcNpMemBlock::SIZE.
		// On the other hand the PxSceneDesc::maxNbContactDataBlocks also hardcodes "16K data blocks" so this isn't urgent.
		PX_WARN_ONCE(
			"Attempting to allocate more than 16K of contact data for a single contact pair in narrowphase. "
			"Either accept dropped contacts or simplify collision geometry.");
	}
	else if(reserved == NULL)
	{
		PX_WARN_ONCE(
			"Reached limit set by PxSceneDesc::maxNbContactDataBlocks - ran out of buffer space for narrow phase. "
			"Either accept dropped contacts or increase buffer size allocated for narrow phase by increasing PxSceneDesc::maxNbContactDataBlocks.");
	}
}
// Serialize "payload" followed by "bytes" of raw data into the cache stream.
// A failed reservation is dropped silently here (PxcNpCacheReserve already
// issued a one-time warning).
template <typename T>
void PxcNpCacheWrite(PxcNpCacheStreamPair& streams,
					 Gu::Cache& cache,
					 const T& payload,
					 PxU32 bytes,
					 const PxU8* data)
{
	PxU8* destination = PxcNpCacheWriteInitiate(streams, cache, payload, bytes);
	if(destination != NULL)
		PxcNpCacheWriteFinalize(destination, payload, bytes, data);
}
// Reserve cache space for: payload (rounded up to 4 bytes) + a PxU32 byte
// count + the data blob, with the total rounded up to 16 bytes. Returns the
// reserved memory, or NULL if the reservation failed.
template <typename T>
PxU8* PxcNpCacheWriteInitiate(PxcNpCacheStreamPair& streams, Gu::Cache& cache, const T& payload, PxU32 bytes)
{
	const PxU32 alignedPayloadSize = (sizeof(payload)+3)&~3;
	const PxU32 totalSize = (alignedPayloadSize + 4 + bytes + 0xF)&~0xF;
	cache.mCachedSize = PxTo16(totalSize);
	PxcNpCacheReserve(streams, cache, totalSize);
	return cache.mCachedData;
}
// Write payload, byte count and (optionally) the raw data blob into memory
// previously reserved by PxcNpCacheWriteInitiate. Layout matches what
// PxcNpCacheRead/PxcNpCacheRead2 expect: [payload | PxU32 size | data].
template <typename T>
PX_FORCE_INLINE void PxcNpCacheWriteFinalize(PxU8* ls, const T& payload, PxU32 bytes, const PxU8* data)
{
	const PxU32 alignedPayloadSize = (sizeof(payload)+3)&~3;
	*reinterpret_cast<T*>(ls) = payload;						// payload first...
	*reinterpret_cast<PxU32*>(ls+alignedPayloadSize) = bytes;	// ...then the blob size...
	if(data)
		PxMemCopy(ls+alignedPayloadSize+sizeof(PxU32), data, bytes);	// ...then the blob itself
}
// Expose the in-place cached payload through "payload" and return a pointer
// to the data blob that follows it. Assumes the cache was written with the
// matching payload type T (no validation is performed here).
template <typename T>
PX_FORCE_INLINE PxU8* PxcNpCacheRead(Gu::Cache& cache, T*& payload)
{
	PxU8* base = cache.mCachedData;
	payload = reinterpret_cast<T*>(base);
	const PxU32 alignedPayloadSize = (sizeof(T)+3)&~3;
	return base + alignedPayloadSize + sizeof(PxU32);
}
// Copy the cached payload out into "payload" and return a pointer to the
// trailing data blob, writing its size to "bytes". Returns NULL (and zero
// bytes) when nothing was cached.
template <typename T>
const PxU8* PxcNpCacheRead2(Gu::Cache& cache, T& payload, PxU32& bytes)
{
	const PxU8* base = cache.mCachedData;
	if(!base)
	{
		bytes = 0;
		return NULL;
	}
	const PxU32 alignedPayloadSize = (sizeof(payload)+3)&~3;
	payload = *reinterpret_cast<const T*>(base);
	bytes = *reinterpret_cast<const PxU32*>(base+alignedPayloadSize);
	// Sanity check: total size must match what PxcNpCacheWriteInitiate stored.
	PX_ASSERT(cache.mCachedSize == ((alignedPayloadSize + 4 + bytes+0xF)&~0xF));
	return base + alignedPayloadSize + sizeof(PxU32);
}
}
#endif

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_CACHE_STREAM_PAIR_H
#define PXC_NP_CACHE_STREAM_PAIR_H
#include "foundation/PxSimpleTypes.h"
#include "PxvConfig.h"
#include "PxcNpMemBlockPool.h"
namespace physx
{
// Pairwise-cache stream backed by PxcNpMemBlockPool. reserve() hands out
// byte ranges for one pair's cached data (see PxcNpCacheReserve users);
// presumably it carves them out of the current block and fetches a new block
// from the pool when needed - confirm in the .cpp.
struct PxcNpCacheStreamPair
{
PX_NOCOPY(PxcNpCacheStreamPair)
public:
PxcNpCacheStreamPair(PxcNpMemBlockPool& blockPool);
// reserve can fail and return null. "sizeTooLarge" reports a request that
// cannot fit (callers warn about requests above the 16K block size).
PxU8* reserve(PxU32 byteCount, bool& sizeTooLarge);
// Drop the reference to the current block; the next reserve() starts fresh.
PX_FORCE_INLINE void reset()
{
mBlock = NULL;
mUsed = 0;
}
private:
PxcNpMemBlockPool& mBlockPool;	// source of cache blocks
PxcNpMemBlock* mBlock;	// current block (NULL when none acquired)
PxU32 mUsed;	// presumably bytes consumed in mBlock - confirm in .cpp
};
}
#endif

View File

@@ -0,0 +1,60 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_CONTACT_PREP_SHARED_H
#define PXC_NP_CONTACT_PREP_SHARED_H
#include "foundation/PxSimpleTypes.h"
namespace physx
{
class PxcNpThreadContext;
struct PxsMaterialInfo;
class PxsMaterialManager;
class PxsConstraintBlockManager;
class PxcConstraintBlockStream;
struct PxContactPoint;
struct PxcDataStreamPool;
// Round "size" up to the next multiple of 16 bytes.
PX_FORCE_INLINE PxU32 computeAlignedSize(PxU32 size)
{
	return (size + 0xF) & ~PxU32(0xF);
}
// Dot-product threshold above which two contact normals are treated as equal.
// NOTE(review): acos(0.999) is ~2.6 degrees, not 6 - the comment below may be stale.
static const PxReal PXC_SAME_NORMAL = 0.999f; //Around 6 degrees
// Writes the given contact points in compressed form (patches + points, plus
// optional force and friction-patch buffers) into the per-thread or shared
// data streams, returning buffer pointers through the reference parameters.
// NOTE(review): the return value's exact semantics are not visible here -
// presumably the compressed size or written-byte count; confirm in the .cpp.
PxU32 writeCompressedContact(const PxContactPoint* const PX_RESTRICT contactPoints, const PxU32 numContactPoints, PxcNpThreadContext* threadContext,
PxU16& writtenContactCount, PxU8*& outContactPatches, PxU8*& outContactPoints, PxU16& compressedContactSize, PxReal*& contactForces, PxU32 contactForceByteSize,
PxU8*& outFrictionPatches, PxcDataStreamPool* frictionPatchesStreamPool,
const PxsMaterialManager* materialManager, bool hasModifiableContacts, bool forceNoResponse, const PxsMaterialInfo* PX_RESTRICT pMaterial, PxU8& numPatches,
PxU32 additionalHeaderSize = 0, PxsConstraintBlockManager* manager = NULL, PxcConstraintBlockStream* blockStream = NULL, bool insertAveragePoint = false,
PxcDataStreamPool* pool = NULL, PxcDataStreamPool* patchStreamPool = NULL, PxcDataStreamPool* forcePool = NULL, const bool isMeshType = false);
}
#endif

View File

@@ -0,0 +1,117 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_MEM_BLOCK_POOL_H
#define PXC_NP_MEM_BLOCK_POOL_H
#include "PxvConfig.h"
#include "foundation/PxArray.h"
#include "foundation/PxMutex.h"
namespace physx
{
class PxcScratchAllocator;
// Fixed-size (16KB) raw memory block; the unit of allocation for all
// narrow-phase pools below.
struct PxcNpMemBlock
{
enum
{
SIZE = 16384
};
PxU8 data[SIZE];
};
typedef PxArray<PxcNpMemBlock*> PxcNpMemBlockArray;
// Central pool of 16KB PxcNpMemBlocks shared by the narrow-phase stages:
// constraints, contacts, friction data and pairwise np caches each draw from
// it. Friction and np-cache streams are double-buffered ([2] arrays below)
// and swapped once per step. NOTE(review): mLock presumably guards the
// arrays/counters for concurrent acquires - confirm in the .cpp.
class PxcNpMemBlockPool
{
PX_NOCOPY(PxcNpMemBlockPool)
public:
PxcNpMemBlockPool(PxcScratchAllocator& allocator);
~PxcNpMemBlockPool();
// Pre-allocates "initial16KDataBlocks" blocks; "maxBlocks" caps total growth
// (cf. PxSceneDesc::maxNbContactDataBlocks mentioned in PxcNpCacheReserve).
void init(PxU32 initial16KDataBlocks, PxU32 maxBlocks);
void flush();
void setBlockCount(PxU32 count);
// Usage statistics (for profiling / tuning the block budget).
PxU32 getUsedBlockCount() const;
PxU32 getMaxUsedBlockCount() const;
PxU32 getPeakConstraintBlockCount() const;
void releaseUnusedBlocks();
// Acquire one block for the given stream; presumably may return NULL when
// the pool is exhausted (callers such as PxcNpCacheReserve handle NULL).
PxcNpMemBlock* acquireConstraintBlock();
PxcNpMemBlock* acquireConstraintBlock(PxcNpMemBlockArray& memBlocks);
PxcNpMemBlock* acquireContactBlock();
PxcNpMemBlock* acquireFrictionBlock();
PxcNpMemBlock* acquireNpCacheBlock();
// Fallback allocation for constraint data too large for a single block.
PxU8* acquireExceptionalConstraintMemory(PxU32 size);
void acquireConstraintMemory();
void releaseConstraintMemory();
void releaseConstraintBlocks(PxcNpMemBlockArray& memBlocks);
void releaseContacts();
// Flip the double-buffered friction / np-cache streams.
void swapFrictionStreams();
void swapNpCacheStreams();
void flushUnused();
private:
PxMutex mLock;
PxcNpMemBlockArray mConstraints;
PxcNpMemBlockArray mContacts[2];
PxcNpMemBlockArray mFriction[2];
PxcNpMemBlockArray mNpCache[2];
PxcNpMemBlockArray mScratchBlocks;
PxArray<PxU8*> mExceptionalConstraints;
PxcNpMemBlockArray mUnused;
PxU32 mNpCacheActiveStream;	// index (0/1) of the active np-cache buffer
PxU32 mFrictionActiveStream;	// index (0/1) of the active friction buffer
PxU32 mCCDCacheActiveStream;
PxU32 mContactIndex;
PxU32 mAllocatedBlocks;
PxU32 mMaxBlocks;	// cap set by init()
PxU32 mInitialBlocks;	// count pre-allocated by init()
PxU32 mUsedBlocks;
PxU32 mMaxUsedBlocks;	// high-water mark of mUsedBlocks
PxcNpMemBlock* mScratchBlockAddr;
PxU32 mNbScratchBlocks;
PxcScratchAllocator& mScratchAllocator;
PxU32 mPeakConstraintAllocations;
PxU32 mConstraintAllocations;
PxcNpMemBlock* acquire(PxcNpMemBlockArray& trackingArray, PxU32* allocationCount = NULL, PxU32* peakAllocationCount = NULL, bool isScratchAllocation = false);
void release(PxcNpMemBlockArray& deadArray, PxU32* allocationCount = NULL);
};
}
#endif

View File

@@ -0,0 +1,200 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_THREAD_CONTEXT_H
#define PXC_NP_THREAD_CONTEXT_H
#include "geometry/PxGeometry.h"
#include "geomutils/PxContactBuffer.h"
#include "common/PxRenderOutput.h"
#include "CmRenderBuffer.h"
#include "PxvConfig.h"
#include "CmScaling.h"
#include "PxcNpCacheStreamPair.h"
#include "PxcConstraintBlockStream.h"
#include "PxcThreadCoherentCache.h"
#include "PxcScratchAllocator.h"
#include "foundation/PxBitMap.h"
#include "../pcm/GuPersistentContactManifold.h"
#include "../contact/GuContactMethodImpl.h"
namespace physx
{
class PxsTransformCache;
class PxsMaterialManager;
namespace Sc
{
class BodySim;
}
/*!
Per-thread context used by contact generation routines (see PxcNpThreadContext below).
*/
// Shared output stream: mDataStream is a buffer of mDataStreamSize bytes
// into which CPU and GPU writers advance separate offsets (mSharedDataIndex
// and mSharedDataIndexGPU). Presumably the offsets are reserved atomically
// by the writers (see the FD comment below) - confirm at the call sites.
struct PxcDataStreamPool
{
PxU8* mDataStream;
PxI32 mSharedDataIndex;
PxU32 mDataStreamSize;
PxU32 mSharedDataIndexGPU;
// True when the combined CPU+GPU offsets exceed the stream capacity.
// NOTE(review): mSharedDataIndex is signed while the others are unsigned;
// the sum is compared as unsigned, so a negative index would misreport.
bool isOverflown() const
{
//FD: my expectation is that reading those variables is atomic, shared indices are non-decreasing,
//so we can only get a false overflow alert because of concurrency issues, which is not a big deal as it means
//it did overflow a bit later
return (mSharedDataIndex + mSharedDataIndexGPU) > mDataStreamSize;
}
};
// Shared narrow-phase context: owns the scratch allocator and the block
// pool, and holds the stream pools / tolerances that the per-thread
// PxcNpThreadContext instances read from.
struct PxcNpContext
{
private:
PX_NOCOPY(PxcNpContext)
public:
// Streams and material manager start NULL and are wired up externally via
// the setters / direct member access; tolerances default to zero.
PxcNpContext() :
mNpMemBlockPool (mScratchAllocator),
mMeshContactMargin (0.0f),
mToleranceLength (0.0f),
mContactStreamPool (NULL),
mPatchStreamPool (NULL),
mForceAndIndiceStreamPool(NULL),
mFrictionPatchStreamPool(NULL),
mMaterialManager (NULL)
{
}
PxcScratchAllocator mScratchAllocator;
PxcNpMemBlockPool mNpMemBlockPool;	// block pool backed by the scratch allocator above
PxReal mMeshContactMargin;
PxReal mToleranceLength;
Cm::RenderBuffer mRenderBuffer;	// debug rendering
PxcDataStreamPool* mContactStreamPool;
PxcDataStreamPool* mPatchStreamPool;
PxcDataStreamPool* mForceAndIndiceStreamPool;
PxcDataStreamPool* mFrictionPatchStreamPool;
PxsMaterialManager* mMaterialManager;
PX_FORCE_INLINE PxReal getToleranceLength() const { return mToleranceLength; }
PX_FORCE_INLINE void setToleranceLength(PxReal x) { mToleranceLength = x; }
PX_FORCE_INLINE PxReal getMeshContactMargin() const { return mMeshContactMargin; }
PX_FORCE_INLINE void setMeshContactMargin(PxReal x) { mMeshContactMargin = x; }
PX_FORCE_INLINE PxcNpMemBlockPool& getNpMemBlockPool() { return mNpMemBlockPool; }
PX_FORCE_INLINE const PxcNpMemBlockPool& getNpMemBlockPool() const { return mNpMemBlockPool; }
PX_FORCE_INLINE void setMaterialManager(PxsMaterialManager* m){ mMaterialManager = m; }
PX_FORCE_INLINE PxsMaterialManager* getMaterialManager() const { return mMaterialManager; }
};
// Per-thread narrow-phase context (cf. the "Per-thread context" note above):
// holds this thread's contact buffer, block streams, pairwise cache streams
// and local sim-stats so contact generation can run without locking. Local
// touch counters/bitmap are merged by the owner after the narrow-phase pass.
class PxcNpThreadContext : public PxcThreadCoherentCache<PxcNpThreadContext, PxcNpContext>::EntryBase
{
PX_NOCOPY(PxcNpThreadContext)
public:
PxcNpThreadContext(PxcNpContext* params);
~PxcNpThreadContext();
#if PX_ENABLE_SIM_STATS
void clearStats();
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
// Accumulate touch-status changes found by this thread.
PX_FORCE_INLINE void addLocalNewTouchCount(PxU32 newTouchCMCount) { mLocalNewTouchCount += newTouchCMCount; }
PX_FORCE_INLINE void addLocalLostTouchCount(PxU32 lostTouchCMCount) { mLocalLostTouchCount += lostTouchCMCount; }
PX_FORCE_INLINE PxU32 getLocalNewTouchCount() const { return mLocalNewTouchCount; }
PX_FORCE_INLINE PxU32 getLocalLostTouchCount() const { return mLocalLostTouchCount; }
PX_FORCE_INLINE PxBitMap& getLocalChangeTouch() { return mLocalChangeTouch; }
void reset(PxU32 cmCount);
// debugging
PxRenderOutput mRenderOutput;
// dsequeira: Need to think about this block pool allocation a bit more. Ideally we'd be
// taking blocks from a single pool, except that we want to be able to selectively reclaim
// blocks if the user needs to defragment, depending on which artifacts they're willing
// to tolerate, such that the blocks we don't reclaim are contiguous.
#if PX_ENABLE_SIM_STATS
// Pair counts per geometry-type combination, for sim statistics.
PxU32 mDiscreteContactPairs [PxGeometryType::eGEOMETRY_COUNT][PxGeometryType::eGEOMETRY_COUNT];
PxU32 mModifiedContactPairs [PxGeometryType::eGEOMETRY_COUNT][PxGeometryType::eGEOMETRY_COUNT];
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
PxcContactBlockStream mContactBlockStream; // constraint block pool
PxcNpCacheStreamPair mNpCacheStreamPair; // narrow phase pairwise data cache
// Everything below here is scratch state. Most of it can even overlap.
// temporary contact buffer
PxContactBuffer mContactBuffer;
PX_ALIGN(16, Gu::MultiplePersistentContactManifold mTempManifold);
Gu::NarrowPhaseParams mNarrowPhaseParams;
// DS: this stuff got moved here from the PxcNpPairContext. As Pierre says:
////////// PT: those members shouldn't be there in the end, it's not necessary
PxsTransformCache* mTransformCache;
const PxReal* mContactDistances;
bool mPCM;	// use the persistent-contact-manifold narrow phase
bool mContactCache;
bool mCreateAveragePoint; // flag to enforce whether we create average points
#if PX_ENABLE_SIM_STATS
PxU32 mCompressedCacheSize;
PxU32 mNbDiscreteContactPairsWithCacheHits;
PxU32 mNbDiscreteContactPairsWithContacts;
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
PxReal mDt; // AP: still needed for ccd
PxU32 mCCDPass;
PxU32 mCCDFaceIndex;
PxU32 mMaxPatches;
// Stream pools shared with PxcNpContext (may be NULL).
PxcDataStreamPool* mContactStreamPool;
PxcDataStreamPool* mPatchStreamPool;
PxcDataStreamPool* mForceAndIndiceStreamPool; //this stream is used to store the force buffer and triangle index if we are performing mesh/heightfield contact gen
PxcDataStreamPool* mFrictionPatchStreamPool;
PxsMaterialManager* mMaterialManager;
private:
// change touch handling.
PxBitMap mLocalChangeTouch;
PxU32 mLocalNewTouchCount;
PxU32 mLocalLostTouchCount;
};
}
#endif

View File

@@ -0,0 +1,211 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_NP_WORK_UNIT_H
#define PXC_NP_WORK_UNIT_H
#include "PxConstraintDesc.h"
#include "PxvGeometry.h"
// PT: the shapeCore structs are 16-bytes aligned by design so the low 4 bits of their pointers are available.
// We can store the geom types there since they fit. An alternative would be simply to read the types from
// shapeCore->mGeometry.getType() but that is one more indirection/cache miss. We might be using other shapeCore
// data everywhere we need the type so that might be irrelevant and could be revisited.
PX_COMPILE_TIME_ASSERT(physx::PxGeometryType::eGEOMETRY_COUNT<16);
namespace physx
{
struct PxsRigidCore;
struct PxsShapeCore;
namespace IG
{
typedef PxU32 EdgeIndex;
}
// Per-pair configuration flags stored in PxcNpWorkUnit::mFlags (PxU16).
// eDOMINANCE_0/1 set means "dominance value 0" for that body - see
// PxcNpWorkUnit::getDominance0/1.
struct PxcNpWorkUnitFlag
{
enum Enum
{
eOUTPUT_CONTACTS = 1 << 0,
eOUTPUT_CONSTRAINTS = 1 << 1,
eDISABLE_STRONG_FRICTION = 1 << 2,
eARTICULATION_BODY0 = 1 << 3,
eARTICULATION_BODY1 = 1 << 4,
eDYNAMIC_BODY0 = 1 << 5,
eDYNAMIC_BODY1 = 1 << 6,
eSOFT_BODY = 1 << 7,
eMODIFIABLE_CONTACT = 1 << 8,
eFORCE_THRESHOLD = 1 << 9,
eDETECT_DISCRETE_CONTACT = 1 << 10,
eHAS_KINEMATIC_ACTOR = 1 << 11,
eDISABLE_RESPONSE = 1 << 12,
eDETECT_CCD_CONTACTS = 1 << 13,
eDOMINANCE_0 = 1 << 14,
eDOMINANCE_1 = 1 << 15,
};
};
// Narrow-phase result/status flags stored in PxcNpWorkUnit::mStatusFlags.
struct PxcNpWorkUnitStatusFlag
{
enum Enum
{
eHAS_NO_TOUCH = (1 << 0),
eHAS_TOUCH = (1 << 1),
//eHAS_SOLVER_CONSTRAINTS = (1 << 2),
eREQUEST_CONSTRAINTS = (1 << 3),
eHAS_CCD_RETOUCH = (1 << 4), // Marks pairs that are touching at a CCD pass and were touching at discrete collision or at a previous CCD pass already
// but we can not tell whether they lost contact in a pass before. We send them as pure eNOTIFY_TOUCH_CCD events to the
// contact report callback if requested.
eDIRTY_MANAGER = (1 << 5),
eREFRESHED_WITH_TOUCH = (1 << 6),
eTOUCH_KNOWN = eHAS_NO_TOUCH | eHAS_TOUCH // The touch status is known (if narrowphase never ran for a pair then no flag will be set)
};
};
// One narrow-phase work item: everything needed to generate contacts for a
// single shape pair. The trailing //N comments document byte offsets of the
// intended layout - do not reorder members. Shape-core pointers are stored
// with the geometry type packed into their low 4 bits (the cores are
// 16-byte aligned; see the file-level comment above).
struct PxcNpWorkUnit
{
const PxsRigidCore* mRigidCore0; // INPUT //8
const PxsRigidCore* mRigidCore1; // INPUT //16
private:
const void* mShapeCoreAndType0; // INPUT //24
const void* mShapeCoreAndType1; // INPUT //32
public:
PxU8* mCCDContacts; // OUTPUT //40
PxU8* mFrictionDataPtr; // INOUT //48
PxU16 mFlags; // INPUT (PxcNpWorkUnitFlag) //50
PxU8 mFrictionPatchCount; // INOUT //51
PxU8 mStatusFlags; // OUTPUT (see PxcNpWorkUnitStatusFlag) //52
PxReal mRestDistance; // INPUT //56
PxU32 mTransformCache0; // //60
PxU32 mTransformCache1; // //64
IG::EdgeIndex mEdgeIndex; //inout the island gen edge index //68
PxU32 mNpIndex; //INPUT //72
PxReal mTorsionalPatchRadius; //76
PxReal mMinTorsionalPatchRadius; //80
PxReal mOffsetSlop; //84
//88 padding
///////////////////////////////////////////////////////////////////////////
// Pack the geometry type into the low 4 bits of the (16-byte aligned)
// shape-core pointer.
PX_FORCE_INLINE const void* encode(const PxsShapeCore* shapeCore)
{
const PxU64 type = PxU64(shapeCore->mGeometry.getType());
PxU64 data = PxU64(shapeCore);
PX_ASSERT(!(data & 15)); // alignment guarantees the low 4 bits are free
data |= type;
return reinterpret_cast<const void*>(data);
}
PX_FORCE_INLINE void setShapeCore0(const PxsShapeCore* shapeCore)
{
mShapeCoreAndType0 = encode(shapeCore);
}
PX_FORCE_INLINE void setShapeCore1(const PxsShapeCore* shapeCore)
{
mShapeCoreAndType1 = encode(shapeCore);
}
// Decoders: mask off the tag bits to recover the pointer, or keep only the
// tag bits to recover the geometry type without touching the shape core.
PX_FORCE_INLINE const PxsShapeCore* getShapeCore0() const
{
return reinterpret_cast<const PxsShapeCore*>(PxU64(mShapeCoreAndType0) & ~15);
}
PX_FORCE_INLINE const PxsShapeCore* getShapeCore1() const
{
return reinterpret_cast<const PxsShapeCore*>(PxU64(mShapeCoreAndType1) & ~15);
}
PX_FORCE_INLINE PxGeometryType::Enum getGeomType0() const
{
return PxGeometryType::Enum(PxU64(mShapeCoreAndType0) & 15);
}
PX_FORCE_INLINE PxGeometryType::Enum getGeomType1() const
{
return PxGeometryType::Enum(PxU64(mShapeCoreAndType1) & 15);
}
///////////////////////////////////////////////////////////////////////////
// Dominance is 1 bit per body: flag set => dominance 0, flag clear => 1.
PX_FORCE_INLINE PxU8 getDominance0() const
{
return (mFlags & PxcNpWorkUnitFlag::eDOMINANCE_0) ? 0 : 1;
}
PX_FORCE_INLINE void setDominance0(PxU8 v)
{
if(v==0)
mFlags |= PxcNpWorkUnitFlag::eDOMINANCE_0;
else
mFlags &= ~PxcNpWorkUnitFlag::eDOMINANCE_0;
}
PX_FORCE_INLINE PxU8 getDominance1() const
{
return (mFlags & PxcNpWorkUnitFlag::eDOMINANCE_1) ? 0 : 1;
}
PX_FORCE_INLINE void setDominance1(PxU8 v)
{
if(v==0)
mFlags |= PxcNpWorkUnitFlag::eDOMINANCE_1;
else
mFlags &= ~PxcNpWorkUnitFlag::eDOMINANCE_1;
}
// A body with dominance 0 receives no impulse: its linear/angular inverse
// mass scales are zeroed.
PX_FORCE_INLINE void setInvMassScaleFromDominance(PxConstraintInvMassScale& invMassScales) const
{
const PxReal dominance0 = getDominance0() ? 1.0f : 0.0f;
const PxReal dominance1 = getDominance1() ? 1.0f : 0.0f;
invMassScales.linear0 = invMassScales.angular0 = dominance0;
invMassScales.linear1 = invMassScales.angular1 = dominance1;
}
///////////////////////////////////////////////////////////////////////////
// Reset the cached outputs (friction data and CCD contacts) for this pair.
PX_FORCE_INLINE void clearCachedState()
{
mFrictionDataPtr = NULL;
mFrictionPatchCount = 0;
mCCDContacts = NULL;
}
};
}
#endif

View File

@@ -0,0 +1,137 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_SCRATCH_ALLOCATOR_H
#define PXC_SCRATCH_ALLOCATOR_H
#include "foundation/PxAssert.h"
#include "PxvConfig.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
// Stack-style allocator over a single user-provided block. Allocations are
// carved downward from the end of the block: mStack records the start
// address of every live allocation (descending order), with mStack.back()
// being the current low-water mark. free() supports out-of-order release by
// removing the matching entry. alloc() can optionally fall back to the heap
// when the block is exhausted; free() detects heap pointers by address range.
// All mutating operations except setBlock take mLock.
class PxcScratchAllocator : public PxUserAllocated
{
PX_NOCOPY(PxcScratchAllocator)
public:
// Starts with no block: the NULL sentinel makes every alloc() fail (and
// fall back to the heap if requested) until setBlock() is called.
PxcScratchAllocator() : mStack("PxcScratchAllocator"), mStart(NULL), mSize(0)
{
mStack.reserve(64);
mStack.pushBack(0);
}
// Install the scratch block (size must be a multiple of 16). Must only be
// called when no scratch allocations are outstanding.
void setBlock(void* addr, PxU32 size)
{
PX_ASSERT(!(size&15));
// if the stack is not empty then some scratch memory was not freed on the previous frame. That's
// likely indicative of a problem, because when the scratch block is too small the memory will have
// come from the heap
PX_ASSERT(mStack.size()==1);
mStack.popBack();
mStart = reinterpret_cast<PxU8*>(addr);
mSize = size;
mStack.pushBack(mStart + size);	// sentinel: top of the empty stack
}
// Claim all remaining free space ([mStart, current top)) as one allocation;
// returns NULL (size 0) when nothing is left.
void* allocAll(PxU32& size)
{
PxMutex::ScopedLock lock(mLock);
PX_ASSERT(mStack.size()>0);
size = PxU32(mStack.back()-mStart);
if(size==0)
return NULL;
mStack.pushBack(mStart);
return mStart;
}
// Allocate 16-byte-aligned memory from the scratch block; when it doesn't
// fit, returns NULL or (if fallBackToHeap) a heap allocation.
void* alloc(PxU32 requestedSize, bool fallBackToHeap = false)
{
requestedSize = (requestedSize+15)&~15;
PxMutex::ScopedLock lock(mLock);
PX_ASSERT(mStack.size()>=1);
PxU8* top = mStack.back();
if(top - mStart >= ptrdiff_t(requestedSize))
{
PxU8* addr = top - requestedSize;	// grow downward
mStack.pushBack(addr);
return addr;
}
if(!fallBackToHeap)
return NULL;
return PX_ALLOC(requestedSize, "Scratch Block Fallback");
}
// Release an allocation from alloc()/allocAll(). Heap fallbacks (addresses
// outside the block) go straight to PX_FREE; scratch addresses have their
// entry removed from the (descending) stack, asserting it exists.
void free(void* addr)
{
PX_ASSERT(addr!=NULL);
if(!isScratchAddr(addr))
{
PX_FREE(addr);
return;
}
PxMutex::ScopedLock lock(mLock);
PX_ASSERT(mStack.size()>1);
PxU32 i=mStack.size()-1;
while(mStack[i]<addr)
i--;
PX_ASSERT(mStack[i]==addr);
mStack.remove(i);
}
// True when addr lies inside the installed scratch block.
bool isScratchAddr(void* addr) const
{
PxU8* a = reinterpret_cast<PxU8*>(addr);
return a>= mStart && a<mStart+mSize;
}
private:
PxMutex mLock;
PxArray<PxU8*> mStack;	// live allocation starts, descending; back() = low-water mark
PxU8* mStart;	// base of the scratch block (NULL before setBlock)
PxU32 mSize;	// block size in bytes
};
}
#endif

View File

@@ -0,0 +1,148 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXC_THREAD_COHERENT_CACHE_H
#define PXC_THREAD_COHERENT_CACHE_H
#include "foundation/PxMutex.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxSList.h"
namespace physx
{
class PxsContext;
/*!
Controls a pool of large objects which must be thread safe.
Tries to return the object most recently used by the thread(for better cache coherancy).
Assumes the object has a default contructor.
(Note the semantics are different to a pool because we dont want to construct/destroy each time
an object is requested, which may be expensive).
TODO: add thread coherancy.
*/
template<class T, class Params>
class PxcThreadCoherentCache : public PxAlignedAllocator<16, PxReflectionAllocator<T> >
{
	typedef PxAlignedAllocator<16, PxReflectionAllocator<T> > Allocator;
	PX_NOCOPY(PxcThreadCoherentCache)
public:
	typedef PxSListEntry EntryBase;

	// 'params' is forwarded to T's constructor whenever get() has to build a
	// new instance; the cache itself only keeps the pointer.
	PX_INLINE PxcThreadCoherentCache(Params* params, const Allocator& alloc = Allocator()) : Allocator(alloc), mParams(params)
	{
	}

	// Destroys and deallocates every object currently held by the cache.
	// NOTE(review): objects still checked out via get() are not on the list at
	// this point and therefore are not destroyed here.
	PX_INLINE ~PxcThreadCoherentCache()
	{
		T* np = static_cast<T*>(root.pop());
		while(np!=NULL)
		{
			np->~T();
			Allocator::deallocate(np);
			np = static_cast<T*>(root.pop());
		}
	}

	// Pop an object from the shared lock-free list, constructing a fresh
	// 16-byte-aligned T(mParams) when the list is empty.
	PX_INLINE T* get()
	{
		T* rv = static_cast<T*>(root.pop());
		if(rv==NULL)
		{
			rv = reinterpret_cast<T*>(Allocator::allocate(sizeof(T), PX_FL));
			PX_PLACEMENT_NEW(rv, T(mParams));
		}
		return rv;
	}

	// Return an object to the cache. It is not destroyed - it is handed out
	// again as-is by a later get().
	PX_INLINE void put(T* item)
	{
		root.push(*item);
	}
private:
	PxSList root;     // lock-free stack of cached objects (T derives from PxSListEntry)
	Params* mParams;  // constructor argument for lazily-created instances
	template<class T2, class P2>
	friend class PxcThreadCoherentCacheIterator;
};
/*!
Used to iterate over all objects controlled by the cache.
Note: The iterator flushes the cache(extracts all items on construction and adds them back on
destruction so we can iterate the list in a safe manner).
*/
template<class T, class Params>
class PxcThreadCoherentCacheIterator
{
public:
	// Flushes the cache up front: all entries are extracted in one atomic
	// operation so the list can be walked while other threads keep using the
	// cache.
	PxcThreadCoherentCacheIterator(PxcThreadCoherentCache<T, Params>& cache) : mCache(cache)
	{
		mNext = cache.root.flush();
		mFirst = mNext;
	}

	// Pushes every extracted entry back into the cache.
	~PxcThreadCoherentCacheIterator()
	{
		PxSListEntry* np = mFirst;
		while(np != NULL)
		{
			PxSListEntry* npNext = np->next();
			mCache.root.push(*np);
			np = npNext;
		}
	}

	// Returns the next cached object, or NULL once the walk is complete.
	PX_INLINE T* getNext()
	{
		if(mNext == NULL)
			return NULL;
		T* rv = static_cast<T*>(mNext);
		mNext = mNext->next();
		return rv;
	}
private:
	PxcThreadCoherentCacheIterator<T, Params>& operator=(const PxcThreadCoherentCacheIterator<T, Params>&);
	PxcThreadCoherentCache<T, Params> &mCache; // cache being iterated
	PxSListEntry* mNext;                       // next entry to hand out
	PxSListEntry* mFirst;                      // head of the flushed list, re-inserted by the dtor
};
}
#endif

View File

@@ -0,0 +1,484 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxcContactCache.h"
#include "PxcNpThreadContext.h"
#include "foundation/PxUtilities.h"
#include "PxcNpCache.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
//#define ENABLE_CONTACT_CACHE_STATS
#ifdef ENABLE_CONTACT_CACHE_STATS
static PxU32 gNbCalls;
static PxU32 gNbHits;
#endif
// Reset the contact-cache hit/call counters.
// Compiles to a no-op unless ENABLE_CONTACT_CACHE_STATS is defined.
void PxcClearContactCacheStats()
{
#ifdef ENABLE_CONTACT_CACHE_STATS
	gNbHits = gNbCalls = 0;
#endif
}
// Print "hits|calls (hit ratio)" for the contact cache.
// Compiles to a no-op unless ENABLE_CONTACT_CACHE_STATS is defined.
void PxcDisplayContactCacheStats()
{
#ifdef ENABLE_CONTACT_CACHE_STATS
	const float ratio = gNbCalls ? float(gNbHits)/float(gNbCalls) : 0.0f;
	pxPrintf("%d|%d (%f)\n", gNbHits, gNbCalls, ratio);
#endif
}
namespace physx
{
// Per shape-type-pair flags telling whether the transform-based contact cache
// (PxcCacheLocalContacts) may be used for that pair.
// Indexed as [PxGeometryType of shape0][PxGeometryType of shape1].
// NOTE(review): only entries at or above the diagonal appear meaningful -
// pairs are presumably canonically ordered (type0 <= type1) before lookup,
// matching the 0 entries of g_ContactMethodTable; confirm with the
// narrow-phase dispatch code.
const bool g_CanUseContactCache[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//PxGeometryType::eSPHERE
	{
		false,	//PxcContactSphereSphere
		false,	//PxcContactSpherePlane
		true,	//PxcContactSphereCapsule
		false,	//PxcContactSphereBox
		false,	//PxConvexCoreGeometry
		true,	//PxcContactSphereConvex
		false,	//ParticleSystem
		true,	//SoftBody
		true,	//PxcContactSphereMesh
		true,	//PxcContactSphereHeightField
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::ePLANE
	{
		false,	//-
		false,	//PxcInvalidContactPair
		true,	//PxcContactPlaneCapsule
		true,	//PxcContactPlaneBox
		false,	//PxConvexCoreGeometry
		true,	//PxcContactPlaneConvex
		false,	//ParticleSystem
		true,	//SoftBody
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::eCAPSULE
	{
		false,	//-
		false,	//-
		true,	//PxcContactCapsuleCapsule
		true,	//PxcContactCapsuleBox
		false,	//PxConvexCoreGeometry
		true,	//PxcContactCapsuleConvex
		false,	//ParticleSystem
		true,	//SoftBody
		true,	//PxcContactCapsuleMesh
		true,	//PxcContactCapsuleHeightField
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::eBOX
	{
		false,	//-
		false,	//-
		false,	//-
		true,	//PxcContactBoxBox
		false,	//PxConvexCoreGeometry
		true,	//PxcContactBoxConvex
		false,	//ParticleSystem
		true,	//SoftBody
		true,	//PxcContactBoxMesh
		true,	//PxcContactBoxHeightField
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::eCONVEXCORE
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//PxConvexCoreGeometry
		false,	//PxcContactBoxConvex
		false,	//ParticleSystem
		false,	//SoftBody
		false,	//PxcContactBoxMesh
		false,	//PxcContactBoxHeightField
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::eCONVEXMESH
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		true,	//PxcContactConvexConvex
		false,	//-
		true,	//-
		true,	//PxcContactConvexMesh2
		true,	//PxcContactConvexHeightField
		false,	//PxcContactGeometryCustomGeometry
	},

	//PxGeometryType::ePARTICLESYSTEM
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
	},

	//PxGeometryType::eTETRAHEDRONMESH
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
	},

	//PxGeometryType::eTRIANGLEMESH
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		true,	//-
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
	},

	//PxGeometryType::eHEIGHTFIELD
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		true,	//-
		false,	//-
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
	},

	//PxGeometryType::eCUSTOM
	{
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		false,	//-
		true,	//-
		false,	//-
		false,	//PxcInvalidContactPair
		false,	//PxcInvalidContactPair
	},
};
// One row per geometry type - keeps the table in sync with the enum.
PX_COMPILE_TIME_ASSERT(sizeof(g_CanUseContactCache) / sizeof(g_CanUseContactCache[0]) == PxGeometryType::eGEOMETRY_COUNT);
}
// Refresh a cached contact for the current frame.
// The cached 'point' was recorded relative to the pair's transforms at cache
// time (contactsData); re-express it through each shape's current world
// transform, average the two candidates for the new contact point, and adjust
// the separation by the relative motion projected on the contact normal.
static PX_FORCE_INLINE void updateContact(	PxContactPoint& dst, const PxcLocalContactsCache& contactsData,
											const PxMat34& world0, const PxMat34& world1,
											const PxVec3& point, const PxVec3& normal, float separation)
{
	// Cached point in each shape's local frame at cache time...
	const PxVec3 local0 = contactsData.mTransform0.transformInv(point);
	const PxVec3 local1 = contactsData.mTransform1.transformInv(point);

	// ...mapped back to world space through the current transforms.
	const PxVec3 p0 = world0.transform(local0);
	const PxVec3 p1 = world1.transform(local1);
	const PxVec3 delta = p0 - p1;

	dst.normal = normal;
	dst.point = (p0 + p1)*0.5f;
	dst.separation = separation + delta.dot(normal);
}
// Prefetch 'size' bytes starting at 'ptr', touching at most two 128-byte
// cache lines: the line containing 'ptr' plus the following line when the
// data straddles the line boundary.
static PX_FORCE_INLINE void prefetchData128(PxU8* PX_RESTRICT ptr, PxU32 size)
{
	PxPrefetchLine(ptr, 0);

	// Offset of the data within its cache line; if the tail spills past the
	// line end, pull in the next line too.
	const PxU32 offsetInLine = PxU32(size_t(ptr)&127);
	if(offsetInLine+size>128)
		PxPrefetchLine(ptr+128, 0);
}
// Append a PxVec3 to the cache stream; returns the advanced write cursor.
static PX_FORCE_INLINE PxU8* outputToCache(PxU8* PX_RESTRICT bytes, const PxVec3& v)
{
	PxVec3* dst = reinterpret_cast<PxVec3*>(bytes);
	*dst = v;
	return bytes + sizeof(PxVec3);
}
// Append a PxReal to the cache stream; returns the advanced write cursor.
static PX_FORCE_INLINE PxU8* outputToCache(PxU8* PX_RESTRICT bytes, PxReal v)
{
	PxReal* dst = reinterpret_cast<PxReal*>(bytes);
	*dst = v;
	return bytes + sizeof(PxReal);
}
// Append a PxU32 to the cache stream; returns the advanced write cursor.
static PX_FORCE_INLINE PxU8* outputToCache(PxU8* PX_RESTRICT bytes, PxU32 v)
{
	PxU32* dst = reinterpret_cast<PxU32*>(bytes);
	*dst = v;
	return bytes + sizeof(PxU32);
}
//PxU32 gContactCache_NbCalls = 0;
//PxU32 gContactCache_NbHits = 0;
// Largest absolute per-component difference between the two positions.
static PX_FORCE_INLINE PxReal maxComponentDeltaPos(const PxTransform& t0, const PxTransform& t1)
{
	const PxReal dx = PxAbs(t0.p.x - t1.p.x);
	const PxReal dy = PxAbs(t0.p.y - t1.p.y);
	const PxReal dz = PxAbs(t0.p.z - t1.p.z);
	return PxMax(PxMax(dx, dy), dz);
}
// Largest absolute per-component difference between the two quaternions.
static PX_FORCE_INLINE PxReal maxComponentDeltaRot(const PxTransform& t0, const PxTransform& t1)
{
	const PxReal dx = PxAbs(t0.q.x - t1.q.x);
	const PxReal dy = PxAbs(t0.q.y - t1.q.y);
	const PxReal dz = PxAbs(t0.q.z - t1.q.z);
	const PxReal dw = PxAbs(t0.q.w - t1.q.w);
	return PxMax(PxMax(PxMax(dx, dy), dz), dw);
}
// Contact generation driven by the transform-based contact cache.
//
// Cache stream layout (written below and via PxcNpCacheWriteInitiate/Finalize):
//   [PxcLocalContactsCache header, padded to 4 bytes][PxU32 nbBytes][contact data]
// Contact data per contact: normal (stored once when all normals are equal),
// point, separation, then the face index when mUseFaceIndices is set.
//
// If the pair's current relative transform is within epsilon of the cached
// one, the cached contacts are refreshed against the new world transforms and
// re-written to the cache, skipping the narrow-phase method. Otherwise
// 'conMethod' runs and its contacts are compressed into the cache for next
// frame.
//
// Returns true when cached contacts were reused (conMethod skipped),
// false when conMethod was executed.
bool physx::PxcCacheLocalContacts(	PxcNpThreadContext& context, Cache& pairContactCache,
									const PxTransform32& tm0, const PxTransform32& tm1,
									const PxcContactMethod conMethod,
									const PxGeometry& shape0, const PxGeometry& shape1)
{
	const NarrowPhaseParams& params = context.mNarrowPhaseParams;

//	gContactCache_NbCalls++;

	if(pairContactCache.mCachedData)
		prefetchData128(pairContactCache.mCachedData, pairContactCache.mCachedSize);

	PxContactBuffer& contactBuffer = context.mContactBuffer;
	contactBuffer.reset();

	PxcLocalContactsCache contactsData;
	PxU32 nbCachedBytes;
	const PxU8* cachedBytes = PxcNpCacheRead2(pairContactCache, contactsData, nbCachedBytes);

	// The cache is re-written below in all paths; drop the old reference now.
	pairContactCache.mCachedData = NULL;
	pairContactCache.mCachedSize = 0;

#ifdef ENABLE_CONTACT_CACHE_STATS
	gNbCalls++;
#endif

	// Size of the PxcLocalContactsCache header, rounded up to 4-byte alignment.
	const PxU32 payloadSize = (sizeof(PxcLocalContactsCache)+3)&~3;

	if(cachedBytes)
	{
		// PT: we used to store the relative TM but it's better to save memory and recompute it
		const PxTransform t0to1 = tm1.transformInv(tm0);
		const PxTransform relTM = contactsData.mTransform1.transformInv(contactsData.mTransform0);

		// Cache hit when the relative pose barely moved since the contacts
		// were cached (position scaled by tolerance length, rotation absolute).
		const PxReal epsilon = 0.01f;
		if(		maxComponentDeltaPos(t0to1, relTM)<epsilon*params.mToleranceLength
			&&	maxComponentDeltaRot(t0to1, relTM)<epsilon)
		{
//			gContactCache_NbHits++;
			const PxU32 nbContacts = contactsData.mNbCachedContacts;

			// Reserve space to re-write the (refreshed) cache data for next frame.
			PxU8* ls = PxcNpCacheWriteInitiate(context.mNpCacheStreamPair, pairContactCache, contactsData, nbCachedBytes);

			prefetchData128(ls, (payloadSize + 4 + nbCachedBytes + 0xF)&~0xF);

			contactBuffer.count = nbContacts;
			if(nbContacts)
			{
				// Decompress each cached contact and refresh it against the
				// current world transforms.
				PxContactPoint* PX_RESTRICT dst = contactBuffer.contacts;

				const Matrix34FromTransform world1(tm1);
				const Matrix34FromTransform world0(tm0);

				const bool sameNormal = contactsData.mSameNormal;
				const PxU8* contacts = reinterpret_cast<const PxU8*>(cachedBytes);
				const PxVec3* normal0 = NULL;
				for(PxU32 i=0;i<nbContacts;i++)
				{
					if(i!=nbContacts-1)
						PxPrefetchLine(contacts, 128);

					// The normal is stored once up front when all contacts
					// share it; otherwise it precedes every contact.
					const PxVec3* cachedNormal;
					if(!i || !sameNormal)
					{
						cachedNormal = reinterpret_cast<const PxVec3*>(contacts);	contacts += sizeof(PxVec3);
						normal0 = cachedNormal;
					}
					else
					{
						cachedNormal = normal0;
					}

					const PxVec3* cachedPoint = reinterpret_cast<const PxVec3*>(contacts);	contacts += sizeof(PxVec3);
					const PxReal* cachedPD = reinterpret_cast<const PxReal*>(contacts);	contacts += sizeof(PxReal);

					updateContact(*dst, contactsData, world0, world1, *cachedPoint, *cachedNormal, *cachedPD);

					if(contactsData.mUseFaceIndices)
					{
						const PxU32* cachedIndex1 = reinterpret_cast<const PxU32*>(contacts);	contacts += sizeof(PxU32);
						dst->internalFaceIndex1 = *cachedIndex1;
					}
					else
					{
						dst->internalFaceIndex1 = PXC_CONTACT_NO_FACE_INDEX;
					}

					dst++;
				}
			}

			// Copy the (unchanged) compressed contacts back into the new stream.
			if(ls)
				PxcNpCacheWriteFinalize(ls, contactsData, nbCachedBytes, cachedBytes);

#ifdef ENABLE_CONTACT_CACHE_STATS
			gNbHits++;
#endif
			return true;
		}
		else
		{
			// PT: if we reach this point we cached the contacts but we couldn't use them next frame
			// => waste of time and memory
		}
	}

	// Cache miss: run the real narrow-phase contact generation.
	conMethod(shape0, shape1, tm0, tm1, params, pairContactCache, context.mContactBuffer, &context.mRenderOutput);

	//if(contactBuffer.count)
	{
		// Compress the freshly generated contacts into the cache stream.
		contactsData.mTransform0 = tm0;
		contactsData.mTransform1 = tm1;

		PxU32 nbBytes = 0;
		const PxU8* bytes = NULL;
		const PxU32 count = contactBuffer.count;
		if(count)
		{
			// Face indices are assumed all-present or all-absent; probe the
			// first contact.
			const bool useFaceIndices = contactBuffer.contacts[0].internalFaceIndex1!=PXC_CONTACT_NO_FACE_INDEX;
			contactsData.mNbCachedContacts = PxTo16(count);
			contactsData.mUseFaceIndices = useFaceIndices;

			const PxContactPoint* PX_RESTRICT srcContacts = contactBuffer.contacts;

			// PT: this loop should not be here. We should output the contacts directly compressed, as we used to.
			bool sameNormal = true;
			{
				const PxVec3 normal0 = srcContacts->normal;
				for(PxU32 i=1;i<count;i++)
				{
					if(srcContacts[i].normal!=normal0)
					{
						sameNormal = false;
						break;
					}
				}
			}
			contactsData.mSameNormal = sameNormal;

			// Compute the compressed size: per-contact normal when normals
			// differ, otherwise one shared normal up front.
			if(!sameNormal)
			{
				const PxU32 sizeof_CachedContactPoint = sizeof(PxVec3) + sizeof(PxVec3) + sizeof(PxReal);
				const PxU32 sizeof_CachedContactPointAndFaceIndices = sizeof_CachedContactPoint + sizeof(PxU32);
				const PxU32 sizeOfItem = useFaceIndices ? sizeof_CachedContactPointAndFaceIndices : sizeof_CachedContactPoint;
				nbBytes = count * sizeOfItem;
			}
			else
			{
				const PxU32 sizeof_CachedContactPoint = sizeof(PxVec3) + sizeof(PxReal);
				const PxU32 sizeof_CachedContactPointAndFaceIndices = sizeof_CachedContactPoint + sizeof(PxU32);
				const PxU32 sizeOfItem = useFaceIndices ? sizeof_CachedContactPointAndFaceIndices : sizeof_CachedContactPoint;
				nbBytes = sizeof(PxVec3) + count * sizeOfItem;
			}

			PxU8* ls = PxcNpCacheWriteInitiate(context.mNpCacheStreamPair, pairContactCache, contactsData, nbBytes);
			if(ls)
			{
				// Stream layout: [header][nbBytes][compressed contacts].
				*reinterpret_cast<PxcLocalContactsCache*>(ls) = contactsData;
				*reinterpret_cast<PxU32*>(ls+payloadSize) = nbBytes;
				bytes = ls+payloadSize+sizeof(PxU32);

				PxU8* dest = const_cast<PxU8*>(bytes);
				for(PxU32 i=0;i<count;i++)
				{
					if(!i || !sameNormal)
						dest = outputToCache(dest, srcContacts[i].normal);
					dest = outputToCache(dest, srcContacts[i].point);
					dest = outputToCache(dest, srcContacts[i].separation);
					if(useFaceIndices)
					{
						dest = outputToCache(dest, srcContacts[i].internalFaceIndex1);
					}
				}
				PX_ASSERT(size_t(dest) - size_t(bytes)==nbBytes);
			}
			else
			{
				// Stream allocation failed: record zero contacts so next
				// frame is a clean miss.
				contactsData.mNbCachedContacts = 0;
				PxcNpCacheWrite(context.mNpCacheStreamPair, pairContactCache, contactsData, 0, bytes);
			}
		}
		else
		{
			// No contacts generated: cache an empty entry.
			contactsData.mNbCachedContacts = 0;
			contactsData.mUseFaceIndices = false;
			contactsData.mSameNormal = false;
			PxcNpCacheWrite(context.mNpCacheStreamPair, pairContactCache, contactsData, nbBytes, bytes);
		}
	}
	return false;
}

View File

@@ -0,0 +1,443 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometry.h"
#include "PxcContactMethodImpl.h"
using namespace physx;
// Common argument list forwarded from the table signature to the Gu functions.
#define ARGS	shape0, shape1, transform0, transform1, params, cache, contactBuffer, renderOutput

// Fallback for shape-type pairs that cannot generate contacts.
static bool PxcInvalidContactPair				(GU_CONTACT_METHOD_ARGS_UNUSED)	{ return false; }

// PT: IMPORTANT: do NOT remove the indirection! Using the Gu functions directly in the table produces massive perf problems.

// Legacy (non-PCM) contact generation wrappers.
// NOTE(review): sphere-vs-convex routes to the capsule-convex function -
// presumably the sphere is handled as a degenerate capsule; confirm in Gu.
static bool PxcContactSphereSphere				(GU_CONTACT_METHOD_ARGS)	{ return contactSphereSphere(ARGS); }
static bool PxcContactSphereCapsule				(GU_CONTACT_METHOD_ARGS)	{ return contactSphereCapsule(ARGS); }
static bool PxcContactSphereBox					(GU_CONTACT_METHOD_ARGS)	{ return contactSphereBox(ARGS); }
static bool PxcContactSpherePlane				(GU_CONTACT_METHOD_ARGS)	{ return contactSpherePlane(ARGS); }
static bool PxcContactSphereConvex				(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleConvex(ARGS); }
static bool PxcContactSphereMesh				(GU_CONTACT_METHOD_ARGS)	{ return contactSphereMesh(ARGS); }
static bool PxcContactSphereHeightField			(GU_CONTACT_METHOD_ARGS)	{ return contactSphereHeightfield(ARGS); }
static bool PxcContactPlaneBox					(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneBox(ARGS); }
static bool PxcContactPlaneCapsule				(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneCapsule(ARGS); }
static bool PxcContactPlaneConvexCore			(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneConvexCore(ARGS); }
static bool PxcContactPlaneConvex				(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneConvex(ARGS); }
static bool PxcContactPlaneMesh					(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneMesh(ARGS); }
static bool PxcContactCapsuleCapsule			(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleCapsule(ARGS); }
static bool PxcContactCapsuleBox				(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleBox(ARGS); }
static bool PxcContactCapsuleConvex				(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleConvex(ARGS); }
static bool PxcContactCapsuleMesh				(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleMesh(ARGS); }
static bool PxcContactCapsuleHeightField		(GU_CONTACT_METHOD_ARGS)	{ return contactCapsuleHeightfield(ARGS); }
static bool PxcContactBoxBox					(GU_CONTACT_METHOD_ARGS)	{ return contactBoxBox(ARGS); }
static bool PxcContactBoxConvex					(GU_CONTACT_METHOD_ARGS)	{ return contactBoxConvex(ARGS); }
static bool PxcContactBoxMesh					(GU_CONTACT_METHOD_ARGS)	{ return contactBoxMesh(ARGS); }
static bool PxcContactBoxHeightField			(GU_CONTACT_METHOD_ARGS)	{ return contactBoxHeightfield(ARGS); }
static bool PxcContactConvexCoreConvex			(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreConvex(ARGS); }
static bool PxcContactConvexConvex				(GU_CONTACT_METHOD_ARGS)	{ return contactConvexConvex(ARGS); }
static bool PxcContactConvexCoreTrimesh			(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreTrimesh(ARGS); }
static bool PxcContactConvexCoreHeightfield		(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreHeightfield(ARGS); }
static bool PxcContactConvexMesh				(GU_CONTACT_METHOD_ARGS)	{ return contactConvexMesh(ARGS); }
static bool PxcContactConvexHeightField			(GU_CONTACT_METHOD_ARGS)	{ return contactConvexHeightfield(ARGS); }
static bool PxcContactMeshMesh					(GU_CONTACT_METHOD_ARGS)	{ return contactMeshMesh(ARGS); }
static bool PxcContactGeometryCustomGeometry	(GU_CONTACT_METHOD_ARGS)	{ return contactGeometryCustomGeometry(ARGS); }

// PCM (persistent contact manifold) wrappers.
// NOTE(review): a few PCM entries route to the non-PCM implementation
// (plane-convex-core, plane-mesh, convex-core-*, mesh-mesh) - presumably no
// PCM variant exists for those pairs; confirm in Gu.
static bool PxcPCMContactSphereSphere			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereSphere(ARGS); }
static bool PxcPCMContactSpherePlane			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSpherePlane(ARGS); }
static bool PxcPCMContactSphereBox				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereBox(ARGS); }
static bool PxcPCMContactSphereCapsule			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereCapsule(ARGS); }
static bool PxcPCMContactSphereConvex			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereConvex(ARGS); }
static bool PxcPCMContactSphereMesh				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereMesh(ARGS); }
static bool PxcPCMContactSphereHeightField		(GU_CONTACT_METHOD_ARGS)	{ return pcmContactSphereHeightField(ARGS); }
static bool PxcPCMContactPlaneCapsule			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactPlaneCapsule(ARGS); }
static bool PxcPCMContactPlaneBox				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactPlaneBox(ARGS); }
static bool PxcPCMContactPlaneConvexCore		(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneConvexCore(ARGS); }
static bool PxcPCMContactPlaneConvex			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactPlaneConvex(ARGS); }
static bool PxcPCMContactPlaneMesh				(GU_CONTACT_METHOD_ARGS)	{ return contactPlaneMesh(ARGS); }
static bool PxcPCMContactCapsuleCapsule			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactCapsuleCapsule(ARGS); }
static bool PxcPCMContactCapsuleBox				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactCapsuleBox(ARGS); }
static bool PxcPCMContactCapsuleConvex			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactCapsuleConvex(ARGS); }
static bool PxcPCMContactCapsuleMesh			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactCapsuleMesh(ARGS); }
static bool PxcPCMContactCapsuleHeightField		(GU_CONTACT_METHOD_ARGS)	{ return pcmContactCapsuleHeightField(ARGS); }
static bool PxcPCMContactBoxBox					(GU_CONTACT_METHOD_ARGS)	{ return pcmContactBoxBox(ARGS); }
static bool PxcPCMContactBoxConvex				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactBoxConvex(ARGS); }
static bool PxcPCMContactBoxMesh				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactBoxMesh(ARGS); }
static bool PxcPCMContactBoxHeightField			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactBoxHeightField(ARGS); }
static bool PxcPCMContactConvexCoreConvex		(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreConvex(ARGS); }
static bool PxcPCMContactConvexConvex			(GU_CONTACT_METHOD_ARGS)	{ return pcmContactConvexConvex(ARGS); }
static bool PxcPCMContactConvexCoreTrimesh		(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreTrimesh(ARGS); }
static bool PxcPCMContactConvexCoreHeightfield	(GU_CONTACT_METHOD_ARGS)	{ return contactConvexCoreHeightfield(ARGS); }
static bool PxcPCMContactConvexMesh				(GU_CONTACT_METHOD_ARGS)	{ return pcmContactConvexMesh(ARGS); }
static bool PxcPCMContactConvexHeightField		(GU_CONTACT_METHOD_ARGS)	{ return pcmContactConvexHeightField(ARGS); }
static bool PxcPCMContactMeshMesh				(GU_CONTACT_METHOD_ARGS)	{ return contactMeshMesh(ARGS); }
static bool PxcPCMContactGeometryCustomGeometry	(GU_CONTACT_METHOD_ARGS)	{ return pcmContactGeometryCustomGeometry(ARGS); }

#undef ARGS
namespace physx
{
//Table of contact methods for different shape-type combinations
// Indexed as [PxGeometryType of shape0][PxGeometryType of shape1].
// NOTE(review): entries of 0 sit below the diagonal - pairs are presumably
// canonically ordered (type0 <= type1) before dispatch so those slots are
// never called; confirm with the narrow-phase dispatch code.
PxcContactMethod g_ContactMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//PxGeometryType::eSPHERE
	{
		PxcContactSphereSphere,				//PxGeometryType::eSPHERE
		PxcContactSpherePlane,				//PxGeometryType::ePLANE
		PxcContactSphereCapsule,			//PxGeometryType::eCAPSULE
		PxcContactSphereBox,				//PxGeometryType::eBOX
		PxcContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcContactSphereConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactSphereMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcContactSphereHeightField,		//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::ePLANE
	{
		0,									//PxGeometryType::eSPHERE
		PxcInvalidContactPair,				//PxGeometryType::ePLANE
		PxcContactPlaneCapsule,				//PxGeometryType::eCAPSULE
		PxcContactPlaneBox,					//PxGeometryType::eBOX
		PxcContactPlaneConvexCore,			//PxGeometryType::eCONVEXCORE
		PxcContactPlaneConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactPlaneMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,				//PxGeometryType::eHEIGHTFIELD
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCAPSULE
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		PxcContactCapsuleCapsule,			//PxGeometryType::eCAPSULE
		PxcContactCapsuleBox,				//PxGeometryType::eBOX
		PxcContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcContactCapsuleConvex,			//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactCapsuleMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcContactCapsuleHeightField,		//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eBOX
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		PxcContactBoxBox,					//PxGeometryType::eBOX
		PxcContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcContactBoxConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactBoxMesh,					//PxGeometryType::eTRIANGLEMESH
		PxcContactBoxHeightField,			//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCONVEXCORE
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		PxcContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcContactConvexCoreConvex,			//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactConvexCoreTrimesh,		//PxGeometryType::eTRIANGLEMESH
		PxcContactConvexCoreHeightfield,	//PxGeometryType::eHEIGHTFIELD
		PxcInvalidContactPair,				//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCONVEXMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		PxcContactConvexConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcContactConvexMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcContactConvexHeightField,		//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::ePARTICLESYSTEM
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,				//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcInvalidContactPair,				//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,				//PxGeometryType::eHEIGHTFIELD
		PxcInvalidContactPair,				//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eTETRAHEDRONMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,				//PxGeometryType::eTETRAHEDRONMESH
		PxcInvalidContactPair,				//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,				//PxGeometryType::eHEIGHTFIELD
		PxcInvalidContactPair,				//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eTRIANGLEMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		PxcContactMeshMesh,					//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,				//PxGeometryType::eHEIGHTFIELD
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eHEIGHTFIELD
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		0,									//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,				//PxGeometryType::eHEIGHTFIELD
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCUSTOM
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		0,									//PxGeometryType::eTRIANGLEMESH
		0,									//PxGeometryType::eHEIGHTFIELD
		PxcContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
};
// One row per geometry type - keeps the table in sync with the enum.
PX_COMPILE_TIME_ASSERT(sizeof(g_ContactMethodTable) / sizeof(g_ContactMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
//Table of contact methods for different shape-type combinations
// PCM (persistent contact manifold) narrow-phase dispatch table, indexed as
// [type0][type1]. Lower-triangle entries are 0 and are never dispatched —
// presumably pair types are canonicalized so the lower geometry type comes
// first before lookup (TODO confirm with the dispatch site).
// PxcInvalidContactPair marks combinations with no contact generation here.
PxcContactMethod g_PCMContactMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//PxGeometryType::eSPHERE
	{
		PxcPCMContactSphereSphere,				//PxGeometryType::eSPHERE
		PxcPCMContactSpherePlane,				//PxGeometryType::ePLANE
		PxcPCMContactSphereCapsule,				//PxGeometryType::eCAPSULE
		PxcPCMContactSphereBox,					//PxGeometryType::eBOX
		PxcPCMContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcPCMContactSphereConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactSphereMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcPCMContactSphereHeightField,			//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePLANE
	{
		0,										//PxGeometryType::eSPHERE
		PxcInvalidContactPair,					//PxGeometryType::ePLANE
		PxcPCMContactPlaneCapsule,				//PxGeometryType::eCAPSULE
		PxcPCMContactPlaneBox,					//PxGeometryType::eBOX
		PxcPCMContactPlaneConvexCore,			//PxGeometryType::eCONVEXCORE
		PxcPCMContactPlaneConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactPlaneMesh,					//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,					//PxGeometryType::eHEIGHTFIELD
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCAPSULE
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		PxcPCMContactCapsuleCapsule,			//PxGeometryType::eCAPSULE
		PxcPCMContactCapsuleBox,				//PxGeometryType::eBOX
		PxcPCMContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcPCMContactCapsuleConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactCapsuleMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcPCMContactCapsuleHeightField,		//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eBOX
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		PxcPCMContactBoxBox,					//PxGeometryType::eBOX
		PxcPCMContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcPCMContactBoxConvex,					//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactBoxMesh,					//PxGeometryType::eTRIANGLEMESH
		PxcPCMContactBoxHeightField,			//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCONVEXCORE
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		PxcPCMContactConvexCoreConvex,			//PxGeometryType::eCONVEXCORE
		PxcPCMContactConvexCoreConvex,			//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactConvexCoreTrimesh,			//PxGeometryType::eTRIANGLEMESH
		PxcPCMContactConvexCoreHeightfield,		//PxGeometryType::eHEIGHTFIELD
		PxcInvalidContactPair,					//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCONVEXMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		PxcPCMContactConvexConvex,				//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactConvexMesh,				//PxGeometryType::eTRIANGLEMESH
		PxcPCMContactConvexHeightField,			//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePARTICLESYSTEM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		0,										//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcInvalidContactPair,					//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,					//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcInvalidContactPair,					//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTETRAHEDRONMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		0,										//PxGeometryType::eCONVEXMESH
		PxcInvalidContactPair,					//PxGeometryType::ePARTICLESYSTEM
		PxcInvalidContactPair,					//PxGeometryType::eTETRAHEDRONMESH
		PxcInvalidContactPair,					//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,					//PxGeometryType::eHEIGHTFIELD	//TODO: make HF midphase that will mask this
		PxcInvalidContactPair,					//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTRIANGLEMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		PxcPCMContactMeshMesh,					//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,					//PxGeometryType::eHEIGHTFIELD
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eHEIGHTFIELD
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		PxcInvalidContactPair,					//PxGeometryType::eHEIGHTFIELD
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCUSTOM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXCORE
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		0,										//PxGeometryType::eHEIGHTFIELD
		PxcPCMContactGeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},
};
// The outer dimension must track PxGeometryType, or dispatch reads out of bounds.
PX_COMPILE_TIME_ASSERT(sizeof(g_PCMContactMethodTable) / sizeof(g_PCMContactMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
}

View File

@@ -0,0 +1,444 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxcMaterialMethodImpl.h"
#include "PxvGeometry.h"
#include "PxcNpThreadContext.h"
#include "PxsMaterialManager.h"
#include "GuTriangleMesh.h"
#include "GuHeightField.h"
using namespace physx;
using namespace Gu;
// PT: moved these functions to same file for improving code locality and easily reusing code (calling smaller functions from larger ones, see below)
///////////////////////////////////////////////////////////////////////////////
static void PxcGetMaterialShape(const PxsShapeCore* shape, const PxU32 index, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
const PxU16 materialIndex = shape->mMaterialIndex;
const PxU32 count = contactBuffer.count;
PX_ASSERT(index==0 || index==1);
for(PxU32 i=0; i<count; i++)
(&materialInfo[i].mMaterialIndex0)[index] = materialIndex;
}
static void PxcGetMaterialShapeShape(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
const PxU16 materialIndex0 = shape0->mMaterialIndex;
const PxU16 materialIndex1 = shape1->mMaterialIndex;
const PxU32 count = contactBuffer.count;
for(PxU32 i=0; i<count; i++)
{
materialInfo[i].mMaterialIndex0 = materialIndex0;
materialInfo[i].mMaterialIndex1 = materialIndex1;
}
}
///////////////////////////////////////////////////////////////////////////////
// Returns the mesh's per-triangle material-index array (may be NULL when the
// mesh was cooked without per-triangle materials).
static PX_FORCE_INLINE const PxU16* getMaterialIndicesLL(const PxTriangleMeshGeometry& meshGeom)
{
	const Gu::TriangleMesh* mesh = static_cast<const Gu::TriangleMesh*>(meshGeom.triangleMesh);
	return mesh->getMaterials();
}
static void PxcGetMaterialMesh(const PxsShapeCore* shape, const PxU32 index, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
PX_ASSERT(index == 0 || index == 1);
const PxTriangleMeshGeometryLL& shapeMesh = shape->mGeometry.get<const PxTriangleMeshGeometryLL>();
if(shapeMesh.materialsLL.numIndices <= 1)
{
PxcGetMaterialShape(shape, index, contactBuffer, materialInfo);
}
else
{
const PxU32 count = contactBuffer.count;
const PxU16* eaMaterialIndices = getMaterialIndicesLL(shapeMesh);
const PxU16* indices = shapeMesh.materialsLL.indices;
for(PxU32 i=0; i<count; i++)
{
const PxContactPoint& contact = contactBuffer.contacts[i];
const PxU32 localMaterialIndex = eaMaterialIndices ? eaMaterialIndices[contact.internalFaceIndex1] : 0;//shapeMesh.triangleMesh->getTriangleMaterialIndex(contact.featureIndex1);
(&materialInfo[i].mMaterialIndex0)[index] = indices[localMaterialIndex];
}
}
}
static void PxcGetMaterialShapeMesh(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
const PxTriangleMeshGeometryLL& shapeMesh = shape1->mGeometry.get<const PxTriangleMeshGeometryLL>();
if(shapeMesh.materialsLL.numIndices <= 1)
{
PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
}
else
{
const PxU32 count = contactBuffer.count;
const PxU16* eaMaterialIndices = getMaterialIndicesLL(shapeMesh);
const PxU16* indices = shapeMesh.materialsLL.indices;
const PxU16 materialIndex0 = shape0->mMaterialIndex;
for(PxU32 i=0; i<count; i++)
{
const PxContactPoint& contact = contactBuffer.contacts[i];
materialInfo[i].mMaterialIndex0 = materialIndex0;
const PxU32 localMaterialIndex = eaMaterialIndices ? eaMaterialIndices[contact.internalFaceIndex1] : 0;//shapeMesh.triangleMesh->getTriangleMaterialIndex(contact.featureIndex1);
materialInfo[i].mMaterialIndex1 = indices[localMaterialIndex];
}
}
}
// Pair material fetch for (deformable/particle shape, triangle mesh) pairs.
// NOTE(review): unlike PxcGetMaterialShapeMesh, this reads shape0's geometry
// as a triangle mesh even though, per the dispatch table, the triangle mesh
// is shape1 in these pairs — confirm whether shape0 is intended here (the
// original PT TODO below raises the same question). Code left untouched.
static void PxcGetMaterialSoftBodyMesh(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
	// PT: TODO: check this, it reads shape0 and labels it shapeMesh1? It's otherwise the same code as PxcGetMaterialShapeMesh ?
	const PxTriangleMeshGeometryLL& shapeMesh1 = shape0->mGeometry.get<const PxTriangleMeshGeometryLL>();
	if (shapeMesh1.materialsLL.numIndices <= 1)
	{
		// Single material on the mesh: both slots come straight from the shapes.
		PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
	}
	else
	{
		const PxU32 count = contactBuffer.count;
		const PxU16* eaMaterialIndices = getMaterialIndicesLL(shapeMesh1);	// per-triangle local material ids (may be NULL)
		const PxU16* indices = shapeMesh1.materialsLL.indices;				// local -> global material index remap
		const PxU16 materialIndex0 = shape0->mMaterialIndex;
		for (PxU32 i = 0; i<count; i++)
		{
			const PxContactPoint& contact = contactBuffer.contacts[i];
			materialInfo[i].mMaterialIndex0 = materialIndex0;
			const PxU32 localMaterialIndex = eaMaterialIndices ? eaMaterialIndices[contact.internalFaceIndex1] : 0;//shapeMesh.triangleMesh->getTriangleMaterialIndex(contact.featureIndex1);
			//contact.featureIndex1 = shapeMesh.materials.indices[localMaterialIndex];
			materialInfo[i].mMaterialIndex1 = indices[localMaterialIndex];
		}
	}
}
///////////////////////////////////////////////////////////////////////////////
// Maps a heightfield triangle index to its local material index. Each sample
// cell owns two triangles: even triangle indices use materialIndex0 of the
// sample, odd ones use materialIndex1.
static PxU32 getMaterialIndex(const Gu::HeightFieldData* hfData, PxU32 triangleIndex)
{
	const PxHeightFieldSample& sample = hfData->samples[triangleIndex >> 1];
	return (triangleIndex & 0x1) ? sample.materialIndex1 : sample.materialIndex0;
}
static void PxcGetMaterialHeightField(const PxsShapeCore* shape, const PxU32 index, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
PX_ASSERT(index == 0 || index == 1);
const PxHeightFieldGeometryLL& hfGeom = shape->mGeometry.get<const PxHeightFieldGeometryLL>();
if(hfGeom.materialsLL.numIndices <= 1)
{
PxcGetMaterialShape(shape, index, contactBuffer, materialInfo);
}
else
{
const PxU32 count = contactBuffer.count;
const PxU16* materialIndices = hfGeom.materialsLL.indices;
const Gu::HeightFieldData* hf = &static_cast<const Gu::HeightField*>(hfGeom.heightField)->getData();
for(PxU32 i=0; i<count; i++)
{
const PxContactPoint& contact = contactBuffer.contacts[i];
const PxU32 localMaterialIndex = getMaterialIndex(hf, contact.internalFaceIndex1);
(&materialInfo[i].mMaterialIndex0)[index] = materialIndices[localMaterialIndex];
}
}
}
static void PxcGetMaterialShapeHeightField(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
const PxHeightFieldGeometryLL& hfGeom = shape1->mGeometry.get<const PxHeightFieldGeometryLL>();
if(hfGeom.materialsLL.numIndices <= 1)
{
PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
}
else
{
const PxU32 count = contactBuffer.count;
const PxU16* materialIndices = hfGeom.materialsLL.indices;
const Gu::HeightFieldData* hf = &static_cast<const Gu::HeightField*>(hfGeom.heightField)->getData();
for(PxU32 i=0; i<count; i++)
{
const PxContactPoint& contact = contactBuffer.contacts[i];
materialInfo[i].mMaterialIndex0 = shape0->mMaterialIndex;
//contact.featureIndex0 = shape0->materialIndex;
const PxU32 localMaterialIndex = getMaterialIndex(hf, contact.internalFaceIndex1);
//contact.featureIndex1 = materialIndices[localMaterialIndex];
PX_ASSERT(localMaterialIndex<hfGeom.materialsLL.numIndices);
materialInfo[i].mMaterialIndex1 = materialIndices[localMaterialIndex];
}
}
}
static void PxcGetMaterialSoftBodyHeightField(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
const PxHeightFieldGeometryLL& hfGeom = shape1->mGeometry.get<const PxHeightFieldGeometryLL>();
if (hfGeom.materialsLL.numIndices <= 1)
{
PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
}
else
{
const PxU32 count = contactBuffer.count;
const PxU16* materialIndices = hfGeom.materialsLL.indices;
const Gu::HeightFieldData* hf = &static_cast<const Gu::HeightField*>(hfGeom.heightField)->getData();
for(PxU32 i=0; i<count; i++)
{
const PxContactPoint& contact = contactBuffer.contacts[i];
materialInfo[i].mMaterialIndex0 = shape0->mMaterialIndex;
//contact.featureIndex0 = shape0->materialIndex;
const PxU32 localMaterialIndex = getMaterialIndex(hf, contact.internalFaceIndex1);
//contact.featureIndex1 = materialIndices[localMaterialIndex];
PX_ASSERT(localMaterialIndex<hfGeom.materialsLL.numIndices);
materialInfo[i].mMaterialIndex1 = materialIndices[localMaterialIndex];
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Single-shape material fetch for deformable/particle-type geometries, which
// expose one material — forwards to the uniform per-shape path. The assert
// records that such shapes always occupy slot 1 of a pair; PX_UNUSED keeps
// builds warning-free when PX_ASSERT compiles out.
static void PxcGetMaterialSoftBody(const PxsShapeCore* shape, const PxU32 index, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
	PX_ASSERT(index == 1);
	PX_UNUSED(index);
	PxcGetMaterialShape(shape, index, contactBuffer, materialInfo);
}
// Pair material fetch for (rigid shape, deformable/particle shape): both sides
// carry a single material, so the uniform shape-shape path applies unchanged.
static void PxcGetMaterialShapeSoftBody(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
	PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
}
// Pair material fetch for two deformable/particle shapes: each carries a
// single material, so the uniform shape-shape path applies unchanged.
static void PxcGetMaterialSoftBodySoftBody(const PxsShapeCore* shape0, const PxsShapeCore* shape1, const PxContactBuffer& contactBuffer, PxsMaterialInfo* materialInfo)
{
	PxcGetMaterialShapeShape(shape0, shape1, contactBuffer, materialInfo);
}
///////////////////////////////////////////////////////////////////////////////
namespace physx
{
// Material fetch for a single shape, indexed by its PxGeometryType. Used when
// only one side of a pair needs per-contact material resolution.
PxcGetSingleMaterialMethod g_GetSingleMaterialMethodTable[] =
{
	PxcGetMaterialShape,			//PxGeometryType::eSPHERE
	PxcGetMaterialShape,			//PxGeometryType::ePLANE
	PxcGetMaterialShape,			//PxGeometryType::eCAPSULE
	PxcGetMaterialShape,			//PxGeometryType::eBOX
	PxcGetMaterialShape,			//PxGeometryType::eCONVEXCORE
	PxcGetMaterialShape,			//PxGeometryType::eCONVEXMESH
	PxcGetMaterialSoftBody,			//PxGeometryType::ePARTICLESYSTEM
	PxcGetMaterialSoftBody,			//PxGeometryType::eTETRAHEDRONMESH
	PxcGetMaterialMesh,				//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
	PxcGetMaterialHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
	PxcGetMaterialShape,			//PxGeometryType::eCUSTOM
};
// The table must track PxGeometryType, or dispatch reads out of bounds.
PX_COMPILE_TIME_ASSERT(sizeof(g_GetSingleMaterialMethodTable) / sizeof(g_GetSingleMaterialMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
//Table of contact methods for different shape-type combinations
// Pairwise material fetch, indexed as [type0][type1]. Lower-triangle entries
// are 0 and are never dispatched — presumably pair types are canonicalized so
// the lower geometry type comes first before lookup (TODO confirm at the
// dispatch site).
PxcGetMaterialMethod g_GetMaterialMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//PxGeometryType::eSPHERE
	{
		PxcGetMaterialShapeShape,			//PxGeometryType::eSPHERE
		PxcGetMaterialShapeShape,			//PxGeometryType::ePLANE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCAPSULE
		PxcGetMaterialShapeShape,			//PxGeometryType::eBOX
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialShapeHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePLANE
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCAPSULE
		PxcGetMaterialShapeShape,			//PxGeometryType::eBOX
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH
		0,									//PxGeometryType::eHEIGHTFIELD
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCAPSULE
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCAPSULE
		PxcGetMaterialShapeShape,			//PxGeometryType::eBOX
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialShapeHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eBOX
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		PxcGetMaterialShapeShape,			//PxGeometryType::eBOX
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialShapeHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCONVEXCORE
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialShapeHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCONVEXMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		PxcGetMaterialShapeShape,			//PxGeometryType::eCONVEXMESH
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialShapeSoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialShapeHeightField,		//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePARTICLESYSTEM
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		PxcGetMaterialSoftBodySoftBody,		//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialSoftBodySoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialSoftBodyMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialSoftBodyHeightField,	//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTETRAHEDRONMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		PxcGetMaterialSoftBodySoftBody,		//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialSoftBodyMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		PxcGetMaterialSoftBodyHeightField,	//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTRIANGLEMESH
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		PxcGetMaterialShapeShape,			//PxGeometryType::eTRIANGLEMESH		// mesh-mesh via SDF (single material)
		0,									//PxGeometryType::eHEIGHTFIELD
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eHEIGHTFIELD
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		0,									//PxGeometryType::eTRIANGLEMESH
		0,									//PxGeometryType::eHEIGHTFIELD
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCUSTOM
	{
		0,									//PxGeometryType::eSPHERE
		0,									//PxGeometryType::ePLANE
		0,									//PxGeometryType::eCAPSULE
		0,									//PxGeometryType::eBOX
		0,									//PxGeometryType::eCONVEXCORE
		0,									//PxGeometryType::eCONVEXMESH
		0,									//PxGeometryType::ePARTICLESYSTEM
		0,									//PxGeometryType::eTETRAHEDRONMESH
		0,									//PxGeometryType::eTRIANGLEMESH
		0,									//PxGeometryType::eHEIGHTFIELD
		PxcGetMaterialShapeShape,			//PxGeometryType::eCUSTOM
	},
};
// The outer dimension must track PxGeometryType, or dispatch reads out of bounds.
PX_COMPILE_TIME_ASSERT(sizeof(g_GetMaterialMethodTable) / sizeof(g_GetMaterialMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
}

View File

@@ -0,0 +1,512 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxcNpBatch.h"
#include "PxcNpCache.h"
#include "common/PxProfileZone.h"
#include "PxcNpWorkUnit.h"
#include "PxcContactCache.h"
#include "PxcNpContactPrepShared.h"
#include "PxvGeometry.h"
#include "CmTask.h"
#include "PxsMaterialManager.h"
#include "PxsTransformCache.h"
#include "PxsContactManagerState.h"
#include "PxcNpThreadContext.h"
#include "PxcMaterialMethodImpl.h"
// PT: use this define to enable detailed analysis of the NP functions.
//#define LOCAL_PROFILE_ZONE(x, y) PX_PROFILE_ZONE(x, y)
#define LOCAL_PROFILE_ZONE(x, y)
using namespace physx;
using namespace Gu;
// PxsCachedTransform instances are stored in PxTransform32-sized slots, so the
// two layouts must match exactly.
PX_COMPILE_TIME_ASSERT(sizeof(PxsCachedTransform)==sizeof(PxTransform32));
// Begins a fresh narrow-phase pair: clears the thread-local contact buffer and
// nulls out the per-pair output counts and stream pointers left over from a
// previous pass.
static void startContacts(PxsContactManagerOutput& output, PxcNpThreadContext& context)
{
	output.statusFlag		= 0;
	output.nbPatches		= 0;
	output.nbContacts		= 0;
	output.contactForces	= NULL;
	output.contactPatches	= NULL;
	output.contactPoints	= NULL;
	output.frictionPatches	= NULL;
	context.mContactBuffer.reset();
}
// Flips every contact in the thread's buffer: negates the normal and swaps the
// two material slots, for pairs whose shape order was reversed.
static void flipContacts(PxcNpThreadContext& threadContext, PxsMaterialInfo* PX_RESTRICT materialInfo)
{
	PxContactBuffer& buffer = threadContext.mContactBuffer;
	const PxU32 nb = buffer.count;
	for(PxU32 i=0; i<nb; ++i)
	{
		buffer.contacts[i].normal = -buffer.contacts[i].normal;
		PxSwap(materialInfo[i].mMaterialIndex0, materialInfo[i].mMaterialIndex1);
	}
}
// Bumps the per-thread discrete contact-pair counter for this geometry-type
// combination when sim stats are compiled in; compiles to nothing otherwise.
static PX_FORCE_INLINE void updateDiscreteContactStats(PxcNpThreadContext& context, PxGeometryType::Enum type0, PxGeometryType::Enum type1)
{
#if PX_ENABLE_SIM_STATS
	// The stats matrix is used upper-triangular: callers pass ordered types.
	PX_ASSERT(type0<=type1);
	context.mDiscreteContactPairs[type0][type1]++;
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
	PX_UNUSED(context);
	PX_UNUSED(type0);
	PX_UNUSED(type1);
#endif
}
// Carries a pair's contact data across a frame: copies the previous
// contact/patch/force/friction streams into freshly reserved storage (GPU
// stream pools when context.mContactStreamPool is set, otherwise the CPU
// block stream) and re-reserves/copies the NP cache memory. Returns true if
// any contact bytes were carried over; returns false early if a cache
// reservation fails.
// @param cmOutput        in/out: previous stream pointers/counts; on return
//                        they reference the newly copied buffers.
// @param cache           NP cache whose backing memory is refreshed.
// @param context         thread context providing the stream pools/streams.
// @param useContactCache copy the (non-multi-manifold) contact cache too.
// @param isMeshType      mesh/heightfield pair: the force buffer additionally
//                        carries one PxU32 face index per contact.
static bool copyBuffers(PxsContactManagerOutput& cmOutput, Gu::Cache& cache, PxcNpThreadContext& context, const bool useContactCache, const bool isMeshType)
{
	bool ret = false;
	//Copy the contact stream from previous buffer to current buffer...
	PxU32 oldSize = sizeof(PxContact) * cmOutput.nbContacts + sizeof(PxContactPatch)*cmOutput.nbPatches;
	if(oldSize)
	{
		ret = true;
		// Keep the previous frame's pointers before we overwrite cmOutput.
		PxU8* oldPatches = cmOutput.contactPatches;
		PxU8* oldContacts = cmOutput.contactPoints;
		PxReal* oldForces = cmOutput.contactForces;
		PxU8* oldFriction = cmOutput.frictionPatches;
		PxU32 forceSize = cmOutput.nbContacts * sizeof(PxReal);
		if(isMeshType)
			forceSize += cmOutput.nbContacts * sizeof(PxU32);	// extra per-contact face indices
		PxU32 frictionSize = oldFriction ? sizeof(PxFrictionPatch) * cmOutput.nbPatches : 0;
		PxU8* PX_RESTRICT contactPatches = NULL;
		PxU8* PX_RESTRICT contactPoints = NULL;
		PxReal* forceBuffer = NULL;
		PxU8* PX_RESTRICT frictionPatches = NULL;
		bool isOverflown = false;
		//ML: if we are using contactStreamPool, which means we are running the GPU codepath
		if(context.mContactStreamPool)
		{
			const PxU32 patchSize = cmOutput.nbPatches * sizeof(PxContactPatch);
			const PxU32 contactSize = cmOutput.nbContacts * sizeof(PxContact);
			// Pool indices advance atomically; storage is handed out from the
			// end of the data stream backwards (mDataStreamSize - index).
			PxU32 index = PxU32(PxAtomicAdd(&context.mContactStreamPool->mSharedDataIndex, PxI32(contactSize)));
			if(context.mContactStreamPool->isOverflown())
			{
				PX_WARN_ONCE("Contact buffer overflow detected, please increase its size in the scene desc!\n");
				isOverflown = true;
			}
			contactPoints = context.mContactStreamPool->mDataStream + context.mContactStreamPool->mDataStreamSize - index;
			const PxU32 patchIndex = PxU32(PxAtomicAdd(&context.mPatchStreamPool->mSharedDataIndex, PxI32(patchSize)));
			if(context.mPatchStreamPool->isOverflown())
			{
				PX_WARN_ONCE("Patch buffer overflow detected, please increase its size in the scene desc!\n");
				isOverflown = true;
			}
			contactPatches = context.mPatchStreamPool->mDataStream + context.mPatchStreamPool->mDataStreamSize - patchIndex;
			if(forceSize)
			{
				index = PxU32(PxAtomicAdd(&context.mForceAndIndiceStreamPool->mSharedDataIndex, PxI32(forceSize)));
				if(context.mForceAndIndiceStreamPool->isOverflown())
				{
					PX_WARN_ONCE("Force buffer overflow detected, please increase its size in the scene desc!\n");
					isOverflown = true;
				}
				forceBuffer = reinterpret_cast<PxReal*>(context.mForceAndIndiceStreamPool->mDataStream + context.mForceAndIndiceStreamPool->mDataStreamSize - index);
			}
			if (frictionSize)
			{
				const PxU32 frictionIndex = PxTo32(PxAtomicAdd(&context.mFrictionPatchStreamPool->mSharedDataIndex, PxI32(frictionSize)));
				if (context.mFrictionPatchStreamPool->isOverflown())
				{
					PX_WARN_ONCE("Friction patch buffer overflow detected, please increase its size in the scene desc!\n");
					isOverflown = true;
				}
				frictionPatches = context.mFrictionPatchStreamPool->mDataStream + context.mFrictionPatchStreamPool->mDataStreamSize - frictionIndex;
			}
			if(isOverflown)
			{
				// Any pool overflow drops the pair's contact data entirely.
				contactPatches = NULL;
				contactPoints = NULL;
				frictionPatches = NULL;
				forceBuffer = NULL;
				cmOutput.nbContacts = cmOutput.nbPatches = 0;
			}
			else
			{
				PxMemCopy(contactPatches, oldPatches, patchSize);
				PxMemCopy(contactPoints, oldContacts, contactSize);
				if(isMeshType)
					PxMemCopy(forceBuffer + cmOutput.nbContacts, oldForces + cmOutput.nbContacts, sizeof(PxU32) * cmOutput.nbContacts);
				if (frictionSize)
					PxMemCopy(frictionPatches, oldFriction, frictionSize);
			}
		}
		else
		{
			// CPU codepath: one linear reservation holds patches, contacts,
			// forces and friction back to back.
			const PxU32 alignedOldSize = computeAlignedSize(oldSize);
			PxU8* data = context.mContactBlockStream.reserve(alignedOldSize + forceSize + frictionSize);
			if(forceSize)
				forceBuffer = reinterpret_cast<PxReal*>(data + alignedOldSize);
			contactPatches = data;
			contactPoints = data + cmOutput.nbPatches * sizeof(PxContactPatch);
			if (frictionSize)
			{
				frictionPatches = data + alignedOldSize + forceSize;
				PxMemCopy(frictionPatches, oldFriction, frictionSize);
			}
			PxMemCopy(data, oldPatches, oldSize);	// patches + contacts are contiguous in the old layout
			if(isMeshType)
				PxMemCopy(forceBuffer + cmOutput.nbContacts, oldForces + cmOutput.nbContacts, sizeof(PxU32) * cmOutput.nbContacts);
		}
		// NOTE(review): this zeroes the entire force region, including the
		// PxU32 face-index portion copied just above for mesh types — confirm
		// whether those indices are meant to be cleared here.
		if(forceSize)
			PxMemZero(forceBuffer, forceSize);
		cmOutput.contactPatches = contactPatches;
		cmOutput.contactPoints = contactPoints;
		cmOutput.frictionPatches = frictionPatches;
		cmOutput.contactForces = forceBuffer;
	}
	if(cache.mCachedSize)
	{
		if(cache.isMultiManifold())
		{
			// Manifold data is 16-byte aligned; re-reserve and move it.
			PX_ASSERT((cache.mCachedSize & 0xF) == 0);
			const PxU8* cachedData = cache.mCachedData;
			PxcNpCacheReserve(context.mNpCacheStreamPair, cache, cache.mCachedSize);
			if (!cache.mCachedData)
				return false;
			PX_ASSERT((reinterpret_cast<uintptr_t>(cache.mCachedData)& 0xF) == 0);
			PxMemCopy(cache.mCachedData, cachedData, cache.mCachedSize);
			cache.setMultiManifold(cache.mCachedData);
		}
		else if(useContactCache)
		{
			//Copy cache information as well...
			const PxU8* cachedData = cache.mCachedData;
			PxcNpCacheReserve(context.mNpCacheStreamPair, cache, computeAlignedSize(cache.mCachedSize));
			if (!cache.mCachedData)
				return false;
			PxMemCopy(cache.mCachedData, cachedData, cache.mCachedSize);
		}
	}
	return ret;
}
//ML: isMeshType is used in the GPU codepath. If the collision pair is mesh/heightfield vs primitives, we need to allocate enough memory for the mForceAndIndiceStreamPool in the threadContext.
static bool finishContacts(const PxcNpWorkUnit& input, PxsContactManagerOutput& npOutput, PxcNpThreadContext& threadContext, PxsMaterialInfo* PX_RESTRICT pMaterials, const bool isMeshType, PxU64 contextID)
{
PX_UNUSED(contextID);
LOCAL_PROFILE_ZONE("finishContacts", contextID);
PxContactBuffer& buffer = threadContext.mContactBuffer;
PX_ASSERT((npOutput.statusFlag & PxsContactManagerStatusFlag::eTOUCH_KNOWN) != PxsContactManagerStatusFlag::eTOUCH_KNOWN);
PxU8 statusFlags = PxU16(npOutput.statusFlag & (~PxsContactManagerStatusFlag::eTOUCH_KNOWN));
if(buffer.count)
statusFlags |= PxsContactManagerStatusFlag::eHAS_TOUCH;
else
statusFlags |= PxsContactManagerStatusFlag::eHAS_NO_TOUCH;
npOutput.nbContacts = PxTo16(buffer.count);
if(!buffer.count)
{
npOutput.statusFlag = statusFlags;
npOutput.nbContacts = 0;
npOutput.nbPatches = 0;
return true;
}
PX_ASSERT(buffer.count);
#if PX_ENABLE_SIM_STATS
threadContext.mNbDiscreteContactPairsWithContacts++;
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
npOutput.statusFlag = statusFlags;
PxU32 contactForceByteSize = buffer.count * sizeof(PxReal);
//Regardless of the flags, we need to now record the compressed contact stream
PxU16 compressedContactSize;
const bool createReports =
input.mFlags & PxcNpWorkUnitFlag::eOUTPUT_CONTACTS
|| (input.mFlags & PxcNpWorkUnitFlag::eFORCE_THRESHOLD);
if(!isMeshType && !createReports)
contactForceByteSize = 0;
const bool res = writeCompressedContact(buffer.contacts, buffer.count, &threadContext, npOutput.nbContacts, npOutput.contactPatches, npOutput.contactPoints, compressedContactSize,
reinterpret_cast<PxReal*&>(npOutput.contactForces), contactForceByteSize,
npOutput.frictionPatches, threadContext.mFrictionPatchStreamPool,
threadContext.mMaterialManager, ((input.mFlags & PxcNpWorkUnitFlag::eMODIFIABLE_CONTACT) != 0),
false, pMaterials, npOutput.nbPatches, 0, NULL, NULL, threadContext.mCreateAveragePoint, threadContext.mContactStreamPool,
threadContext.mPatchStreamPool, threadContext.mForceAndIndiceStreamPool, isMeshType) != 0;
//handle buffer overflow
if(!npOutput.nbContacts)
{
PxU8 thisStatusFlags = PxU16(npOutput.statusFlag & (~PxsContactManagerStatusFlag::eTOUCH_KNOWN));
thisStatusFlags |= PxsContactManagerStatusFlag::eHAS_NO_TOUCH;
npOutput.statusFlag = thisStatusFlags;
npOutput.nbContacts = 0;
npOutput.nbPatches = 0;
#if PX_ENABLE_SIM_STATS
threadContext.mNbDiscreteContactPairsWithContacts--;
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
}
return res;
}
// Decides whether narrow-phase contact generation must run for this pair.
// Returns false to skip generation, either because discrete contact detection is
// disabled for the pair, or because both bodies are inactive (static, asleep or
// frozen) — in which case last frame's contact buffers are carried forward via
// copyBuffers(). On a true return, also computes the pair's contact distance.
template<bool useContactCacheT>
static PX_FORCE_INLINE bool checkContactsMustBeGenerated(PxcNpThreadContext& context, const PxcNpWorkUnit& input, Gu::Cache& cache, PxsContactManagerOutput& output,
	const PxsCachedTransform* cachedTransform0, const PxsCachedTransform* cachedTransform1,
	const bool flip, PxGeometryType::Enum type0, PxGeometryType::Enum type1)
{
	PX_ASSERT(cachedTransform0->transform.isSane() && cachedTransform1->transform.isSane());

	//ML : if user doesn't raise the eDETECT_DISCRETE_CONTACT, we should not generate contacts
	if(!(input.mFlags & PxcNpWorkUnitFlag::eDETECT_DISCRETE_CONTACT))
		return false;

	// Only clean (non-dirty), non-modifiable pairs may reuse last frame's data.
	if(!(output.statusFlag & PxcNpWorkUnitStatusFlag::eDIRTY_MANAGER) && !(input.mFlags & PxcNpWorkUnitFlag::eMODIFIABLE_CONTACT))
	{
		// A body counts as dynamic if it is a rigid dynamic, an articulation link or a soft body.
		const PxU32 body0Dynamic = PxU32(input.mFlags & (PxcNpWorkUnitFlag::eDYNAMIC_BODY0 | PxcNpWorkUnitFlag::eARTICULATION_BODY0 | PxcNpWorkUnitFlag::eSOFT_BODY));
		const PxU32 body1Dynamic = PxU32(input.mFlags & (PxcNpWorkUnitFlag::eDYNAMIC_BODY1 | PxcNpWorkUnitFlag::eARTICULATION_BODY1 | PxcNpWorkUnitFlag::eSOFT_BODY));
		const PxU32 active0 = PxU32(body0Dynamic && !cachedTransform0->isFrozen());
		const PxU32 active1 = PxU32(body1Dynamic && !cachedTransform1->isFrozen());
		if(!(active0 || active1))
		{
			// Neither side is moving: keep last frame's contacts alive instead of regenerating.
			if(flip)
				PxSwap(type0, type1);
			const bool useContactCache = useContactCacheT ? context.mContactCache && g_CanUseContactCache[type0][type1] : false;
#if PX_ENABLE_SIM_STATS
			if(output.nbContacts)
				context.mNbDiscreteContactPairsWithContacts++;
#else
			PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
			const bool isMeshType = type1 > PxGeometryType::eCONVEXMESH;
			copyBuffers(output, cache, context, useContactCache, isMeshType);
			return false;
		}
	}

	output.statusFlag &= (~PxcNpWorkUnitStatusFlag::eDIRTY_MANAGER);

	// The pair contact distance is the sum of both shapes' contact distances.
	const PxReal contactDist0 = context.mContactDistances[input.mTransformCache0];
	const PxReal contactDist1 = context.mContactDistances[input.mTransformCache1];
	//context.mNarrowPhaseParams.mContactDistance = shape0->contactOffset + shape1->contactOffset;
	context.mNarrowPhaseParams.mContactDistance = contactDist0 + contactDist1;

	return true;
}
// Discrete narrow phase for one contact-manager pair.
// useLegacyCodepath=true  -> legacy contact generation (g_ContactMethodTable, optional contact cache).
// useLegacyCodepath=false -> PCM contact generation (g_PCMContactMethodTable, persistent manifolds).
// Results (patches/points/forces) are written into 'output'; PCM manifold data may be
// written back into 'cache' for the next frame.
template<bool useLegacyCodepath>
static PX_FORCE_INLINE void discreteNarrowPhase(PxcNpThreadContext& context, const PxcNpWorkUnit& input, Gu::Cache& cache, PxsContactManagerOutput& output, PxU64 contextID)
{
	PxGeometryType::Enum type0 = input.getGeomType0();
	PxGeometryType::Enum type1 = input.getGeomType1();

	// Contact-method tables are only filled for type0 <= type1, so flip the pair if needed.
	const bool flip = (type1<type0);

	const PxsCachedTransform* cachedTransform0 = &context.mTransformCache->getTransformCache(input.mTransformCache0);
	const PxsCachedTransform* cachedTransform1 = &context.mTransformCache->getTransformCache(input.mTransformCache1);

	// Early out for disabled or fully-inactive pairs (may copy last frame's buffers forward).
	if(!checkContactsMustBeGenerated<useLegacyCodepath>(context, input, cache, output, cachedTransform0, cachedTransform1, flip, type0, type1))
		return;

	PxsShapeCore* shape0 = const_cast<PxsShapeCore*>(input.getShapeCore0());
	PxsShapeCore* shape1 = const_cast<PxsShapeCore*>(input.getShapeCore1());

	if(flip)
	{
		PxSwap(type0, type1);
		PxSwap(shape0, shape1);
		PxSwap(cachedTransform0, cachedTransform1);
	}

	PxsMaterialInfo materialInfo[PxContactBuffer::MAX_CONTACTS];

	Gu::MultiplePersistentContactManifold& manifold = context.mTempManifold;
	bool isMultiManifold = false;

	if(!useLegacyCodepath)
	{
		if(cache.isMultiManifold())
		{
			//We are using a multi-manifold. This is cached in a reduced npCache...
			isMultiManifold = true;
			manifold.fromBuffer(cache.mCachedData);
			cache.setMultiManifold(&manifold);
		}
		else if(cache.isManifold())
		{
			// Warm the cache lines of the single manifold before contact generation touches it.
			void* address = cache.mCachedData;
			PxPrefetch(address);
			PxPrefetch(address, 128);
			PxPrefetch(address, 256);
		}
	}

	updateDiscreteContactStats(context, type0, type1);

	// Reset the per-pair contact buffer/output before generating new contacts.
	startContacts(output, context);

	const PxTransform32* tm0 = reinterpret_cast<const PxTransform32*>(cachedTransform0);
	const PxTransform32* tm1 = reinterpret_cast<const PxTransform32*>(cachedTransform1);
	PX_ASSERT(tm0->isSane() && tm1->isSane());

	const PxGeometry& contactShape0 = shape0->mGeometry.getGeometry();
	const PxGeometry& contactShape1 = shape1->mGeometry.getGeometry();

	if(useLegacyCodepath)
	{
		// PT: many cache misses here...

		PxPrefetchLine(shape1, 0);	// PT: at least get rid of L2s for shape1

		const PxcContactMethod conMethod = g_ContactMethodTable[type0][type1];
		PX_ASSERT(conMethod);

		// The contact cache can skip full contact generation when transforms barely moved.
		const bool useContactCache = context.mContactCache && g_CanUseContactCache[type0][type1];
		if(useContactCache)
		{
			const bool status = PxcCacheLocalContacts(context, cache, *tm0, *tm1, conMethod, contactShape0, contactShape1);
#if PX_ENABLE_SIM_STATS
			if(status)
				context.mNbDiscreteContactPairsWithCacheHits++;
#else
			PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
			PX_UNUSED(status);
#endif
		}
		else
		{
			LOCAL_PROFILE_ZONE("conMethod", contextID);
			conMethod(contactShape0, contactShape1, *tm0, *tm1, context.mNarrowPhaseParams, cache, context.mContactBuffer, &context.mRenderOutput);
		}
	}
	else
	{
		LOCAL_PROFILE_ZONE("conMethod", contextID);
		const PxcContactMethod conMethod = g_PCMContactMethodTable[type0][type1];
		PX_ASSERT(conMethod);
		conMethod(contactShape0, contactShape1, *tm0, *tm1, context.mNarrowPhaseParams, cache, context.mContactBuffer, &context.mRenderOutput);
	}

	if(context.mContactBuffer.count)
	{
		// Fetch per-contact material indices for the generated contacts.
		const PxcGetMaterialMethod materialMethod = g_GetMaterialMethodTable[type0][type1];
		if(materialMethod)
		{
			LOCAL_PROFILE_ZONE("materialMethod", contextID);
			materialMethod(shape0, shape1, context.mContactBuffer, materialInfo);
		}
		if(flip)
		{
			// Contacts were generated with the shapes swapped; flip them back to caller order.
			LOCAL_PROFILE_ZONE("flipContacts", contextID);
			flipContacts(context, materialInfo);
		}
	}

	if(!useLegacyCodepath)
	{
		if(isMultiManifold)
		{
			//Store the manifold back...
			const PxU32 size = (sizeof(MultiPersistentManifoldHeader) +
				manifold.mNumManifolds * sizeof(SingleManifoldHeader) +
				manifold.mNumTotalContacts * sizeof(Gu::CachedMeshPersistentContact));

			PxcNpCacheReserve(context.mNpCacheStreamPair, cache, size);
			if (!cache.mCachedData)
				return;

			PX_ASSERT((reinterpret_cast<uintptr_t>(cache.mCachedData)& 0xf) == 0);
			manifold.toBuffer(cache.mCachedData);
			cache.setMultiManifold(cache.mCachedData);
			cache.mCachedSize = PxTo16(size);
		}
	}

	const bool isMeshType = type1 > PxGeometryType::eCONVEXMESH;
	finishContacts(input, output, context, materialInfo, isMeshType, contextID);
}
// Entry point for the legacy (non-PCM) discrete narrow phase.
void physx::PxcDiscreteNarrowPhase(PxcNpThreadContext& context, const PxcNpWorkUnit& input, Gu::Cache& cache, PxsContactManagerOutput& output, PxU64 contextID)
{
	LOCAL_PROFILE_ZONE("PxcDiscreteNarrowPhase", contextID);
	discreteNarrowPhase<true>(context, input, cache, output, contextID);
}
// Entry point for the PCM (persistent contact manifold) discrete narrow phase.
void physx::PxcDiscreteNarrowPhasePCM(PxcNpThreadContext& context, const PxcNpWorkUnit& input, Gu::Cache& cache, PxsContactManagerOutput& output, PxU64 contextID)
{
	LOCAL_PROFILE_ZONE("PxcDiscreteNarrowPhasePCM", contextID);
	discreteNarrowPhase<false>(context, input, cache, output, contextID);
}

View File

@@ -0,0 +1,72 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxcNpCacheStreamPair.h"
#include "foundation/PxUserAllocated.h"
#include "PxcNpMemBlockPool.h"
using namespace physx;
// Binds the cache stream to the shared block pool. No memory block is acquired
// here — the first reserve() call pulls one from the pool on demand.
PxcNpCacheStreamPair::PxcNpCacheStreamPair(PxcNpMemBlockPool& blockPool) :
	mBlockPool	(blockPool),
	mBlock		(NULL),
	mUsed		(0)
{
}
// reserve can fail and return null. Read should never fail
// Reserves a 16-byte-aligned region of 'size' bytes from the current cache block,
// acquiring a fresh block from the pool when the current one is absent or full.
// Returns NULL if the pool is exhausted; sets 'sizeTooLarge' (and returns NULL)
// when the request can never fit in a single block.
PxU8* PxcNpCacheStreamPair::reserve(PxU32 size, bool& sizeTooLarge)
{
	// Round the request up to a multiple of 16 bytes.
	const PxU32 alignedSize = (size + 15) & ~PxU32(15);

	if(alignedSize > PxcNpMemBlock::SIZE)
	{
		sizeTooLarge = true;
		return NULL;
	}
	sizeTooLarge = false;

	// Grab a new block when we have none, or the current one cannot hold the request.
	const bool needFreshBlock = (mBlock == NULL) || (mUsed + alignedSize > PxcNpMemBlock::SIZE);
	if(needFreshBlock)
	{
		mBlock = mBlockPool.acquireNpCacheBlock();
		mUsed = 0;
	}

	if(mBlock == NULL)
		return NULL;	// pool exhausted

	PxU8* region = mBlock->data + mUsed;
	mUsed += alignedSize;
	return region;
}

View File

@@ -0,0 +1,552 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAlloca.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxErrors.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxVecMath.h"
#include "geomutils/PxContactPoint.h"
#include "PxcNpContactPrepShared.h"
#include "PxcNpThreadContext.h"
#include "PxsMaterialManager.h"
#include "PxsMaterialCombiner.h"
#include "PxcNpContactPrepShared.h"
using namespace physx;
using namespace Gu;
using namespace aos;
// Copies position and separation from a narrow-phase contact into the compressed
// stream representation. The position is copied as one 16-byte SIMD store, which
// writes 4 floats starting at contact.x — presumably spilling into the field that
// follows 'contact' (TODO confirm layout), which is why 'separation' is assigned
// afterwards to restore its correct value.
static PX_FORCE_INLINE void copyContactPoint(PxContact* PX_RESTRICT point, const PxContactPoint* PX_RESTRICT cp)
{
	// PT: TODO: consider moving "separation" right after "point" in both structures, to copy both at the same time.
//	point->contact = cp->point;
	const Vec4V contactV = V4LoadA(&cp->point.x);	// PT: V4LoadA safe because 'point' is aligned.
	V4StoreU(contactV, &point->contact.x);
	point->separation = cp->separation;
}
void combineMaterials(const PxsMaterialManager* materialManager, PxU16 origMatIndex0, PxU16 origMatIndex1, PxReal& staticFriction, PxReal& dynamicFriction, PxReal& combinedRestitution, PxU32& materialFlags, PxReal& combinedDamping)
{
const PxsMaterialData& data0 = *materialManager->getMaterial(origMatIndex0);
const PxsMaterialData& data1 = *materialManager->getMaterial(origMatIndex1);
PxsCombineMaterials(data0, data1, staticFriction, dynamicFriction, combinedRestitution, materialFlags, combinedDamping);
}
// Bookkeeping for one run ("stride") of consecutive contact points that share a
// normal and a material pair. Built on the stack by writeCompressedContact();
// strides with compatible normals/materials are chained together via nextIndex
// to form one logical contact patch headed by a root stride.
struct StridePatch
{
	PxU8 startIndex;	// index of the first contact point in this stride (inclusive)
	PxU8 endIndex;		// one past the last contact point in this stride
	PxU8 nextIndex;		// index of the next stride in the chain; 0xFF terminates the chain
	PxU8 totalCount;	// on a root stride: total contact count over the whole chain
	bool isRoot;		// true if this stride heads a chain (emits a PxContactPatch)
};
// Compresses the narrow-phase contact buffer into the patch/point/force streams
// consumed by the solver and by contact reports.
// - Contacts are grouped into patches: consecutive points whose normals agree
//   (dot >= PXC_SAME_NORMAL) and whose material pair matches share a patch;
//   non-consecutive compatible runs are chained via StridePatch::nextIndex.
// - insertAveragePoint adds one averaged contact per multi-point patch.
// - Storage comes either from the pinned stream pools (GPU path: contactStreamPool
//   set, contacts not modifiable, no additional header) or from the thread-local /
//   constraint block stream (CPU path).
// Outputs: patch/point/force/friction-patch pointers, written contact count,
// compressed size and patch count. Returns the total compressed size in bytes,
// or 0 on allocation failure / stream overflow, in which case all outputs are
// NULLed and writtenContactCount is 0.
//
// BUGFIX in this revision: the averaged contact's face index was read via
// stridePatches[index] AFTER the chain walk terminated with index == 0xFF,
// an out-of-bounds access (the PX_ALLOCA array holds only numContactPoints
// entries). It now uses the root patch's first contact, matching the source
// of the patch normal.
PxU32 physx::writeCompressedContact(const PxContactPoint* const PX_RESTRICT contactPoints, const PxU32 numContactPoints, PxcNpThreadContext* threadContext,
	PxU16& writtenContactCount, PxU8*& outContactPatches, PxU8*& outContactPoints, PxU16& compressedContactSize, PxReal*& outContactForces, PxU32 contactForceByteSize,
	PxU8*& outFrictionPatches, PxcDataStreamPool* frictionPatchesStreamPool,
	const PxsMaterialManager* materialManager, bool hasModifiableContacts, bool forceNoResponse, const PxsMaterialInfo* PX_RESTRICT pMaterial, PxU8& numPatches,
	PxU32 additionalHeaderSize, PxsConstraintBlockManager* manager, PxcConstraintBlockStream* blockStream, bool insertAveragePoint,
	PxcDataStreamPool* contactStreamPool, PxcDataStreamPool* patchStreamPool, PxcDataStreamPool* forceStreamPool, const bool isMeshType)
{
	if(numContactPoints == 0)
	{
		writtenContactCount = 0;
		outContactPatches = NULL;
		outContactPoints = NULL;
		outContactForces = NULL;
		compressedContactSize = 0;
		numPatches = 0;
		outFrictionPatches = NULL;
		return 0;
	}

	//Calculate the size of the contact buffer...
	PX_ALLOCA(strPatches, StridePatch, numContactPoints);
	StridePatch* stridePatches = &strPatches[0];

	PxU32 numStrideHeaders = 1;
	PxU32 totalUniquePatches = 1;
	PxU32 totalContactPoints = numContactPoints;

	PxU32 strideStart = 0;
	bool root = true;
	StridePatch* parentRootPatch = NULL;
	{
		const PxReal closeNormalThresh = PXC_SAME_NORMAL;
		//Go through and tag how many patches we have...
		PxVec3 normal = contactPoints[0].normal;
		PxU16 mat0 = pMaterial[0].mMaterialIndex0;
		PxU16 mat1 = pMaterial[0].mMaterialIndex1;

		for(PxU32 a = 1; a < numContactPoints; ++a)
		{
			if(normal.dot(contactPoints[a].normal) < closeNormalThresh ||
				pMaterial[a].mMaterialIndex0 != mat0 || pMaterial[a].mMaterialIndex1 != mat1)
			{
				// Close the current stride [strideStart, a).
				StridePatch& patch = stridePatches[numStrideHeaders-1];
				patch.startIndex = PxU8(strideStart);
				patch.endIndex = PxU8(a);
				patch.nextIndex = 0xFF;
				patch.totalCount = PxU8(a - strideStart);
				patch.isRoot = root;
				if(parentRootPatch)
					parentRootPatch->totalCount += PxU8(a - strideStart);

				root = true;
				parentRootPatch = NULL;

				// Try to chain the new stride onto an earlier root patch with a
				// compatible normal and identical material pair.
				for(PxU32 b = 1; b < numStrideHeaders; ++b)
				{
					StridePatch& thisPatch = stridePatches[b-1];
					if(thisPatch.isRoot)
					{
						PxU32 ind = thisPatch.startIndex;
						PxReal dp2 = contactPoints[a].normal.dot(contactPoints[ind].normal);
						if(dp2 >= closeNormalThresh && pMaterial[a].mMaterialIndex0 == pMaterial[ind].mMaterialIndex0 &&
							pMaterial[a].mMaterialIndex1 == pMaterial[ind].mMaterialIndex1)
						{
							// Append at the end of that root's chain.
							PxU32 nextInd = b-1;
							while(stridePatches[nextInd].nextIndex != 0xFF)
								nextInd = stridePatches[nextInd].nextIndex;
							stridePatches[nextInd].nextIndex = PxU8(numStrideHeaders);
							root = false;
							parentRootPatch = &stridePatches[b-1];
							break;
						}
					}
				}

				normal = contactPoints[a].normal;
				mat0 = pMaterial[a].mMaterialIndex0;
				mat1 = pMaterial[a].mMaterialIndex1;
				totalContactPoints = insertAveragePoint && (a - strideStart) > 1 ? totalContactPoints + 1 : totalContactPoints;
				strideStart = a;
				numStrideHeaders++;
				if(root)
					totalUniquePatches++;
			}
		}
		// Account for the average point of the final stride, and grow the force
		// buffer by one float per inserted average point.
		totalContactPoints = insertAveragePoint &&(numContactPoints - strideStart) > 1 ? totalContactPoints + 1 : totalContactPoints;
		contactForceByteSize = insertAveragePoint && contactForceByteSize != 0 ? contactForceByteSize + sizeof(PxF32) * (totalContactPoints - numContactPoints) : contactForceByteSize;
	}

	{
		// Close the final stride [strideStart, numContactPoints).
		StridePatch& patch = stridePatches[numStrideHeaders-1];
		patch.startIndex = PxU8(strideStart);
		patch.endIndex = PxU8(numContactPoints);
		patch.nextIndex = 0xFF;
		patch.totalCount = PxU8(numContactPoints - strideStart);
		patch.isRoot = root;
		if(parentRootPatch)
			parentRootPatch->totalCount += PxU8(numContactPoints - strideStart);
	}

	numPatches = PxU8(totalUniquePatches);

	//Calculate the number of patches/points required
	const bool isModifiable = !forceNoResponse && hasModifiableContacts;
	// Modifiable contacts get one PxContactPatch per point so each point can be tuned independently.
	const PxU32 patchHeaderSize = sizeof(PxContactPatch) * (isModifiable ? totalContactPoints : totalUniquePatches) + additionalHeaderSize;
	const PxU32 pointSize = totalContactPoints * (isModifiable ? sizeof(PxModifiableContact) : sizeof(PxContact));

	const PxU32 requiredContactSize = pointSize;
	const PxU32 requiredPatchSize = patchHeaderSize;

	PxU32 totalRequiredSize;

	PxU8* PX_RESTRICT contactData = NULL;
	PxU8* PX_RESTRICT patchData = NULL;
	PxReal* PX_RESTRICT forceData = NULL;
	PxU32* PX_RESTRICT triangleIndice = NULL;

	// Calculate friction data size
	const PxU32 frictionPatchesSize = numPatches * sizeof(PxFrictionPatch);
	PxU8* PX_RESTRICT frictionPatchesData = NULL;

	if(contactStreamPool && !isModifiable && additionalHeaderSize == 0) //If the contacts are modifiable, we **DON'T** allocate them in GPU pinned memory. This will be handled later when they're modified
	{
		bool isOverflown = false;

		// Pinned pools are carved from the end: the atomic add returns the running
		// total and the region lives at (stream end - index).
		PxU32 contactIndex = PxU32(PxAtomicAdd(&contactStreamPool->mSharedDataIndex, PxI32(requiredContactSize)));
		if (contactStreamPool->isOverflown())
		{
			PX_WARN_ONCE("Contact buffer overflow detected, please increase its size in the scene desc!\n");
			isOverflown = true;
		}
		contactData = contactStreamPool->mDataStream + contactStreamPool->mDataStreamSize - contactIndex;

		const PxU32 patchIndex = PxU32(PxAtomicAdd(&patchStreamPool->mSharedDataIndex, PxI32(requiredPatchSize)));
		if (patchStreamPool->isOverflown())
		{
			PX_WARN_ONCE("Patch buffer overflow detected, please increase its size in the scene desc!\n");
			isOverflown = true;
		}
		patchData = patchStreamPool->mDataStream + patchStreamPool->mDataStreamSize - patchIndex;

		PxU32 frictionPatchesIndex = PxTo32(PxAtomicAdd(&frictionPatchesStreamPool->mSharedDataIndex, PxI32(frictionPatchesSize)));
		if (frictionPatchesStreamPool->isOverflown())
		{
			PX_WARN_ONCE("Friction patch buffer overflow detected, please increase its size in the scene desc!\n");
			isOverflown = true;
		}
		frictionPatchesData = frictionPatchesStreamPool->mDataStream + frictionPatchesStreamPool->mDataStreamSize - frictionPatchesIndex;

		if(contactForceByteSize)
		{
			// Mesh types also store one triangle index per contact after the forces,
			// so the allocation is doubled.
			contactForceByteSize = isMeshType ? contactForceByteSize * 2 : contactForceByteSize;
			contactIndex = PxU32(PxAtomicAdd(&forceStreamPool->mSharedDataIndex, PxI32(contactForceByteSize)));
			if (forceStreamPool->isOverflown())
			{
				PX_WARN_ONCE("Force buffer overflow detected, please increase its size in the scene desc!\n");
				isOverflown = true;
			}
			forceData = reinterpret_cast<PxReal*>(forceStreamPool->mDataStream + forceStreamPool->mDataStreamSize - contactIndex);
			if (isMeshType)
				triangleIndice = reinterpret_cast<PxU32*>(forceData + numContactPoints);
		}

		totalRequiredSize = requiredContactSize + requiredPatchSize;
		if (isOverflown)
		{
			// Any pool overflow invalidates the whole pair's streams.
			patchData = NULL;
			contactData = NULL;
			forceData = NULL;
			triangleIndice = NULL;
		}
	}
	else
	{
		// CPU path: single allocation holding patches, points, forces (+ triangle
		// indices for mesh types) and friction patches, in that order.
		const PxU32 alignedRequiredSize = computeAlignedSize(requiredContactSize + requiredPatchSize);
		contactForceByteSize = (isMeshType ? contactForceByteSize * 2 : contactForceByteSize);
		const PxU32 totalSize = alignedRequiredSize + contactForceByteSize + frictionPatchesSize;

		PxU8* data = manager ? blockStream->reserve(totalSize, *manager) : threadContext->mContactBlockStream.reserve(totalSize);
		if(data)
		{
			patchData = data;
			contactData = data + requiredPatchSize;
			if(contactForceByteSize)
			{
				forceData = reinterpret_cast<PxReal*>((data + alignedRequiredSize));
				if (isMeshType)
					triangleIndice = reinterpret_cast<PxU32*>(forceData + numContactPoints);
				PxMemZero(forceData, contactForceByteSize);
				// NOTE(review): frictionPatchesData is only initialized when forces are
				// requested on this path — confirm friction patches are unused otherwise.
				if (frictionPatchesSize)
				{
					frictionPatchesData = data + alignedRequiredSize + contactForceByteSize;
					PxMemZero(frictionPatchesData, frictionPatchesSize);
				}
			}
		}
		totalRequiredSize = alignedRequiredSize;
	}

	if(patchData == NULL)
	{
		// Allocation failed or pool overflowed: publish an empty result.
		writtenContactCount = 0;
		outContactPatches = NULL;
		outContactPoints = NULL;
		outContactForces = NULL;
		compressedContactSize = 0;
		numPatches = 0;
		outFrictionPatches = NULL;
		return 0;
	}

	PxPrefetchLine(patchData);
	PxPrefetchLine(contactData);

#if PX_ENABLE_SIM_STATS
	if(threadContext)
		threadContext->mCompressedCacheSize += totalRequiredSize;
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif

	compressedContactSize = PxTo16(totalRequiredSize);

	//PxU32 startIndex = 0;

	//Extract first material
	PxU16 origMatIndex0 = pMaterial[0].mMaterialIndex0;
	PxU16 origMatIndex1 = pMaterial[0].mMaterialIndex1;
	PxReal staticFriction, dynamicFriction, combinedRestitution, combinedDamping;
	PxU32 materialFlags;
	combineMaterials(materialManager, origMatIndex0, origMatIndex1, staticFriction, dynamicFriction, combinedRestitution, materialFlags, combinedDamping);

	PxU8* PX_RESTRICT dataPlusOffset = patchData + additionalHeaderSize;
	PxContactPatch* PX_RESTRICT patches = reinterpret_cast<PxContactPatch*>(dataPlusOffset);
	PxU32* PX_RESTRICT faceIndice = triangleIndice;

	outContactPatches = patchData;
	outContactPoints = contactData;
	outContactForces = forceData;
	outFrictionPatches = frictionPatchesData;

	struct Local
	{
		// Fills one PxContactPatch header from the root stride and the combined material values.
		static PX_FORCE_INLINE void fillPatch(PxContactPatch* PX_RESTRICT patch, const StridePatch& rootPatch, const PxVec3& normal,
			PxU32 currentIndex, PxReal staticFriction_, PxReal dynamicFriction_, PxReal combinedRestitution_, PxReal combinedDamping_,
			PxU32 materialFlags_, PxU32 flags, PxU16 matIndex0, PxU16 matIndex1
			)
		{
			patch->mMassModification.linear0 = 1.0f;
			patch->mMassModification.linear1 = 1.0f;
			patch->mMassModification.angular0 = 1.0f;
			patch->mMassModification.angular1 = 1.0f;
			PX_ASSERT(PxAbs(normal.magnitude() - 1) < 1e-3f);
			patch->normal = normal;
			patch->restitution = combinedRestitution_;
			patch->dynamicFriction = dynamicFriction_;
			patch->staticFriction = staticFriction_;
			patch->damping = combinedDamping_;
			patch->startContactIndex = PxTo16(currentIndex);
			//KS - we could probably compress this further into the header but the complexity might not be worth it
			patch->nbContacts = rootPatch.totalCount;
			patch->materialFlags = PxU8(materialFlags_);
			patch->internalFlags = PxU8(flags);
			patch->materialIndex0 = matIndex0;
			patch->materialIndex1 = matIndex1;
		}
	};

	if(isModifiable)
	{
		PxU32 flags = PxU32(isModifiable ? PxContactPatch::eMODIFIABLE : 0) |
			(forceNoResponse ? PxContactPatch::eFORCE_NO_RESPONSE : 0) |
			(isMeshType ? PxContactPatch::eHAS_FACE_INDICES : 0);

		PxU32 currentIndex = 0;
		PxModifiableContact* PX_RESTRICT point = reinterpret_cast<PxModifiableContact*>(contactData);

		for(PxU32 a = 0; a < numStrideHeaders; ++a)
		{
			StridePatch& rootPatch = stridePatches[a];
			if(rootPatch.isRoot)
			{
				const PxU32 startIndex = rootPatch.startIndex;
				const PxU16 matIndex0 = pMaterial[startIndex].mMaterialIndex0;
				const PxU16 matIndex1 = pMaterial[startIndex].mMaterialIndex1;
				// Only re-combine materials when the pair changes.
				if(matIndex0 != origMatIndex0 || matIndex1 != origMatIndex1)
				{
					combineMaterials(materialManager, matIndex0, matIndex1, staticFriction, dynamicFriction, combinedRestitution, materialFlags, combinedDamping);
					origMatIndex0 = matIndex0;
					origMatIndex1 = matIndex1;
				}
				PxContactPatch* PX_RESTRICT patch = patches++;
				Local::fillPatch(patch, rootPatch, contactPoints[startIndex].normal, currentIndex, staticFriction, dynamicFriction, combinedRestitution, combinedDamping, materialFlags, flags, matIndex0, matIndex1);

				//const PxU32 endIndex = strideHeader[a];
				const PxU32 totalCountThisPatch = rootPatch.totalCount;
				if(insertAveragePoint && totalCountThisPatch > 1)
				{
					PxVec3 avgPt(0.0f);
					PxF32 avgPen(0.0f);

					PxF32 recipCount = 1.0f/(PxF32(rootPatch.totalCount));
					PxU32 index = a;
					while(index != 0xFF)
					{
						StridePatch& p = stridePatches[index];
						for(PxU32 b = p.startIndex; b < p.endIndex; ++b)
						{
							avgPt += contactPoints[b].point;
							avgPen += contactPoints[b].separation;
						}
						index = p.nextIndex;
					}
					if (faceIndice)
					{
						// BUGFIX: 'index' is always 0xFF once the chain walk terminates, so
						// stridePatches[index] was an out-of-bounds read. The averaged point
						// takes its face index from the patch's first contact instead (the
						// same contact that supplies the patch normal).
						*faceIndice = contactPoints[startIndex].internalFaceIndex1;
						faceIndice++;
					}
					patch->nbContacts++;

					point->contact = avgPt * recipCount;
					point->separation = avgPen * recipCount;
					point->normal = contactPoints[startIndex].normal;
					point->maxImpulse = PX_MAX_REAL;
					point->targetVelocity = PxVec3(0.0f);
					point->staticFriction = staticFriction;
					point->dynamicFriction = dynamicFriction;
					point->restitution = combinedRestitution;
					point->materialFlags = materialFlags;
					point->materialIndex0 = matIndex0;
					point->materialIndex1 = matIndex1;
					point++;
					currentIndex++;
					PxPrefetchLine(point, 128);
				}

				// Emit every contact of the chain with full per-point modification data.
				PxU32 index = a;
				while(index != 0xFF)
				{
					StridePatch& p = stridePatches[index];
					for(PxU32 b = p.startIndex; b < p.endIndex; ++b)
					{
						copyContactPoint(point, &contactPoints[b]);
						point->normal = contactPoints[b].normal;
						point->maxImpulse = PX_MAX_REAL;
						point->targetVelocity = PxVec3(0.0f);
						point->staticFriction = staticFriction;
						point->dynamicFriction = dynamicFriction;
						point->restitution = combinedRestitution;
						point->materialFlags = materialFlags;
						point->materialIndex0 = matIndex0;
						point->materialIndex1 = matIndex1;
						if (faceIndice)
						{
							*faceIndice = contactPoints[b].internalFaceIndex1;
							faceIndice++;
						}
						point++;
						currentIndex++;
						PxPrefetchLine(point, 128);
					}
					index = p.nextIndex;
				}
			}
		}
	}
	else
	{
		PxU32 flags = PxU32(isMeshType ? PxContactPatch::eHAS_FACE_INDICES : 0);
		PxContact* PX_RESTRICT point = reinterpret_cast<PxContact*>(contactData);
		PxU32 currentIndex = 0;
		{
			for(PxU32 a = 0; a < numStrideHeaders; ++a)
			{
				StridePatch& rootPatch = stridePatches[a];
				if(rootPatch.isRoot)
				{
					const PxU32 startIndex = rootPatch.startIndex;
					const PxU16 matIndex0 = pMaterial[startIndex].mMaterialIndex0;
					const PxU16 matIndex1 = pMaterial[startIndex].mMaterialIndex1;
					// Only re-combine materials when the pair changes.
					if(matIndex0 != origMatIndex0 || matIndex1 != origMatIndex1)
					{
						combineMaterials(materialManager, matIndex0, matIndex1, staticFriction, dynamicFriction, combinedRestitution, materialFlags, combinedDamping);
						origMatIndex0 = matIndex0;
						origMatIndex1 = matIndex1;
					}
					PxContactPatch* PX_RESTRICT patch = patches++;
					Local::fillPatch(patch, rootPatch, contactPoints[startIndex].normal, currentIndex, staticFriction, dynamicFriction, combinedRestitution, combinedDamping, materialFlags, flags, matIndex0, matIndex1);

					if(insertAveragePoint && (rootPatch.totalCount) > 1)
					{
						patch->nbContacts++;
						PxVec3 avgPt(0.0f);
						PxF32 avgPen(0.0f);
						PxF32 recipCount = 1.0f/(PxF32(rootPatch.totalCount));
						PxU32 index = a;
						while(index != 0xFF)
						{
							StridePatch& p = stridePatches[index];
							for(PxU32 b = p.startIndex; b < p.endIndex; ++b)
							{
								avgPt += contactPoints[b].point;
								avgPen += contactPoints[b].separation;
							}
							index = stridePatches[index].nextIndex;
						}
						if (faceIndice)
						{
							// BUGFIX: 'index' is always 0xFF once the chain walk terminates, so
							// stridePatches[index] was an out-of-bounds read. The averaged point
							// takes its face index from the patch's first contact instead.
							*faceIndice = contactPoints[startIndex].internalFaceIndex1;
							faceIndice++;
						}
						point->contact = avgPt * recipCount;
						point->separation = avgPen * recipCount;
						point++;
						currentIndex++;
						PxPrefetchLine(point, 128);
					}

					// Emit every contact of the chain in compact (non-modifiable) form.
					PxU32 index = a;
					while(index != 0xFF)
					{
						StridePatch& p = stridePatches[index];
						for(PxU32 b = p.startIndex; b < p.endIndex; ++b)
						{
							copyContactPoint(point, &contactPoints[b]);
							if (faceIndice)
							{
								*faceIndice = contactPoints[b].internalFaceIndex1;
								faceIndice++;
							}
							point++;
							currentIndex++;
							PxPrefetchLine(point, 128);
						}
						index = stridePatches[index].nextIndex;
					}
				}
			}
		}
	}

	writtenContactCount = PxTo16(totalContactPoints);

	return totalRequiredSize;
}

View File

@@ -0,0 +1,346 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxPreprocessor.h"
#include "foundation/PxMath.h"
#include "PxcNpMemBlockPool.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxInlineArray.h"
#include "PxcScratchAllocator.h"
using namespace physx;
// Constructs an empty 16k-block pool. No blocks are allocated here - call
// init() to pre-allocate the initial block set. 'allocator' supplies the
// scratch memory that can temporarily back constraint/contact blocks.
// Note: mInitialBlocks is only assigned later, in init().
PxcNpMemBlockPool::PxcNpMemBlockPool(PxcScratchAllocator& allocator) :
	mConstraints("PxcNpMemBlockPool::mConstraints"),
	mExceptionalConstraints("PxcNpMemBlockPool::mExceptionalConstraints"),
	mNpCacheActiveStream(0),
	mFrictionActiveStream(0),
	mCCDCacheActiveStream(0),
	mContactIndex(0),
	mAllocatedBlocks(0),
	mMaxBlocks(0),
	mUsedBlocks(0),
	mMaxUsedBlocks(0),
	mScratchBlockAddr(0),
	mNbScratchBlocks(0),
	mScratchAllocator(allocator),
	mPeakConstraintAllocations(0),
	mConstraintAllocations(0)
{
}
// Pre-allocates the initial block set and reserves the tracking arrays so the
// pool avoids reallocation in steady-state operation.
void PxcNpMemBlockPool::init(PxU32 initialBlockCount, PxU32 maxBlocks)
{
	mMaxBlocks = maxBlocks;
	mInitialBlocks = initialBlockCount;

	const PxU32 capacity = PxMax<PxU32>(initialBlockCount, 64);

	mConstraints.reserve(capacity);
	mExceptionalConstraints.reserve(16);
	for(PxU32 i = 0; i < 2; i++)
	{
		mFriction[i].reserve(capacity);
		mNpCache[i].reserve(capacity);
	}
	mUnused.reserve(capacity);

	setBlockCount(initialBlockCount);
}
// Number of pooled blocks currently handed out (scratch-backed blocks excluded).
PxU32 PxcNpMemBlockPool::getUsedBlockCount() const
{
	return mUsedBlocks;
}

// High-water mark of simultaneously used blocks.
PxU32 PxcNpMemBlockPool::getMaxUsedBlockCount() const
{
	return mMaxUsedBlocks;
}

// High-water mark of outstanding constraint-block allocations.
PxU32 PxcNpMemBlockPool::getPeakConstraintBlockCount() const
{
	return mPeakConstraintAllocations;
}
// Grows the unused pool until 'blockCount' blocks are available. Never shrinks;
// each fresh block is counted in mAllocatedBlocks.
void PxcNpMemBlockPool::setBlockCount(PxU32 blockCount)
{
	PxMutex::ScopedLock lock(mLock);

	PxU32 have = getUsedBlockCount();
	while(have < blockCount)
	{
		mUnused.pushBack(reinterpret_cast<PxcNpMemBlock *>(PX_ALLOC(PxcNpMemBlock::SIZE, "PxcNpMemBlock")));
		mAllocatedBlocks++;
		have++;
	}
}
void PxcNpMemBlockPool::releaseUnusedBlocks()
{
PxMutex::ScopedLock lock(mLock);
while(mUnused.size())
{
PxcNpMemBlock* ptr = mUnused.popBack();
PX_FREE(ptr);
mAllocatedBlocks--;
}
}
// Tears down the pool. Double-buffered streams must be flipped twice so that
// both the active and inactive buffers are drained before the blocks are freed.
PxcNpMemBlockPool::~PxcNpMemBlockPool()
{
	// swapping twice guarantees all blocks are released from the stream pairs
	swapFrictionStreams();
	swapFrictionStreams();
	swapNpCacheStreams();
	swapNpCacheStreams();
	releaseConstraintMemory();
	// contacts are double-buffered as well, so flip twice to drain both buffers
	releaseContacts();
	releaseContacts();
	// at this point every block must be back in mUnused
	PX_ASSERT(mUsedBlocks == 0);
	flushUnused();
}
void PxcNpMemBlockPool::acquireConstraintMemory()
{
PxU32 size;
void* addr = mScratchAllocator.allocAll(size);
size = size&~(PxcNpMemBlock::SIZE-1);
PX_ASSERT(mScratchBlocks.size()==0);
mScratchBlockAddr = reinterpret_cast<PxcNpMemBlock*>(addr);
mNbScratchBlocks = size/PxcNpMemBlock::SIZE;
mScratchBlocks.resize(mNbScratchBlocks);
for(PxU32 i=0;i<mNbScratchBlocks;i++)
mScratchBlocks[i] = mScratchBlockAddr+i;
}
void PxcNpMemBlockPool::releaseConstraintMemory()
{
PxMutex::ScopedLock lock(mLock);
mPeakConstraintAllocations = mConstraintAllocations = 0;
while(mConstraints.size())
{
PxcNpMemBlock* block = mConstraints.popBack();
if(mScratchAllocator.isScratchAddr(block))
mScratchBlocks.pushBack(block);
else
{
mUnused.pushBack(block);
PX_ASSERT(mUsedBlocks>0);
mUsedBlocks--;
}
}
for(PxU32 i=0;i<mExceptionalConstraints.size();i++)
PX_FREE(mExceptionalConstraints[i]);
mExceptionalConstraints.clear();
PX_ASSERT(mScratchBlocks.size()==mNbScratchBlocks); // check we released them all
mScratchBlocks.clear();
if(mScratchBlockAddr)
{
mScratchAllocator.free(mScratchBlockAddr);
mScratchBlockAddr = 0;
mNbScratchBlocks = 0;
}
}
// Hands out a single 16k block. Order of preference:
//   1) a scratch-memory block (only when isScratchAllocation is set),
//   2) a block from the unused pool,
//   3) a fresh heap allocation, subject to the mMaxBlocks budget.
// Returns NULL when the budget is exhausted or the heap allocation fails.
// The acquired block is appended to trackingArray so the matching release()
// can account for it later. Scratch blocks do not count towards mUsedBlocks.
PxcNpMemBlock* PxcNpMemBlockPool::acquire(PxcNpMemBlockArray& trackingArray, PxU32* allocationCount, PxU32* peakAllocationCount, bool isScratchAllocation)
{
	PxMutex::ScopedLock lock(mLock);

	// optional allocation statistics (used by the constraint streams)
	if(allocationCount && peakAllocationCount)
	{
		*peakAllocationCount = PxMax(*allocationCount + 1, *peakAllocationCount);
		(*allocationCount)++;
	}

	// this is a bit of hack - the logic would be better placed in acquireConstraintBlock, but then we'd have to grab the mutex
	// once there to check the scratch block array and once here if we fail - or, we'd need a larger refactor to separate out
	// locking and acquisition.
	if(isScratchAllocation && mScratchBlocks.size()>0)
	{
		PxcNpMemBlock* block = mScratchBlocks.popBack();
		trackingArray.pushBack(block);
		return block;
	}

	// reuse a pooled block if one is available
	if(mUnused.size())
	{
		PxcNpMemBlock* block = mUnused.popBack();
		trackingArray.pushBack(block);
		mMaxUsedBlocks = PxMax<PxU32>(mUsedBlocks+1, mMaxUsedBlocks);
		mUsedBlocks++;
		return block;
	}

	// pool is empty: fall back to the heap unless the budget is exhausted
	if(mAllocatedBlocks == mMaxBlocks)
	{
#if PX_CHECKED
		PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL,
			"Reached maximum number of allocated blocks so 16k block allocation will fail!");
#endif
		return NULL;
	}

#if PX_CHECKED
	if(mInitialBlocks)
	{
		PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL,
			"Number of required 16k memory blocks has exceeded the initial number of blocks. Allocator is being called. Consider increasing the number of pre-allocated 16k blocks.");
	}
#endif

	// increment here so that if we hit the limit in separate threads we won't over-allocate
	mAllocatedBlocks++;

	PxcNpMemBlock* block = reinterpret_cast<PxcNpMemBlock*>(PX_ALLOC(sizeof(PxcNpMemBlock), "PxcNpMemBlock"));
	if(block)
	{
		trackingArray.pushBack(block);
		mMaxUsedBlocks = PxMax<PxU32>(mUsedBlocks+1, mMaxUsedBlocks);
		mUsedBlocks++;
	}
	else
		mAllocatedBlocks--;	// allocation failed: give the budget back

	return block;
}
// Allocates memory that is too large for a 16k block straight from the heap.
// The allocation is tracked so releaseConstraintMemory() can free it later.
// Returns NULL on allocation failure.
PxU8* PxcNpMemBlockPool::acquireExceptionalConstraintMemory(PxU32 size)
{
	PxU8* memory = reinterpret_cast<PxU8*>(PX_ALLOC(size, "PxcNpExceptionalMemory"));
	if(memory != NULL)
	{
		PxMutex::ScopedLock lock(mLock);
		mExceptionalConstraints.pushBack(memory);
	}
	return memory;
}
// Moves every block in 'deadArray' back into the unused pool, updating the
// usage counters. The inner loop only contains an assert and therefore acts as
// a debug-build sanity check that no block is returned twice.
void PxcNpMemBlockPool::release(PxcNpMemBlockArray& deadArray, PxU32* allocationCount)
{
	PxMutex::ScopedLock lock(mLock);

	PX_ASSERT(mUsedBlocks >= deadArray.size());
	mUsedBlocks -= deadArray.size();

	if(allocationCount)
		*allocationCount -= deadArray.size();

	for(PxU32 i = deadArray.size(); i > 0; i--)
	{
		PxcNpMemBlock* block = deadArray.popBack();
		for(PxU32 a = 0; a < mUnused.size(); ++a)
		{
			PX_ASSERT(mUnused[a] != block);
		}
		mUnused.pushBack(block);
	}
}
void PxcNpMemBlockPool::flushUnused()
{
while(mUnused.size())
{
PxcNpMemBlock* ptr = mUnused.popBack();
PX_FREE(ptr);
}
}
// Acquires a constraint block tracked in the pool's own constraint array.
PxcNpMemBlock* PxcNpMemBlockPool::acquireConstraintBlock()
{
	// we track the scratch blocks in the constraint block array, because the code in acquireMultipleConstraintBlocks
	// assumes that acquired blocks are listed there.
	return acquire(mConstraints);
}

// Variant that tracks the block in a caller-supplied array and records
// constraint allocation statistics. Allowed to use scratch memory.
PxcNpMemBlock* PxcNpMemBlockPool::acquireConstraintBlock(PxcNpMemBlockArray& memBlocks)
{
	return acquire(memBlocks, &mConstraintAllocations, &mPeakConstraintAllocations, true);
}

// Acquires a block for the currently active contact stream. Allowed to use
// scratch memory.
PxcNpMemBlock* PxcNpMemBlockPool::acquireContactBlock()
{
	return acquire(mContacts[mContactIndex], NULL, NULL, true);
}
// Returns a caller-tracked set of constraint blocks: scratch-backed blocks go
// back to the scratch pool, heap-backed blocks return to the unused pool and
// release their "used" accounting.
void PxcNpMemBlockPool::releaseConstraintBlocks(PxcNpMemBlockArray& memBlocks)
{
	PxMutex::ScopedLock lock(mLock);

	for(PxU32 i = memBlocks.size(); i > 0; i--)
	{
		PxcNpMemBlock* block = memBlocks.popBack();
		if(!mScratchAllocator.isScratchAddr(block))
		{
			PX_ASSERT(mUsedBlocks>0);
			mUsedBlocks--;
			mUnused.pushBack(block);
		}
		else
			mScratchBlocks.pushBack(block);
	}
}
// Flips the double-buffered contact streams: releases last frame's buffer and
// makes it the new write target.
void PxcNpMemBlockPool::releaseContacts()
{
	//releaseConstraintBlocks(mContacts);
	release(mContacts[1-mContactIndex]);
	mContactIndex = 1-mContactIndex;
}

// Acquires a block from the currently active friction stream.
PxcNpMemBlock* PxcNpMemBlockPool::acquireFrictionBlock()
{
	return acquire(mFriction[mFrictionActiveStream]);
}

// Flips the double-buffered friction streams, releasing the inactive one.
void PxcNpMemBlockPool::swapFrictionStreams()
{
	release(mFriction[1-mFrictionActiveStream]);
	mFrictionActiveStream = 1-mFrictionActiveStream;
}

// Acquires a block from the currently active narrow-phase cache stream.
PxcNpMemBlock* PxcNpMemBlockPool::acquireNpCacheBlock()
{
	return acquire(mNpCache[mNpCacheActiveStream]);
}

// Flips the double-buffered narrow-phase cache streams, releasing the inactive one.
void PxcNpMemBlockPool::swapNpCacheStreams()
{
	release(mNpCache[1-mNpCacheActiveStream]);
	mNpCacheActiveStream = 1-mNpCacheActiveStream;
}

View File

@@ -0,0 +1,91 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxcConstraintBlockStream.h"
#include "PxcNpThreadContext.h"
using namespace physx;
// Per-thread narrow-phase context. Binds this thread's block-stream allocators
// and narrow-phase parameters to the shared PxcNpContext data. Statistics are
// cleared on construction when PX_ENABLE_SIM_STATS is enabled.
PxcNpThreadContext::PxcNpThreadContext(PxcNpContext* params) :
	mRenderOutput				(params->mRenderBuffer),
	mContactBlockStream			(params->mNpMemBlockPool),
	mNpCacheStreamPair			(params->mNpMemBlockPool),
	mNarrowPhaseParams			(0.0f, params->mMeshContactMargin, params->mToleranceLength),
	mPCM						(false),
	mContactCache				(false),
	mCreateAveragePoint			(false),
#if PX_ENABLE_SIM_STATS
	mCompressedCacheSize		(0),
	mNbDiscreteContactPairsWithCacheHits(0),
	mNbDiscreteContactPairsWithContacts	(0),
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
	mMaxPatches					(0),
	mContactStreamPool			(params->mContactStreamPool),
	mPatchStreamPool			(params->mPatchStreamPool),
	mForceAndIndiceStreamPool	(params->mForceAndIndiceStreamPool),
	mFrictionPatchStreamPool	(params->mFrictionPatchStreamPool),
	mMaterialManager			(params->mMaterialManager),
	mLocalNewTouchCount			(0),
	mLocalLostTouchCount		(0)
{
#if PX_ENABLE_SIM_STATS
	clearStats();
#else
	PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
}
PxcNpThreadContext::~PxcNpThreadContext()
{
}

#if PX_ENABLE_SIM_STATS
// Zeroes all per-frame narrow-phase statistics counters.
void PxcNpThreadContext::clearStats()
{
	PxMemSet(mDiscreteContactPairs, 0, sizeof(mDiscreteContactPairs));
	PxMemSet(mModifiedContactPairs, 0, sizeof(mModifiedContactPairs));
	mCompressedCacheSize					= 0;
	mNbDiscreteContactPairsWithCacheHits	= 0;
	mNbDiscreteContactPairsWithContacts		= 0;
}
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
// Prepares the context for a new simulation step: drops per-frame stream
// memory back to the pools and re-arms touch-change tracking for 'cmCount'
// contact managers.
void PxcNpThreadContext::reset(PxU32 cmCount)
{
	mContactBlockStream.reset();
	mNpCacheStreamPair.reset();

	mLocalNewTouchCount = 0;
	mLocalLostTouchCount = 0;
	mLocalChangeTouch.clear();
	mLocalChangeTouch.resize(cmCount);
}

View File

@@ -0,0 +1,588 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometry.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "GuCCDSweepConvexMesh.h"
#ifndef PXS_CCD_H
#define PXS_CCD_H
#define CCD_DEBUG_PRINTS 0
#define CCD_POST_DEPENETRATE_DIST 0.001f
#define CCD_ROTATION_LOCKING 0
#define CCD_MIN_TIME_LEFT 0.01f
#define DEBUG_RENDER_CCD 0
#if CCD_DEBUG_PRINTS
namespace physx {
extern void printCCDDebug(const char* msg, const PxsRigidBody* atom0, PxGeometryType::Enum g0, bool printPtr = true);
extern void printShape(PxsRigidBody* atom0, PxGeometryType::Enum g0, const char* annotation, PxReal dt, PxU32 pass, bool printPtr = true);
}
#define PRINTCCDSHAPE(x) printShape x
#define PRINTCCDDEBUG(x) printCCDDebug x
#else
#define PRINTCCDSHAPE(x)
#define PRINTCCDDEBUG(x)
#endif
namespace physx
{
float computeCCDThreshold(const PxGeometry& geometry);
// ------------------------------------------------------------------------------------------------------------
// a fraction of objects will be CCD active so this is dynamic, not a member of PsxRigidBody
// CCD code builds a temporary array of PxsCCDPair objects (allocated in blocks)
// this is done to gather scattered data from memory and also to reduce PxsRidigBody permanent memory footprint
// we have to do it every pass since new CMs can become fast moving after each pass (and sometimes cease to be)
//
struct PxsCCDBody;
class PxsRigidBody;
struct PxsShapeCore;
struct PxsRigidCore;
class PxsContactManager;
class PxsContext;
class PxCCDContactModifyCallback;
class PxcNpThreadContext;
class PxvNphaseImplementationContext;
namespace Dy
{
class ThresholdStream;
}
/**
\brief Structure to represent an interaction between a given body and another body.
Nodes form an intrusive singly-linked list owned by a PxsCCDBody.
*/
struct PxsCCDOverlap
{
	//The body the interaction relates to
	PxsCCDBody* mBody;
	//The next interaction in the list (NULL terminates the list)
	PxsCCDOverlap* mNext;
};
/**
\brief Temporary CCD representation for a shape.

Stores data about a shape that may be frequently used in CCD. It also stores update counters per-shape that can be compared with the body's update
counter to determine if the shape needs its transforms re-calculated. This avoids us needing to store a list of shapes in a CCD body.
*/
struct PxsCCDShape : public Gu::CCDShape
{
	const PxsShapeCore*	mShapeCore;	// Shape core (can be shared)
	const PxsRigidCore*	mRigidCore;	// Rigid body core
	PxNodeIndex			mNodeIndex;	// node index (presumably into the island graph - see IG::IslandSim usage in updateCCD; confirm)
};
/**
\brief Structure to represent a body in the CCD system.
*/
struct PxsCCDBody
{
	Cm::SpatialVector	mPreSolverVelocity;		// spatial velocity before the solver pass (per the name; usage not visible here - verify at call sites)
	PxU16				mIndex;					// The CCD body's index
	bool				mPassDone;				// Whether it has been processed in the current CCD pass
	bool				mHasAnyPassDone;		// Whether this body was influenced by any passes
	PxReal				mTimeLeft;				// CCD time left to elapse (normalized in range 0-1)
	PxsRigidBody*		mBody;					// The rigid body
	PxsCCDOverlap*		mOverlappingObjects;	// A list of overlapping bodies for island update
	PxU32				mUpdateCount;			// How many times this body has been updated in the CCD. This is correlated with CCD shapes' update counts.
	PxU32				mNbInteractionsThisPass;// How many interactions this pass

	/**
	\brief Returns the CCD body's index.
	\return The CCD body's index.
	*/
	PX_FORCE_INLINE PxU32 getIndex() const { return mIndex; }

	/**
	\brief Tests whether this body has already registered an overlap with a given body.
	\param[in] body The body to test against.
	\return Whether this body has already registered an overlap with a given body.
	\note Linear walk of the intrusive overlap list.
	*/
	bool overlaps(PxsCCDBody* body) const
	{
		PxsCCDOverlap* overlaps = mOverlappingObjects;
		while(overlaps)
		{
			if(overlaps->mBody == body)
				return true;
			overlaps = overlaps->mNext;
		}
		return false;
	}

	/**
	\brief Registers an overlap with a given body.
	\param[in] overlap The CCD overlap to register (pushed at the head of the list).
	*/
	void addOverlap(PxsCCDOverlap* overlap)
	{
		overlap->mNext = mOverlappingObjects;
		mOverlappingObjects = overlap;
	}
};
/**
\brief a container class used in the CCD that minimizes frequency of hitting the allocator.
This class stores a set of blocks of memory. It is effectively an array that resizes more efficiently because it doesn't need to
reallocate an entire buffer and copy data.
*/
template<typename T, int BLOCK_SIZE>
struct PxsCCDBlockArray
{
/**
\brief A block of data
*/
struct Block : PxUserAllocated { T items[BLOCK_SIZE]; };
/**
\brief A header for a block of data.
*/
struct BlockInfo
{
Block* block;
PxU32 count; // number of elements in this block
BlockInfo(Block* aBlock, PxU32 aCount) : block(aBlock), count(aCount) {}
};
/*
\brief An array of block headers
*/
PxArray<BlockInfo> blocks;
/**
\brief The current block.
*/
PxU32 currentBlock;
/**
\brief Constructor
*/
PxsCCDBlockArray() : currentBlock(0)
{
blocks.pushBack(BlockInfo(PX_NEW(Block), 0));
}
/**
\brief Destructor
*/
~PxsCCDBlockArray()
{
for (PxU32 i = 0; i < blocks.size(); i++)
{
PX_DELETE(blocks[i].block);
}
currentBlock = 0;
}
/**
\brief Clears this block array.
\note This clear function also deletes all additional blocks
*/
void clear()
{
for (PxU32 i = 0; i < blocks.size(); i++)
{
PX_DELETE(blocks[i].block);
}
blocks.clear();
blocks.pushBack(BlockInfo(PX_NEW(Block), 0)); // at least one block is expected to always be present in the array
currentBlock = 0;
}
/**
\brief Clears this block array but does not release the memory.
*/
void clear_NoDelete()
{
currentBlock = 0;
blocks[0].count = 0;
}
/**
\brief Push a new element onto the back of the block array
\return The new element
*/
T& pushBack()
{
PxU32 numBlocks = blocks.size();
if (blocks[currentBlock].count == BLOCK_SIZE)
{
if((currentBlock + 1) == numBlocks)
{
blocks.pushBack(BlockInfo(PX_NEW(Block), 0));
numBlocks ++;
}
currentBlock++;
blocks[currentBlock].count = 0;
}
const PxU32 count = blocks[currentBlock].count ++;
return blocks[currentBlock].block->items[count];
}
/**
\brief Pushes a new element onto the back of this array, intitializing it to match the data
\param data The data to initialize the new element to
\return The new element
*/
T& pushBack(T& data)
{
PxU32 numBlocks = blocks.size();
if (blocks[currentBlock].count == BLOCK_SIZE)
{
if((currentBlock + 1) == numBlocks)
{
blocks.pushBack(BlockInfo(PX_NEW(Block), 0));
numBlocks ++;
}
currentBlock++;
blocks[currentBlock].count = 0;
}
const PxU32 count = blocks[currentBlock].count ++;
blocks[currentBlock].block->items[count] = data;
return blocks[currentBlock].block->items[count];
}
/**
\brief Pops the last element from the list.
*/
void popBack()
{
PX_ASSERT(blocks[currentBlock].count > 0);
if (blocks[currentBlock].count > 1)
blocks[currentBlock].count --;
else
{
PX_DELETE(blocks[currentBlock].block);
blocks.popBack();
currentBlock--;
}
}
/**
\brief Returns the current size of the array.
\return The current size of the array.
*/
PxU32 size() const
{
return (currentBlock)*BLOCK_SIZE + blocks[currentBlock].count;
}
/**
\brief Returns the element at a given index in the array
\param[in] index The index of the element in the array
\return The element at a given index in the array.
*/
T& operator[] (PxU32 index) const
{
PX_ASSERT(index/BLOCK_SIZE < blocks.size());
PX_ASSERT(index%BLOCK_SIZE < blocks[index/BLOCK_SIZE].count);
return blocks[index/BLOCK_SIZE].block->items[index%BLOCK_SIZE];
}
};
/**
\brief A structure to represent a potential CCD interaction between a pair of shapes
*/
struct PxsCCDPair
{
	/**
	\brief Defines whether this is an estimated TOI or an accurate TOI.

	We store pairs in a priority queue based on the TOIs. We use cheap estimates to cull away work and lazily evaluate TOIs. This means that an element in the
	priority queue may either be an estimate or a precise result.
	*/
	enum E_TOIType
	{
		eEstimate,
		ePrecise
	};

	PxsRigidBody*			mBa0;					// Body A. Can be NULL for statics
	PxsRigidBody*			mBa1;					// Body B. Can be NULL for statics
	PxsCCDShape*			mCCDShape0;				// Shape A
	PxsCCDShape*			mCCDShape1;				// Shape B
	PxVec3					mMinToiNormal;			// The contact normal. Only valid for precise results. On the surface of body/shape A
	PxReal					mMinToi;				// Min TOI. Valid for both precise and estimated results but estimates may be too early (i.e. conservative).
	PxReal					mPenetrationPostStep;	// Valid only for precise sweeps. Only used for initial intersections (i.e. at TOI = 0).
	PxVec3					mMinToiPoint;			// The contact point. Only valid for precise sweep results.
	PxReal					mPenetration;			// The penetration. Only valid for precise sweep results.
	PxsContactManager*		mCm;					// The contact manager.
	PxU32					mIslandId;				// The index of the island this pair is in
	PxGeometryType::Enum	mG0, mG1;				// The geometry types for shapes 0 and 1
	bool					mIsEarliestToiHit;		// Indicates this was the earliest hit for one of the bodies in the pair
	bool					mIsModifiable;			// Indicates whether this contact is modifiable
	PxU32					mFaceIndex;				// The face index. Only valid for precise sweeps involving meshes or heightfields.
	PxU16					mMaterialIndex0;		// The material index for shape 0
	PxU16					mMaterialIndex1;		// The material index for shape 1
	PxReal					mDynamicFriction;		// The dynamic friction coefficient
	PxReal					mStaticFriction;		// The static friction coefficient
	PxReal					mRestitution;			// The restitution coefficient
	PxU32					mEstimatePass;			// The current estimation pass. Used after a sweep hit was found to determine if the pair needs re-estimating.
	PxReal					mAppliedForce;			// The applied force for this pair. Only valid if the pair has been responded to.
	PxReal					mMaxImpulse;			// The maximum impulse to be applied
	E_TOIType				mToiType;				// The TOI type (estimate, precise).
	bool					mHasFriction;			// Whether we want to simulate CCD friction for this pair

	/**
	\brief Perform a precise sweep for this pair
	\param[in] threadContext The per-thread context
	\param[in] dt The time-step
	\param[in] pass The current CCD pass
	\param[in] ccdThreshold The CCD threshold value (see computeCCDThreshold)
	\return The normalized TOI. <=1.0 indicates a hit. Otherwise PX_MAX_REAL.
	*/
	PxReal sweepFindToi(PxcNpThreadContext& threadContext, PxReal dt, PxU32 pass, PxReal ccdThreshold);

	/**
	\brief Performs a cheap, conservative sweep estimation for this pair
	\param[in] ccdThreshold The CCD threshold value (see computeCCDThreshold)
	\return The normalized TOI. <= 1.0 indicates a potential hit, otherwise PX_MAX_REAL.
	*/
	PxReal sweepEstimateToi(PxReal ccdThreshold);

	/**
	\brief Advances this pair to the TOI
	\param[in] dt The time-step
	\param[in] clipTrajectoryToToi Indicates whether we clip the body's trajectory to the end pose. Only done in the final pass
	\return Whether the advance was successful. An advance will be unsuccessful if both bodies were already updated.
	*/
	bool sweepAdvanceToToi(PxReal dt, bool clipTrajectoryToToi);

	/**
	\brief Updates the transforms of the shapes involved in this pair.
	*/
	void updateShapes();
};
/**
\brief Block array of CCD bodies
*/
typedef PxsCCDBlockArray<PxsCCDBody, 128> PxsCCDBodyArray;
/**
\brief Block array of CCD pairs
*/
typedef PxsCCDBlockArray<PxsCCDPair, 128> PxsCCDPairArray;
/**
\brief Block array of CCD overlaps
*/
typedef PxsCCDBlockArray<PxsCCDOverlap, 128> PxsCCDOverlapArray;
/**
\brief Block array of CCD shapes
*/
typedef PxsCCDBlockArray<PxsCCDShape, 128> PxsCCDShapeArray;
/**
\brief Pair structure to be able to look-up a rigid body-shape pair in a map
*/
typedef PxPair<const PxsRigidCore*, const PxsShapeCore*> PxsRigidShapePair;
/**
\brief CCD context object. Owns the temporary per-frame CCD data (bodies, shapes,
pairs, islands) and the task chain that drives the multi-pass CCD update.
*/
class PxsCCDContext : public PxUserAllocated
{
public:
	/**
	\brief Constructor for PxsCCDContext
	\param[in] context The PxsContext that is associated with this PxsCCDContext.
	\param[in] thresholdStream The threshold stream used during the CCD update.
	\param[in] nPhaseContext The narrow-phase implementation context.
	\param[in] ccdThreshold The CCD threshold value (see computeCCDThreshold).
	*/
	PxsCCDContext(PxsContext* context, Dy::ThresholdStream& thresholdStream, PxvNphaseImplementationContext& nPhaseContext, PxReal ccdThreshold);

	/**
	\brief Destructor for PxsCCDContext
	*/
	~PxsCCDContext();

	/**
	\brief Returns the CCD contact modification callback
	\return The CCD contact modification callback
	*/
	PX_FORCE_INLINE PxCCDContactModifyCallback* getCCDContactModifyCallback() const { return mCCDContactModifyCallback; }

	/**
	\brief Sets the CCD contact modification callback
	\param[in] c The CCD contact modification callback
	*/
	PX_FORCE_INLINE void setCCDContactModifyCallback(PxCCDContactModifyCallback* c) { mCCDContactModifyCallback = c; }

	/**
	\brief Returns the maximum number of CCD passes
	\return The maximum number of CCD passes
	*/
	PX_FORCE_INLINE PxU32 getCCDMaxPasses() const { return mCCDMaxPasses; }

	/**
	\brief Sets the maximum number of CCD passes
	\param[in] ccdMaxPasses The maximum number of CCD passes
	*/
	PX_FORCE_INLINE void setCCDMaxPasses(PxU32 ccdMaxPasses) { mCCDMaxPasses = ccdMaxPasses; }

	/**
	\brief Returns the current CCD pass
	\return The current CCD pass
	*/
	PX_FORCE_INLINE PxU32 getCurrentCCDPass() const { return miCCDPass; }

	/**
	\brief Returns the number of swept hits reported
	\return The number of swept hits reported
	*/
	PX_FORCE_INLINE PxI32 getNumSweepHits() const { return mSweepTotalHits; }

	/**
	\brief Returns the number of updated bodies
	\return The number of updated bodies in this CCD pass
	*/
	PX_FORCE_INLINE PxU32 getNumUpdatedBodies() const { return mUpdatedCCDBodies.size(); }

	/**
	\brief Returns the updated bodies array
	\return The updated bodies array from this CCD pass
	*/
	PX_FORCE_INLINE PxsRigidBody*const* getUpdatedBodies() const { return mUpdatedCCDBodies.begin(); }

	/**
	\brief Clears the updated bodies array
	*/
	PX_FORCE_INLINE void clearUpdatedBodies() { mUpdatedCCDBodies.forceSize_Unsafe(0); }

	PX_FORCE_INLINE PxReal getCCDThreshold() const { return mCCDThreshold; }
	PX_FORCE_INLINE void setCCDThreshold(PxReal t) { mCCDThreshold = t; }

	/**
	\brief Runs the CCD contact modification.
	\param[in] contacts The list of modifiable contacts
	\param[in] contactCount The number of contacts
	\param[in] shapeCore0 The first shape core
	\param[in] shapeCore1 The second shape core
	\param[in] rigidCore0 The first rigid core
	\param[in] rigidCore1 The second rigid core
	\param[in] rigid0 The first rigid body
	\param[in] rigid1 The second rigid body
	*/
	void runCCDModifiableContact(PxModifiableContact* PX_RESTRICT contacts, PxU32 contactCount, const PxsShapeCore* PX_RESTRICT shapeCore0,
		const PxsShapeCore* PX_RESTRICT shapeCore1, const PxsRigidCore* PX_RESTRICT rigidCore0, const PxsRigidCore* PX_RESTRICT rigidCore1,
		const PxsRigidBody* PX_RESTRICT rigid0, const PxsRigidBody* PX_RESTRICT rigid1);

	/**
	\brief Performs a single CCD update
	This occurs after broad phase and is responsible for creating islands, finding the TOI of collisions, filtering contacts, issuing modification callbacks and responding to
	collisions. At the end of this phase all bodies will have stepped to their first TOI if they were involved in a CCD collision this frame.
	\param[in] dt The timestep to simulate
	\param[in] continuation The continuation task
	\param[in] islandSim The island manager
	\param[in] disableResweep If this is true, we perform a reduced-fidelity CCD approach
	\param[in] numFastMovingShapes The number of fast-moving shapes (semantics not visible here - see caller)
	*/
	void updateCCD(PxReal dt, PxBaseTask* continuation, IG::IslandSim& islandSim, bool disableResweep, PxI32 numFastMovingShapes);

	/**
	\brief Signals the beginning of a CCD multi-pass update
	*/
	void updateCCDBegin();

	/**
	\brief Resets the CCD contact state in any contact managers that previously had a reported CCD touch. This must be called if CCD update is bypassed for a frame
	*/
	void resetContactManagers();

private:
	/**
	\brief Verifies the consistency of the CCD context at the beginning
	*/
	void verifyCCDBegin();

	/**
	\brief Cleans up after the CCD update has completed
	*/
	void updateCCDEnd();

	/**
	\brief Spawns the update island tasks after the initial sweep estimates have been performed
	\param[in] continuation The continuation task
	*/
	void postCCDSweep(PxBaseTask* continuation);

	/**
	\brief Creates contact buffers for CCD contacts. These will be sent to the user in the contact notification.
	\param[in] continuation The continuation task
	*/
	void postCCDAdvance(PxBaseTask* continuation);

	/**
	\brief The final phase of the CCD task chain. Cleans up after the parallel update/postCCDAdvance stages.
	\param[in] continuation The continuation task
	*/
	void postCCDDepenetrate(PxBaseTask* continuation);

	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDSweep> PostCCDSweepTask;
	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDAdvance> PostCCDAdvanceTask;
	typedef Cm::DelegateTask<PxsCCDContext, &PxsCCDContext::postCCDDepenetrate> PostCCDDepenetrateTask;

	PostCCDSweepTask mPostCCDSweepTask;
	PostCCDAdvanceTask mPostCCDAdvanceTask;
	PostCCDDepenetrateTask mPostCCDDepenetrateTask;

	PxCCDContactModifyCallback* mCCDContactModifyCallback;

	// CCD global data
	bool mDisableCCDResweep;
	PxU32 miCCDPass;
	PxI32 mSweepTotalHits;

	// a fraction of objects will be CCD active so PxsCCDBody is dynamic, not a member of PxsRigidBody
	PxsCCDBodyArray mCCDBodies;
	PxsCCDOverlapArray mCCDOverlaps;
	PxsCCDShapeArray mCCDShapes;
	PxArray<PxsCCDBody*> mIslandBodies;
	PxArray<PxU16> mIslandSizes;
	PxArray<PxsRigidBody*> mUpdatedCCDBodies;
	PxHashMap<PxsRigidShapePair, PxsCCDShape*> mMap;

	// temporary array updated during CCD update
	//Array<PxsCCDPair> mCCDPairs;
	PxsCCDPairArray mCCDPairs;
	PxArray<PxsCCDPair*> mCCDPtrPairs;

	// number of pairs per island
	PxArray<PxU32> mCCDIslandHistogram;

	// thread context valid during CCD update
	PxcNpThreadContext* mCCDThreadContext;

	// number of pairs to process per thread
	PxU32 mCCDPairsPerBatch;
	PxU32 mCCDMaxPasses;

	PxsContext* mContext;
	Dy::ThresholdStream& mThresholdStream;
	PxvNphaseImplementationContext& mNphaseContext;

	PxMutex mMutex;

	PxReal mCCDThreshold;

private:
	PX_NOCOPY(PxsCCDContext)
};
}
#endif

View File

@@ -0,0 +1,148 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTACT_MANAGER_H
#define PXS_CONTACT_MANAGER_H
#include "PxvConfig.h"
#include "PxcNpWorkUnit.h"
namespace physx
{
class PxsRigidBody;
namespace Dy
{
class DynamicsContext;
}
namespace Sc
{
class ShapeInteraction;
}
/**
\brief Additional header structure for CCD contact data stream.

Headers of this type are chained per pair: each CCD pass that produces contacts
for the pair prepends/links another stream. Trailing numbers in the member
comments are running byte offsets for 32- / 64-bit builds.
*/
struct PxsCCDContactHeader
{
	/**
	\brief Stream for next collision. The same pair can collide multiple times during multiple CCD passes.
	*/
	PxsCCDContactHeader* nextStream;	//4		//8
	/**
	\brief Size (in bytes) of the CCD contact stream (not including force buffer)
	*/
	PxU16 contactStreamSize;			//6		//10
	/**
	\brief Defines whether the stream is from a previous pass.
	It could happen that the stream can not get allocated because we run out of memory. In that case the current event should not use the stream
	from an event of the previous pass.
	*/
	PxU16 isFromPreviousPass;			//8		//12
	// Pads the struct to 16 bytes on both 32- and 64-bit builds (pointer size varies).
	PxU8 pad[12 - sizeof(PxsCCDContactHeader*)];	//16
};
// The header is embedded in 16-byte-aligned contact streams, so its size must be a multiple of 16.
PX_COMPILE_TIME_ASSERT((sizeof(PxsCCDContactHeader) & 0xF) == 0);
/**
\brief Low-level contact manager for a single interacting shape pair.

Bundles the narrowphase work unit (PxcNpWorkUnit) with the two rigid bodies of the
pair, the owning Sc::ShapeInteraction, and CCD/touch bookkeeping flags. Most methods
are thin accessors over flag bits in mNpUnit or mFlags.
*/
class PxsContactManager
{
public:
										PxsContactManager(PxU32 index);
										~PxsContactManager();

	// d != 0 disables strong friction for this pair; d == 0 re-enables it.
	PX_FORCE_INLINE		void			setDisableStrongFriction(PxU32 d)	{ (!d)	? mNpUnit.mFlags &= ~PxcNpWorkUnitFlag::eDISABLE_STRONG_FRICTION
																					: mNpUnit.mFlags |= PxcNpWorkUnitFlag::eDISABLE_STRONG_FRICTION; }

	// Rest distance: separation at which the pair is considered at rest.
	PX_FORCE_INLINE		PxReal			getRestDistance()			const	{ return mNpUnit.mRestDistance;	}
	PX_FORCE_INLINE		void			setRestDistance(PxReal v)			{ mNpUnit.mRestDistance = v;	}

	// Per-body dominance values (stored in the work unit).
	PX_FORCE_INLINE		PxU8			getDominance0()				const	{ return mNpUnit.getDominance0();	}
	PX_FORCE_INLINE		void			setDominance0(PxU8 v)				{ mNpUnit.setDominance0(v);			}
	PX_FORCE_INLINE		PxU8			getDominance1()				const	{ return mNpUnit.getDominance1();	}
	PX_FORCE_INLINE		void			setDominance1(PxU8 v)				{ mNpUnit.setDominance1(v);			}

	// Nonzero if the pair currently touches.
	PX_FORCE_INLINE		PxU16			getTouchStatus()			const	{ return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_TOUCH);		}
	// Nonzero once narrowphase has run for this pair at least once (touch state is known either way).
	PX_FORCE_INLINE		PxU16			touchStatusKnown()			const	{ return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eTOUCH_KNOWN);	}
	// Tri-state touch: 1 = touching, -1 = known not touching, 0 = unknown.
	PX_FORCE_INLINE		PxI32			getTouchIdx()				const	{ return (mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_TOUCH) ? 1 : (mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_NO_TOUCH ? -1 : 0); }

	// Stable index of this manager (set at construction).
	PX_FORCE_INLINE		PxU32			getIndex()					const	{ return mCmIndex;	}

	// CCD retouch flag: pair re-touched during a CCD pass (see PxcNpWorkUnitStatusFlag).
	PX_FORCE_INLINE		PxU16			getHasCCDRetouch()			const	{ return PxU16(mNpUnit.mStatusFlags & PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH); }
	PX_FORCE_INLINE		void			clearCCDRetouch()					{ mNpUnit.mStatusFlags &= ~PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH;	}
	PX_FORCE_INLINE		void			raiseCCDRetouch()					{ mNpUnit.mStatusFlags |= PxcNpWorkUnitStatusFlag::eHAS_CCD_RETOUCH;	}

	// flags stuff - needs to be refactored
	PX_FORCE_INLINE		PxIntBool		isChangeable()				const	{ return PxIntBool(mFlags & PXS_CM_CHANGEABLE);			}
	// CCD is active for the pair only when both the local flag and the work-unit flag are set.
	PX_FORCE_INLINE		PxIntBool		getCCD()					const	{ return PxIntBool((mFlags & PXS_CM_CCD_LINEAR) && (mNpUnit.mFlags & PxcNpWorkUnitFlag::eDETECT_CCD_CONTACTS)); }
	PX_FORCE_INLINE		PxIntBool		getHadCCDContact()			const	{ return PxIntBool(mFlags & PXS_CM_CCD_CONTACT);		}
	PX_FORCE_INLINE		void			setHadCCDContact()					{ mFlags |= PXS_CM_CCD_CONTACT;							}
						void			setCCD(bool enable);
	// Clears the CCD-contact flag and drops the CCD contact stream pointer.
	PX_FORCE_INLINE		void			clearCCDContactInfo()				{ mFlags &= ~PXS_CM_CCD_CONTACT; mNpUnit.mCCDContacts = NULL; }

	PX_FORCE_INLINE		PxcNpWorkUnit&			getWorkUnit()				{ return mNpUnit;	}
	PX_FORCE_INLINE		const PxcNpWorkUnit&	getWorkUnit()		const	{ return mNpUnit;	}

	// Bodies of the pair; NULL presumably means static — TODO confirm against callers.
	PX_FORCE_INLINE		PxsRigidBody*			getRigidBody0()		const	{ return mRigidBody0;	}
	PX_FORCE_INLINE		PxsRigidBody*			getRigidBody1()		const	{ return mRigidBody1;	}
	PX_FORCE_INLINE		Sc::ShapeInteraction*	getShapeInteraction() const	{ return mShapeInteraction;	}

	// Setup solver-constraints
	PX_FORCE_INLINE		void			resetCachedState()
	{
		// happens when the body transform or shape relative transform changes.
		mNpUnit.clearCachedState();
	}

private:
	//KS - moving this up - we want to get at flags
	PxsRigidBody*			mRigidBody0;
	PxsRigidBody*			mRigidBody1;
	PxU32					mFlags;				// PXS_CM_* bits below
	PxU32					mCmIndex;			// PT: moved to padding bytes from mNpUnit
	Sc::ShapeInteraction*	mShapeInteraction;

	// everything required for narrow phase to run
	PxcNpWorkUnit			mNpUnit;

	enum
	{
		PXS_CM_CHANGEABLE	= (1 << 0),
		PXS_CM_CCD_LINEAR	= (1 << 1),
		PXS_CM_CCD_CONTACT	= (1 << 2)
	};

	friend class Sc::ShapeInteraction;
};
}
#endif

View File

@@ -0,0 +1,110 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTACT_MANAGER_STATE_H
#define PXS_CONTACT_MANAGER_STATE_H
#include "foundation/PxSimpleTypes.h"
namespace physx
{
struct PxsShapeCore;
/**
There is an implicit 1:1 mapping between PxgContactManagerInput and PxsContactManagerOutput. The structures are split because PxgNpContactManagerInput contains constant
data that is produced by the CPU code and PxgNpContactManagerOutput contains per-frame contact information produced by the NP.

There is also a 1:1 mapping between the PxgNpContactManager and PxsContactManager. This mapping is handled within the PxgNPhaseCore.

The previous contact states are implicitly cached in PxsContactManager and will be propagated to the solver. Friction correlation is also done implicitly using cached
information in PxsContactManager.

The NP will produce a list of pairs that found/lost patches for the solver along with updating the PxgNpContactManagerOutput for all pairs.
*/

/**
\brief Status bits stored in PxsContactManagerOutput::statusFlag.
*/
struct PxsContactManagerStatusFlag
{
	enum Enum
	{
		eHAS_NO_TOUCH				= (1 << 0),	// narrowphase ran and found no touch
		eHAS_TOUCH					= (1 << 1),	// narrowphase ran and found touch
		//eHAS_SOLVER_CONSTRAINTS	= (1 << 2),
		eREQUEST_CONSTRAINTS		= (1 << 3),
		eHAS_CCD_RETOUCH			= (1 << 4),	// Marks pairs that are touching at a CCD pass and were touching at discrete collision or at a previous CCD pass already
												// but we can not tell whether they lost contact in a pass before. We send them as pure eNOTIFY_TOUCH_CCD events to the
												// contact report callback if requested.
		eDIRTY_MANAGER				= (1 << 5),
		eTOUCH_KNOWN				= eHAS_NO_TOUCH | eHAS_TOUCH,	// The touch status is known (if narrowphase never ran for a pair then no flag will be set)
		eSTATIC_OR_KINEMATIC		= (1 << 6)
	};
};
/**
\brief Per-frame narrowphase output for one contact manager.

Layout is fixed at 48 bytes (16-byte aligned) — the trailing comments record the
role of each field; this struct is shared between CPU and GPU narrowphase paths.
*/
struct PX_ALIGN_PREFIX(16) PxsContactManagerOutput
{
	PxU8* contactPatches;			//Start index/ptr for contact patches
	PxU8* contactPoints;			//Start index/ptr for contact points
	PxReal* contactForces;			//Start index/ptr for contact forces
	PxU8* frictionPatches;			//Contact patches friction information
	PxU8 allflagsStart;				//padding for compatibility with existing code
	PxU8 nbPatches;					//Num patches
	PxU8 statusFlag;				//Status flag (has touch etc.)
	PxU8 prevPatches;				//Previous number of patches
	PxU16 nbContacts;				//Num contacts
	PxU16 flags;					//Not really part of outputs, but we have 4 bytes of padding, so why not?
	PxU8 pad[8];

	// Internal face indices are stored directly after the per-contact forces
	// (one PxU32 per contact); returns NULL when there is no force buffer.
	PX_FORCE_INLINE PxU32* getInternalFaceIndice() const
	{
		return contactForces ? reinterpret_cast<PxU32*>(contactForces + nbContacts) : NULL;
	}
}
PX_ALIGN_SUFFIX(16);
// The struct is consumed in 16-byte-aligned arrays; size must stay a multiple of 16.
PX_COMPILE_TIME_ASSERT((sizeof(PxsContactManagerOutput) & 0xf) == 0);
/**
\brief Compact 4-byte summary of a contact manager's patch counts and status.
*/
struct PX_ALIGN_PREFIX(4) PxsContactManagerOutputCounts
{
	PxU8 nbPatches;					//Num patches
	PxU8 prevPatches;				//Previous number of patches
	PxU8 statusFlag;				//Status flag;
	PxU8 unused;					//Unused
} PX_ALIGN_SUFFIX(4);
/**
\brief Torsional friction parameters for a contact patch.

The default constructor intentionally leaves the members uninitialized
(performance: instances are bulk-allocated and overwritten before use).
*/
struct PX_ALIGN_PREFIX(8) PxsTorsionalFrictionData
{
	PxReal mTorsionalPatchRadius;	// radius used to compute torsional friction torque
	PxReal mMinTorsionalRadius;		// lower clamp on the effective patch radius
	PxsTorsionalFrictionData() {}
	PxsTorsionalFrictionData(const PxReal patchRadius, const PxReal minPatchRadius) :
		mTorsionalPatchRadius(patchRadius), mMinTorsionalRadius(minPatchRadius) {}
} PX_ALIGN_SUFFIX(8);
}
#endif //PXS_CONTACT_MANAGER_STATE_H

View File

@@ -0,0 +1,302 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_CONTEXT_H
#define PXS_CONTEXT_H
#include "foundation/PxPinnedArray.h"
#include "foundation/PxPool.h"
#include "PxVisualizationParameter.h"
#include "PxSceneDesc.h"
#include "common/PxRenderOutput.h"
#include "CmPool.h"
#include "PxvNphaseImplementationContext.h"
#include "PxvSimStats.h"
#include "PxsContactManager.h"
#include "PxcNpBatch.h"
#include "PxcConstraintBlockStream.h"
#include "PxcNpCacheStreamPair.h"
#include "PxcNpMemBlockPool.h"
#include "CmUtils.h"
#include "CmTask.h"
#include "PxContactModifyCallback.h"
#include "PxsTransformCache.h"
#include "GuPersistentContactManifold.h"
#include "PxcNpThreadContext.h"
namespace physx
{
#if PX_SUPPORT_GPU_PHYSX
class PxCudaContextManager;
#endif
class PxsRigidBody;
struct PxcConstraintBlock;
class PxsMaterialManager;
class PxsCCDContext;
struct PxsContactManagerOutput;
struct PxvContactManagerTouchEvent;
namespace Cm
{
class FlushPool;
}
namespace IG
{
typedef PxU32 EdgeIndex;
}
// Indices into PxsContext::mCMTouchEventCount — one counter per touch-event category.
enum PxsTouchEventCount
{
	PXS_LOST_TOUCH_COUNT,
	PXS_NEW_TOUCH_COUNT,
	PXS_CCD_RETOUCH_COUNT,	// pairs that are touching at a CCD pass and were touching at discrete collision or at a previous CCD pass already
							// (but they could have lost touch in between)
	PXS_TOUCH_EVENT_COUNT	// number of categories (array size), not a real event
};
/**
\brief Central low-level collision context.

Owns the contact manager pool, persistent-manifold pools, per-thread narrowphase
contexts, touch-event bookkeeping, visualization parameters and the handles to the
active narrowphase implementation (CPU and/or GPU fallback).
*/
class PxsContext : public PxUserAllocated, public PxcNpContext
{
												PX_NOCOPY(PxsContext)
public:
												PxsContext(const PxSceneDesc& desc, PxTaskManager*, Cm::FlushPool&, PxCudaContextManager*, PxU32 poolSlabSize, PxU64 contextID);
												~PxsContext();

					void						createTransformCache(PxVirtualAllocatorCallback& allocatorCallback);

					// Creates (or recycles) a contact manager; useCCD registers it for CCD filtering.
					PxsContactManager*			createContactManager(PxsContactManager* contactManager, bool useCCD);
					void						createCache(Gu::Cache& cache, PxGeometryType::Enum geomType0, PxGeometryType::Enum geomType1);
					void						destroyCache(Gu::Cache& cache);
					void						destroyContactManager(PxsContactManager* cm);

	PX_FORCE_INLINE	PxU64						getContextId() const { return mContextID; }

	// Collision properties
	PX_FORCE_INLINE	PxContactModifyCallback*	getContactModifyCallback()			const	{ return mContactModifyCallback;	}
	// Forwards the callback to the narrowphase implementation as well.
	PX_FORCE_INLINE	void						setContactModifyCallback(PxContactModifyCallback* c) { mContactModifyCallback = c; mNpImplementationContext->setContactModifyCallback(c);}

	// resource-related
					void						setScratchBlock(void* addr, PxU32 size);

	// Contact-distance array is owned by the caller; only the pointer is stored.
	PX_FORCE_INLINE void						setContactDistance(const PxFloatArrayPinned* contactDistances)	{ mContactDistances = contactDistances;	}

	// Task-related
					void						updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation,
													PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShapeTask);
					void						secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation);
					void						fetchUpdateContactManager();
					void						swapStreams();
					void						resetThreadContexts();

	// Manager status change
					bool						getManagerTouchEventCount(PxU32* newTouch, PxU32* lostTouch, PxU32* ccdTouch)	const;
					void						fillManagerTouchEvents(
													PxvContactManagerTouchEvent* newTouch, PxU32& newTouchCount,
													PxvContactManagerTouchEvent* lostTouch, PxU32& lostTouchCount,
													PxvContactManagerTouchEvent* ccdTouch, PxU32& ccdTouchCount);

					void						beginUpdate();

	// PX_ENABLE_SIM_STATS
	PX_FORCE_INLINE	PxvSimStats&				getSimStats()						{ return mSimStats;		}
	PX_FORCE_INLINE	const PxvSimStats&			getSimStats()				const	{ return mSimStats;		}

	PX_FORCE_INLINE	Cm::FlushPool&				getTaskPool()				const	{ return mTaskPool;		}
	PX_FORCE_INLINE	PxRenderBuffer&				getRenderBuffer()					{ return mRenderBuffer;	}

	// Debug visualization parameters (indexed by PxVisualizationParameter).
	PX_FORCE_INLINE	PxReal						getRenderScale()			const	{ return mVisualizationParams[PxVisualizationParameter::eSCALE];	}
	PX_FORCE_INLINE PxReal						getVisualizationParameter(PxVisualizationParameter::Enum param) const
												{
													PX_ASSERT(param < PxVisualizationParameter::eNUM_VALUES);
													return mVisualizationParams[param];
												}
	PX_FORCE_INLINE void						setVisualizationParameter(PxVisualizationParameter::Enum param, PxReal value)
												{
													PX_ASSERT(param < PxVisualizationParameter::eNUM_VALUES);
													PX_ASSERT(value >= 0.0f);
													mVisualizationParams[param] = value;
												}

	PX_FORCE_INLINE	void						setVisualizationCullingBox(const PxBounds3& box)	{ mVisualizationCullingBox = box;	}
	PX_FORCE_INLINE	const PxBounds3&			getVisualizationCullingBox()	const				{ return mVisualizationCullingBox;	}

	// Feature toggles: persistent contact manifolds (PCM), contact caching, averaged contact point.
	PX_FORCE_INLINE	bool						getPCM()					const	{ return mPCM;					}
	PX_FORCE_INLINE	bool						getContactCacheFlag()		const	{ return mContactCache;			}
	PX_FORCE_INLINE	bool						getCreateAveragePoint()		const	{ return mCreateAveragePoint;	}

	// general stuff
					void						shiftOrigin(const PxVec3& shift);

	PX_FORCE_INLINE	void						setPCM(bool enabled)				{ mPCM = enabled;			}
	PX_FORCE_INLINE	void						setContactCache(bool enabled)		{ mContactCache = enabled;	}

	PX_FORCE_INLINE	PxcScratchAllocator&		getScratchAllocator()				{ return mScratchAllocator;	}
	PX_FORCE_INLINE	PxsTransformCache&			getTransformCache()					{ return *mTransformCache;	}
	PX_FORCE_INLINE	const PxReal*				getContactDistances()		const	{ return mContactDistances->begin();	}

	// Active narrowphase implementation, plus optional CPU fallback (GPU path).
	PX_FORCE_INLINE	PxvNphaseImplementationContext*	getNphaseImplementationContext()						const	{ return mNpImplementationContext;			}
	PX_FORCE_INLINE	void							setNphaseImplementationContext(PxvNphaseImplementationContext* ctx)	{ mNpImplementationContext = ctx;	}

	PX_FORCE_INLINE	PxvNphaseImplementationContext*	getNphaseFallbackImplementationContext()				const	{ return mNpFallbackImplementationContext;	}
	PX_FORCE_INLINE	void							setNphaseFallbackImplementationContext(PxvNphaseImplementationContext* ctx)	{ mNpFallbackImplementationContext = ctx;	}

					PxU32						getMaxPatchCount() const { return mMaxPatches; }

	// Per-thread narrowphase context: get one before NP work, return it after.
	PX_FORCE_INLINE PxcNpThreadContext*			getNpThreadContext()
	{
		// We may want to conditional compile to exclude this on single threaded implementations
		// if it is determined to be a performance hit.
		return mNpThreadContextPool.get();
	}

	PX_FORCE_INLINE void						putNpThreadContext(PxcNpThreadContext* threadContext)
																						{ mNpThreadContextPool.put(threadContext);	}
	PX_FORCE_INLINE	PxMutex&					getLock()								{ return mLock;								}

	PX_FORCE_INLINE	PxTaskManager&				getTaskManager()
	{
		PX_ASSERT(mTaskManager);
		return *mTaskManager;
	}

	PX_FORCE_INLINE	PxCudaContextManager*		getCudaContextManager()
	{
		return mCudaContextManager;
	}

	PX_FORCE_INLINE	void						clearManagerTouchEvents();

	PX_FORCE_INLINE Cm::PoolList<PxsContactManager>&	getContactManagerPool()
	{
		return mContactManagerPool;
	}

	// Records CCD-enabled pairs in a bitmap (by manager index); growth rounds capacity
	// up to a multiple of 256 to amortize resizes.
	PX_FORCE_INLINE void setActiveContactManager(const PxsContactManager* manager, PxIntBool useCCD)
	{
		/*const PxU32 index = manager->getIndex();
		if(index >= mActiveContactManager.size())
		{
			const PxU32 newSize = (2 * index + 256)&~255;
			mActiveContactManager.resize(newSize);
		}
		mActiveContactManager.set(index);*/

		//Record any pairs that have CCD enabled!
		if(useCCD)
		{
			const PxU32 index = manager->getIndex();
			if(index >= mActiveContactManagersWithCCD.size())
			{
				const PxU32 newSize = (2 * index + 256)&~255;
				mActiveContactManagersWithCCD.resize(newSize);
			}
			mActiveContactManagersWithCCD.set(index);
		}
	}

private:
					void						mergeCMDiscreteUpdateResults(PxBaseTask* continuation);

	// Threading
	PxcThreadCoherentCache<PxcNpThreadContext, PxcNpContext>
												mNpThreadContextPool;

	// Contact managers
	Cm::PoolList<PxsContactManager>				mContactManagerPool;
	PxPool<Gu::LargePersistentContactManifold>	mManifoldPool;
	PxPool<Gu::SpherePersistentContactManifold>	mSphereManifoldPool;

//	PxBitMap									mActiveContactManager;
	PxBitMap									mActiveContactManagersWithCCD; //KS - adding to filter any pairs that had a touch
	PxBitMap									mContactManagersWithCCDTouch; //KS - adding to filter any pairs that had a touch
	PxBitMap									mContactManagerTouchEvent;
	//Cm::BitMap								mContactManagerPatchChangeEvent;
	PxU32										mCMTouchEventCount[PXS_TOUCH_EVENT_COUNT];

	PxMutex										mLock;

	PxContactModifyCallback*					mContactModifyCallback;

	// narrowphase platform-dependent implementations support
	PxvNphaseImplementationContext*				mNpImplementationContext;
	PxvNphaseImplementationContext*				mNpFallbackImplementationContext;

	// debug rendering (CS TODO: MS would like to have these wrapped into a class)
	PxReal										mVisualizationParams[PxVisualizationParameter::eNUM_VALUES];
	PxBounds3									mVisualizationCullingBox;

	PxTaskManager*								mTaskManager;
	Cm::FlushPool&								mTaskPool;
	PxCudaContextManager*						mCudaContextManager;

//	PxU32										mTouchesLost;
//	PxU32										mTouchesFound;

	// PX_ENABLE_SIM_STATS
	PxvSimStats									mSimStats;
	bool										mPCM;
	bool										mContactCache;
	bool										mCreateAveragePoint;

	PxsTransformCache*							mTransformCache;

	const PxFloatArrayPinned*					mContactDistances;

	PxU32										mMaxPatches;

	const PxU64									mContextID;

	friend class PxsCCDContext;
	friend class PxsNphaseImplementationContext;
	friend class PxgNphaseImplementationContext; //FDTODO ideally it shouldn't be here..
};
// Resets the per-frame touch-event bookkeeping: clears the bitmap of managers
// that raised a touch status change and zeroes every event-category counter.
PX_FORCE_INLINE void PxsContext::clearManagerTouchEvents()
{
	mContactManagerTouchEvent.clear();

	PxU32 remaining = PXS_TOUCH_EVENT_COUNT;
	while(remaining--)
		mCMTouchEventCount[remaining] = 0;
}
}
#endif

View File

@@ -0,0 +1,93 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_HEAP_MEMORY_ALLOCATOR_H
#define PXS_HEAP_MEMORY_ALLOCATOR_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
struct PxsHeapStats
{
enum Enum
{
eOTHER = 0,
eBROADPHASE,
eNARROWPHASE,
eSOLVER,
eARTICULATION,
eSIMULATION,
eSIMULATION_ARTICULATION,
eSIMULATION_PARTICLES,
eSIMULATION_SOFTBODY,
eSIMULATION_FEMCLOTH,
eSHARED_PARTICLES,
eSHARED_SOFTBODY,
eSHARED_FEMCLOTH,
eHEAPSTATS_COUNT
};
PxU64 stats[eHEAPSTATS_COUNT];
PxsHeapStats()
{
for (PxU32 i = 0; i < eHEAPSTATS_COUNT; i++)
{
stats[i] = 0;
}
}
};
// PT: TODO: consider dropping this class
/**
\brief Abstract heap allocator interface; inherits the allocate/deallocate
contract from PxVirtualAllocatorCallback without adding members of its own.
*/
class PxsHeapMemoryAllocator : public PxVirtualAllocatorCallback, public PxUserAllocated
{
public:
	virtual ~PxsHeapMemoryAllocator(){}

	// PxVirtualAllocatorCallback
	//virtual void* allocate(size_t size, int group, const char* file, int line) = 0;
	//virtual void deallocate(void* ptr) = 0;
	//~PxVirtualAllocatorCallback
};
/**
\brief Manager owning the heap allocators and exposing device-memory statistics.

Concrete implementations (elsewhere in the SDK) provide device heap sizing,
stats reporting and deferred-deallocation flushing.
*/
class PxsHeapMemoryAllocatorManager : public PxUserAllocated
{
public:
	virtual ~PxsHeapMemoryAllocatorManager() {}

	virtual PxU64 getDeviceMemorySize() const = 0;
	virtual PxsHeapStats getDeviceHeapStats() const = 0;
	virtual void flushDeferredDeallocs() = 0;

	// Non-owning? — set by the concrete implementation; TODO confirm ownership.
	PxsHeapMemoryAllocator* mMappedMemoryAllocators;
};
}
#endif

View File

@@ -0,0 +1,142 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_ISLAND_MANAGER_TYPES_H
#define PXS_ISLAND_MANAGER_TYPES_H
namespace physx
{
class PxsContactManager;
typedef PxU32 NodeType;
typedef PxU32 EdgeType;
/**
\brief Per-island element counts: how many bodies, articulations, contact
managers and constraints belong to one simulation island.
Members are intentionally left uninitialized by the constructor.
*/
class PxsIslandIndices
{
public:
					PxsIslandIndices()	{}
					~PxsIslandIndices()	{}

	NodeType		bodies;
	NodeType		articulations;
	EdgeType		contactManagers;
	EdgeType		constraints;
};
// PT: it needs to be a PxU64 because we store a PxNodeIndex there for articulations (and we do use all the data)
typedef PxU64 PxsNodeType;
/**
\brief Each contact manager or constraint references two separate bodies, where
a body can be a dynamic rigid body, a kinematic rigid body, an articulation or a static.
The struct PxsIndexedInteraction describes the bodies that make up the pair.
*/
struct PxsIndexedInteraction	// 24
{
	/**
	\brief An enumerated list of all possible body types.
	A body type is stored for each body in the pair.
	*/
	enum Enum
	{
		eBODY			= 0,
		eKINEMATIC		= 1,
		eARTICULATION	= 2,
		eWORLD			= 3
	};

	/**
	\brief An index describing how to access body0
	\note If body0 is a dynamic (eBODY) rigid body then solverBody0 is an index into PxsIslandObjects::bodies.
	\note If body0 is a kinematic (eKINEMATIC) rigid body then solverBody0 is an index into PxsIslandManager::getActiveKinematics.
	\note If body0 is a static (eWORLD) then solverBody0 is PX_MAX_U32 or PX_MAX_U64, depending on the platform being 32- or 64-bit.
	\note If body0 is an articulation then the articulation is found directly from Dy::getArticulation(articulation0)
	\note If body0 is a deformable volume then the deformable volume is found directly from Dy::getDeformableVolume(deformableVolume0)
	*/
	union
	{
		PxsNodeType solverBody0;
		PxsNodeType articulation0;	// same storage, interpreted per indexType0
	};

	/**
	\brief An index describing how to access body1
	\note If body1 is a dynamic (eBODY) rigid body then solverBody1 is an index into PxsIslandObjects::bodies.
	\note If body1 is a kinematic (eKINEMATIC) rigid body then solverBody1 is an index into PxsIslandManager::getActiveKinematics.
	\note If body1 is a static (eWORLD) then solverBody1 is PX_MAX_U32 or PX_MAX_U64, depending on the platform being 32- or 64-bit.
	\note If body1 is an articulation then the articulation is found directly from Dy::getArticulation(articulation1)
	\note If body1 is a deformable volume then the deformable volume is found directly from Dy::getDeformableVolume(deformableVolume1)
	*/
	union
	{
		PxsNodeType solverBody1;
		PxsNodeType articulation1;	// same storage, interpreted per indexType1
	};

	/**
	\brief The type (eBODY, eKINEMATIC etc) of body0
	*/
	PxU8 indexType0;

	/**
	\brief The type (eBODY, eKINEMATIC etc) of body1
	*/
	PxU8 indexType1;

	PxU8 pad[2];	// explicit padding; struct is 24 bytes with alignment
};
// PT: TODO: this is the only type left, merge it with base class and stop wasting padding bytes
/**
\brief A PxsIndexedInteraction pair plus the contact manager it belongs to.
\see PxsIslandObjects, PxsIndexedInteraction
*/
struct PxsIndexedContactManager : public PxsIndexedInteraction	// 32
{
	/**
	\brief The contact manager corresponds to the value set in PxsIslandManager::setEdgeRigidCM
	*/
	PxsContactManager* contactManager;

	// NOTE: the base-class members (solverBody0/1, indexType0/1) are left
	// uninitialized here — presumably filled in by the caller after construction.
	PxsIndexedContactManager(PxsContactManager* cm) : contactManager(cm) {}
};
#if !PX_X64
// On 32-bit platforms the struct must stay 16-byte-size-aligned.
PX_COMPILE_TIME_ASSERT(0==(sizeof(PxsIndexedContactManager) & 0x0f));
#endif
} //namespace physx
#endif

View File

@@ -0,0 +1,858 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_ISLAND_SIM_H
#define PXS_ISLAND_SIM_H
#include "foundation/PxAssert.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxArray.h"
#include "CmPriorityQueue.h"
#include "CmBlockArray.h"
#include "PxNodeIndex.h"
namespace physx
{
struct PartitionEdge;
namespace IG
{
#define IG_INVALID_ISLAND 0xFFFFFFFFu
#define IG_INVALID_EDGE 0xFFFFFFFFu
#define IG_LIMIT_DIRTY_NODES 0
#define IG_SANITY_CHECKS 0
typedef PxU32 IslandId;
typedef PxU32 EdgeIndex;
typedef PxU32 EdgeInstanceIndex;
/**
\brief A connection in the island graph: one contact manager or constraint
linking two nodes, with lifecycle state tracked as bit flags.
*/
struct Edge
{
	//Edge instances can be implicitly calculated based on this edge index, which is an offset into the array of edges.
	//From that, the child edge index is simply the

	//The constraint or contact referenced by this edge

	// What kind of interaction this edge represents.
	enum EdgeType
	{
		eCONTACT_MANAGER,
		eCONSTRAINT,
		eSOFT_BODY_CONTACT,
		eFEM_CLOTH_CONTACT,
		ePARTICLE_SYSTEM_CONTACT,
		eEDGE_TYPE_COUNT
	};

	// Lifecycle/state bits stored in mEdgeState.
	enum EdgeState
	{
		eINSERTED			=	1<<0,
		ePENDING_DESTROYED	=	1<<1,
		eACTIVE				=	1<<2,
		eIN_DIRTY_LIST		=	1<<3,
		eDESTROYED			=	1<<4,
		eREPORT_ONLY_DESTROY=	1<<5,
		eACTIVATING			=	1<<6
	};

	PxU16 mEdgeType;	// PT: EdgeType. Could be PxU8.
	PxU16 mEdgeState;	// PT: could be PxU8.

	// Intrusive doubly-linked list of edges belonging to the same island.
	EdgeIndex mNextIslandEdge, mPrevIslandEdge;

	// State-bit mutators (set/clear one EdgeState flag each).
	PX_FORCE_INLINE void setInserted()				{ mEdgeState |= eINSERTED;				}
	PX_FORCE_INLINE void clearInserted()			{ mEdgeState &= ~eINSERTED;				}
	PX_FORCE_INLINE void clearDestroyed()			{ mEdgeState &= ~eDESTROYED;			}
	PX_FORCE_INLINE void setPendingDestroyed()		{ mEdgeState |= ePENDING_DESTROYED;		}
	PX_FORCE_INLINE void clearPendingDestroyed()	{ mEdgeState &= ~ePENDING_DESTROYED;	}
	PX_FORCE_INLINE void activateEdge()				{ mEdgeState |= eACTIVE;				}
	PX_FORCE_INLINE void deactivateEdge()			{ mEdgeState &= ~eACTIVE;				}
	PX_FORCE_INLINE void markInDirtyList()			{ mEdgeState |= eIN_DIRTY_LIST;			}
	PX_FORCE_INLINE void clearInDirtyList()			{ mEdgeState &= ~eIN_DIRTY_LIST;		}
	PX_FORCE_INLINE void setReportOnlyDestroy()		{ mEdgeState |= eREPORT_ONLY_DESTROY;	}

public:
	// New edges start as destroyed contact-manager edges, unlinked from any island.
	Edge() : mEdgeType(Edge::eCONTACT_MANAGER), mEdgeState(eDESTROYED),
		mNextIslandEdge(IG_INVALID_EDGE), mPrevIslandEdge(IG_INVALID_EDGE)
	{
	}

	// State-bit queries (test one EdgeState flag each).
	PX_FORCE_INLINE PxIntBool isInserted()			const	{ return PxIntBool(mEdgeState & eINSERTED);				}
	PX_FORCE_INLINE PxIntBool isDestroyed()			const	{ return PxIntBool(mEdgeState & eDESTROYED);			}
	PX_FORCE_INLINE PxIntBool isPendingDestroyed()	const	{ return PxIntBool(mEdgeState & ePENDING_DESTROYED);	}
	PX_FORCE_INLINE PxIntBool isActive()			const	{ return PxIntBool(mEdgeState & eACTIVE);				}
	PX_FORCE_INLINE PxIntBool isInDirtyList()		const	{ return PxIntBool(mEdgeState & eIN_DIRTY_LIST);		}
	PX_FORCE_INLINE PxIntBool isReportOnlyDestroy()	const	{ return PxIntBool(mEdgeState & eREPORT_ONLY_DESTROY);	}
	PX_FORCE_INLINE EdgeType getEdgeType()			const	{ return EdgeType(mEdgeType);							}
};
struct EdgeInstance
{
	// Intrusive doubly-linked list links: the next/previous edge instance in the
	// owning node's list of edge instances. Both start out unlinked.
	EdgeInstanceIndex mNextEdge = IG_INVALID_EDGE;
	EdgeInstanceIndex mPrevEdge = IG_INVALID_EDGE;

	EdgeInstance() {}
};
// Simple recycling handle allocator: hands out handles 0..N-1, reusing freed
// handles (LIFO) before growing the range.
template<typename Handle>
class HandleManager
{
	PxArray<Handle> mFreeHandles;	// Previously released handles available for reuse.
	Handle mCurrentHandle;			// High-water mark: every handle ever issued is < mCurrentHandle.
public:
	HandleManager() : mFreeHandles("FreeHandles"), mCurrentHandle(0)
	{
	}

	~HandleManager(){}

	// Returns a handle, preferring a recycled one over growing the range.
	Handle getHandle()
	{
		if(mFreeHandles.size())
		{
			Handle handle = mFreeHandles.popBack();
			PX_ASSERT(isValidHandle(handle));
			return handle;
		}
		return mCurrentHandle++;
	}

	// Debug helper: true if the handle is not currently sitting in the free list. O(n).
	bool isNotFreeHandle(Handle handle) const
	{
		for(PxU32 a = 0; a < mFreeHandles.size(); ++a)
		{
			if(mFreeHandles[a] == handle)
				return false;
		}
		return true;
	}

	// Releases a handle. Must be a valid, currently-issued handle that is not already free.
	void freeHandle(Handle handle)
	{
		PX_ASSERT(isValidHandle(handle));
		PX_ASSERT(isNotFreeHandle(handle));
		// Fix: the original condition was "handle == mCurrentHandle", which contradicts the
		// isValidHandle() assert above (valid handles are strictly < mCurrentHandle) and so
		// could never fire for legal input - the shrink branch was dead and every freed
		// handle went to the free list. Freeing the most recently issued handle now shrinks
		// the range instead. Free-list entries remain < mCurrentHandle because the top
		// handle is never in the free list when this branch is taken.
		if(handle + 1 == mCurrentHandle)
			mCurrentHandle--;
		else
			mFreeHandles.pushBack(handle);
	}

	// A handle is valid iff it lies below the current high-water mark.
	bool isValidHandle(Handle handle) const
	{
		return handle < mCurrentHandle;
	}

	PX_FORCE_INLINE PxU32 getTotalHandles() const { return mCurrentHandle; }
};
// A node in the island graph: one rigid body, articulation, deformable or particle
// system. State is packed into a PxU8 bitmask (mFlags) plus a PxU8 type tag.
class Node
{
public:
enum NodeType
{
eRIGID_BODY_TYPE,
eARTICULATION_TYPE,
eDEFORMABLE_SURFACE_TYPE,
eDEFORMABLE_VOLUME_TYPE,
ePARTICLESYSTEM_TYPE,
eTYPE_COUNT
};
enum State
{
eREADY_FOR_SLEEPING = 1u << 0, //! Ready to go to sleep
eACTIVE = 1u << 1, //! Active
eKINEMATIC = 1u << 2, //! Kinematic
eDELETED = 1u << 3, //! Is pending deletion
eDIRTY = 1u << 4, //! Is dirty (i.e. lost a connection)
eACTIVATING = 1u << 5 //! Is in the activating list
};
// Head of this node's intrusive list of edge instances.
EdgeInstanceIndex mFirstEdgeIndex;
PxU8 mFlags; // Bitmask of State flags.
PxU8 mType; // NodeType value.
PxU16 mStaticTouchCount;
//PxU32 mActiveNodeIndex; //! Look-up for this node in the active nodes list, activating list or deactivating list...
// Intrusive links chaining the nodes of an island together.
PxNodeIndex mNextNode, mPrevNode;
//A counter for the number of active references to this body. Whenever an edge is activated, this is incremented.
//Whenver an edge is deactivated, this is decremented. This is used for kinematic bodies to determine if they need
//to be in the active kinematics list
PxU32 mActiveRefCount;
//A node can correspond with one kind of user-defined object
void* mObject;
PX_FORCE_INLINE Node() : mType(eRIGID_BODY_TYPE) { reset(); }
PX_FORCE_INLINE ~Node() { }
// Returns the node to its initial "deleted, unlinked" state.
// Note: mType is deliberately left untouched here; it is assigned separately.
PX_FORCE_INLINE void reset()
{
mFirstEdgeIndex = IG_INVALID_EDGE;
mFlags = eDELETED;
mObject = NULL;
mActiveRefCount = 0;
mStaticTouchCount = 0;
}
// Flag mutators: each sets or clears a single State bit in mFlags.
PX_FORCE_INLINE void setActive() { mFlags |= eACTIVE; }
PX_FORCE_INLINE void clearActive() { mFlags &= ~eACTIVE; }
PX_FORCE_INLINE void setActivating() { mFlags |= eACTIVATING; }
PX_FORCE_INLINE void clearActivating() { mFlags &= ~eACTIVATING; }
//Activates a body/node.
PX_FORCE_INLINE void setIsReadyForSleeping() { mFlags |= eREADY_FOR_SLEEPING; }
PX_FORCE_INLINE void clearIsReadyForSleeping() { mFlags &= (~eREADY_FOR_SLEEPING); }
PX_FORCE_INLINE void setIsDeleted() { mFlags |= eDELETED; }
PX_FORCE_INLINE void setKinematicFlag() { PX_ASSERT(!isKinematic()); mFlags |= eKINEMATIC; }
PX_FORCE_INLINE void clearKinematicFlag() { PX_ASSERT(isKinematic()); mFlags &= (~eKINEMATIC); }
PX_FORCE_INLINE void markDirty() { mFlags |= eDIRTY; }
PX_FORCE_INLINE void clearDirty() { mFlags &= (~eDIRTY); }
public:
// Flag queries: each tests one (or, for isActiveOrActivating, two) State bits.
PX_FORCE_INLINE PxIntBool isActive() const { return PxIntBool(mFlags & eACTIVE); }
PX_FORCE_INLINE PxIntBool isActiveOrActivating() const { return PxIntBool(mFlags & (eACTIVE | eACTIVATING)); }
PX_FORCE_INLINE PxIntBool isActivating() const { return PxIntBool(mFlags & eACTIVATING); }
PX_FORCE_INLINE PxIntBool isKinematic() const { return PxIntBool(mFlags & eKINEMATIC); }
PX_FORCE_INLINE PxIntBool isDeleted() const { return PxIntBool(mFlags & eDELETED); }
PX_FORCE_INLINE PxIntBool isDirty() const { return PxIntBool(mFlags & eDIRTY); }
PX_FORCE_INLINE PxIntBool isReadyForSleeping() const { return PxIntBool(mFlags & eREADY_FOR_SLEEPING); }
PX_FORCE_INLINE NodeType getNodeType() const { return NodeType(mType); }
};
struct Island
{
PxNodeIndex mRootNode;
PxNodeIndex mLastNode;
PxU32 mNodeCount[Node::eTYPE_COUNT];
PxU32 mActiveIndex;
EdgeIndex mFirstEdge[Edge::eEDGE_TYPE_COUNT], mLastEdge[Edge::eEDGE_TYPE_COUNT];
PxU32 mEdgeCount[Edge::eEDGE_TYPE_COUNT];
Island() : mActiveIndex(IG_INVALID_ISLAND)
{
for(PxU32 a = 0; a < Edge::eEDGE_TYPE_COUNT; ++a)
{
mFirstEdge[a] = IG_INVALID_EDGE;
mLastEdge[a] = IG_INVALID_EDGE;
mEdgeCount[a] = 0;
}
for(PxU32 a = 0; a < Node::eTYPE_COUNT; ++a)
{
mNodeCount[a] = 0;
}
}
};
struct TraversalState
{
PxNodeIndex mNodeIndex;
PxU32 mCurrentIndex;
PxU32 mPrevIndex;
PxU32 mDepth;
TraversalState()
{
}
TraversalState( PxNodeIndex nodeIndex, PxU32 currentIndex, PxU32 prevIndex, PxU32 depth) :
mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth)
{
}
};
// Priority-queue entry: a traversal record plus the hop count used as its priority.
struct QueueElement
{
	TraversalState* mState;
	PxU32 mHopCount;

	// Deliberately leaves members uninitialized (filled in before being enqueued).
	QueueElement()
	{
	}

	QueueElement(TraversalState* traversal, PxU32 hops) :
		mState(traversal),
		mHopCount(hops)
	{
	}
};
struct NodeComparator
{
NodeComparator()
{
}
bool operator() (const QueueElement& node0, const QueueElement& node1) const
{
return node0.mHopCount < node1.mHopCount;
}
private:
NodeComparator& operator = (const NodeComparator&);
};
// PT: island-manager data used by both CPU & GPU code.
// This is managed by external code (e.g. SimpleIslandManager) and passed as const data to IslandSim.
// Maps an edge index to the two node indices it connects. Owned externally
// (e.g. SimpleIslandManager) and handed to IslandSim as read-only data.
class CPUExternalData
{
public:
// Node at slot 0 of edge 'index' (stored at 2*index).
PX_FORCE_INLINE PxNodeIndex getNodeIndex1(IG::EdgeIndex index) const { return mEdgeNodeIndices[2 * index]; }
// Node at slot 1 of edge 'index' (stored at 2*index + 1).
PX_FORCE_INLINE PxNodeIndex getNodeIndex2(IG::EdgeIndex index) const { return mEdgeNodeIndices[2 * index + 1]; }
//KS - stores node indices for a given edge. Node index 0 is at 2* edgeId and NodeIndex1 is at 2*edgeId + 1
//can also be used for edgeInstance indexing so there's no need to figure out outboundNode ID either!
Cm::BlockArray<PxNodeIndex> mEdgeNodeIndices;
// PT: island-manager data only needed for the GPU version, but stored in CPU code.
// This is managed by external code (e.g. SimpleIslandManager) and passed as non-const data to only one of the IslandSims.
// (It is otherwise optional). IslandSim will create/update this data during island gen.
// GPU-only island-gen side data (per-edge partition-edge lists, destroyed partition
// edges, active-contact bitmap). Owned externally; written by IslandSim during island gen.
class GPUExternalData
{
public:
GPUExternalData() :
mFirstPartitionEdges ("mFirstPartitionEdges"),
mDestroyedPartitionEdges ("mDestroyedPartitionEdges"),
mNpIndexPtr (NULL)
{
}
///////////////////////////////////////////////////////////////////////////
// Head of the partition-edge list for a given edge (indexed by EdgeIndex).
PX_FORCE_INLINE PartitionEdge* getFirstPartitionEdge(IG::EdgeIndex edgeIndex) const { return mFirstPartitionEdges[edgeIndex]; }
PX_FORCE_INLINE void setFirstPartitionEdge(IG::EdgeIndex edgeIndex, PartitionEdge* partitionEdge) { mFirstPartitionEdges[edgeIndex] = partitionEdge; }
PxArray<PartitionEdge*> mFirstPartitionEdges;
///////////////////////////////////////////////////////////////////////////
// Partition edges destroyed this frame; cleared after consumption.
PX_FORCE_INLINE PxU32 getNbDestroyedPartitionEdges() const { return mDestroyedPartitionEdges.size(); }
PX_FORCE_INLINE const PartitionEdge*const* getDestroyedPartitionEdges() const { return mDestroyedPartitionEdges.begin(); }
PX_FORCE_INLINE PartitionEdge** getDestroyedPartitionEdges() { return mDestroyedPartitionEdges.begin(); }
PX_FORCE_INLINE void clearDestroyedPartitionEdges() { mDestroyedPartitionEdges.forceSize_Unsafe(0); }
PxArray<PartitionEdge*> mDestroyedPartitionEdges;
///////////////////////////////////////////////////////////////////////////
// One bit per contact-manager edge; set while the edge is active.
PX_FORCE_INLINE const PxBitMap& getActiveContactManagerBitmap() const { return mActiveContactEdges; }
PxBitMap mActiveContactEdges;
///////////////////////////////////////////////////////////////////////////
// PT: these ones are strange, used to store an unrelated ptr from the outside, and only for GPU
PX_FORCE_INLINE void setEdgeNodeIndexPtr(PxU32* ptr) { mNpIndexPtr = ptr; }
PX_FORCE_INLINE PxU32* getEdgeNodeIndexPtr() const { return mNpIndexPtr; }
PxU32* mNpIndexPtr;
};
// Island manager: maintains the constraint graph (nodes + edges), partitions it into
// islands, and tracks activation/deactivation of nodes, edges and whole islands.
// The swap-remove bookkeeping in the private mark* helpers is order-critical.
class IslandSim
{
PX_NOCOPY(IslandSim)
HandleManager<IslandId> mIslandHandles; //! Handle manager for islands
// PT: these arrays are parallel, all indexed by PxNodeIndex::index()
PxArray<Node> mNodes; //! The nodes used in the constraint graph
PxArray<PxU32> mActiveNodeIndex; //! The active node index for each node
PxArray<PxU32> mHopCounts; //! The observed number of "hops" from a given node to its root node. May be inaccurate but used to accelerate searches.
PxArray<PxNodeIndex> mFastRoute; //! The observed last route from a given node to the root node. We try the fast route (unless its broken) before trying others.
PxArray<IslandId> mIslandIds; //! The array of per-node island ids
//
Cm::BlockArray<Edge> mEdges;
Cm::BlockArray<EdgeInstance> mEdgeInstances; //! Edges used to connect nodes in the constraint graph
PxArray<Island> mIslands; //! The array of islands
PxArray<PxU32> mIslandStaticTouchCount; //! Array of static touch counts per-island
PxArray<PxNodeIndex> mActiveNodes[Node::eTYPE_COUNT]; //! An array of active nodes
PxArray<PxNodeIndex> mActiveKinematicNodes; //! An array of active or referenced kinematic nodes
PxArray<EdgeIndex> mActivatedEdges[Edge::eEDGE_TYPE_COUNT]; //! An array of active edges
PxU32 mActiveEdgeCount[Edge::eEDGE_TYPE_COUNT];
PxBitMap mIslandAwake; //! Indicates whether an island is awake or not
//An array of active islands
PxArray<IslandId> mActiveIslands;
// NOTE(review): sized with Edge::eEDGE_TYPE_COUNT but indexed by node type in
// markInactive/getNbNodesToActivate below - the two counts happen to match; verify.
PxU32 mInitialActiveNodeCount[Edge::eEDGE_TYPE_COUNT];
PxArray<PxNodeIndex> mNodesToPutToSleep[Node::eTYPE_COUNT];
//Input to this frame's island management (changed nodes/edges)
//Input list of changes observed this frame. If there no changes, no work to be done.
PxArray<EdgeIndex> mDirtyEdges[Edge::eEDGE_TYPE_COUNT];
//Dirty nodes. These nodes lost at least one connection so we need to recompute islands from these nodes
//PxArray<NodeIndex> mDirtyNodes;
PxBitMap mDirtyMap;
#if IG_LIMIT_DIRTY_NODES
PxU32 mLastMapIndex;
#endif
//An array of nodes to activate
PxArray<PxNodeIndex> mActivatingNodes;
PxArray<EdgeIndex> mDestroyedEdges;
//Temporary, transient data used for traversals. TODO - move to PxsSimpleIslandManager. Or if we keep it here, we can
//process multiple island simulations in parallel
Cm::PriorityQueue<QueueElement, NodeComparator> mPriorityQueue; //! Priority queue used for graph traversal
PxArray<TraversalState> mVisitedNodes; //! The list of nodes visited in the current traversal
PxBitMap mVisitedState; //! Indicates whether a node has been visited
PxArray<EdgeIndex> mIslandSplitEdges[Edge::eEDGE_TYPE_COUNT];
PxArray<EdgeIndex> mDeactivatingEdges[Edge::eEDGE_TYPE_COUNT];
public:
// PT: we could perhaps instead pass these as param whenever needed. The coupling otherwise makes it more difficult to unit-test IslandSim in isolation.
const CPUExternalData& mCpuData; // PT: from the simple island manager, shared between accurate/speculative island sim
GPUExternalData* mGpuData; // PT: from the simple island manager, for accurate island sim (null otherwise) and only needed for the GPU version.
protected:
const PxU64 mContextId;
public:
IslandSim(const CPUExternalData& cpuData, GPUExternalData* gpuData, PxU64 contextID);
~IslandSim() {}
// Graph mutation entry points.
void addNode(bool isActive, bool isKinematic, Node::NodeType type, PxNodeIndex nodeIndex, void* object);
void activateNode(PxNodeIndex index);
void deactivateNode(PxNodeIndex index);
void putNodeToSleep(PxNodeIndex index);
void removeConnection(EdgeIndex edgeIndex);
// Accessors over the per-type active-node arrays; "to activate" entries are the
// tail of the active array past mInitialActiveNodeCount.
PX_FORCE_INLINE PxU32 getNbActiveNodes(Node::NodeType type) const { return mActiveNodes[type].size(); }
PX_FORCE_INLINE const PxNodeIndex* getActiveNodes(Node::NodeType type) const { return mActiveNodes[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActiveKinematics() const { return mActiveKinematicNodes.size(); }
PX_FORCE_INLINE const PxNodeIndex* getActiveKinematics() const { return mActiveKinematicNodes.begin(); }
PX_FORCE_INLINE PxU32 getNbNodesToActivate(Node::NodeType type) const { return mActiveNodes[type].size() - mInitialActiveNodeCount[type]; }
PX_FORCE_INLINE const PxNodeIndex* getNodesToActivate(Node::NodeType type) const { return mActiveNodes[type].begin() + mInitialActiveNodeCount[type]; }
PX_FORCE_INLINE PxU32 getNbNodesToDeactivate(Node::NodeType type) const { return mNodesToPutToSleep[type].size(); }
PX_FORCE_INLINE const PxNodeIndex* getNodesToDeactivate(Node::NodeType type) const { return mNodesToPutToSleep[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActivatedEdges(Edge::EdgeType type) const { return mActivatedEdges[type].size(); }
PX_FORCE_INLINE const EdgeIndex* getActivatedEdges(Edge::EdgeType type) const { return mActivatedEdges[type].begin(); }
PX_FORCE_INLINE PxU32 getNbActiveEdges(Edge::EdgeType type) const { return mActiveEdgeCount[type]; }
// Returns the user object attached to a node; 'type' is only used to assert consistency.
PX_FORCE_INLINE void* getObject(PxNodeIndex nodeIndex, Node::NodeType type) const
{
const Node& node = mNodes[nodeIndex.index()];
PX_ASSERT(node.mType == type);
PX_UNUSED(type);
return node.mObject;
}
// Resets the per-frame deactivation lists without freeing their storage.
PX_FORCE_INLINE void clearDeactivations()
{
for (PxU32 i = 0; i < Node::eTYPE_COUNT; ++i)
{
mNodesToPutToSleep[i].forceSize_Unsafe(0);
mDeactivatingEdges[i].forceSize_Unsafe(0);
}
}
PX_FORCE_INLINE const Island& getIsland(IG::IslandId islandIndex) const { return mIslands[islandIndex]; }
PX_FORCE_INLINE const Island& getIsland(const PxNodeIndex& nodeIndex) const { PX_ASSERT(mIslandIds[nodeIndex.index()] != IG_INVALID_ISLAND); return mIslands[mIslandIds[nodeIndex.index()]]; }
PX_FORCE_INLINE PxU32 getNbActiveIslands() const { return mActiveIslands.size(); }
PX_FORCE_INLINE const IslandId* getActiveIslands() const { return mActiveIslands.begin(); }
PX_FORCE_INLINE PxU32 getNbDeactivatingEdges(const IG::Edge::EdgeType edgeType) const { return mDeactivatingEdges[edgeType].size(); }
PX_FORCE_INLINE const EdgeIndex* getDeactivatingEdges(const IG::Edge::EdgeType edgeType) const { return mDeactivatingEdges[edgeType].begin(); }
// PT: this is not actually used externally
//PX_FORCE_INLINE PxU32 getNbDestroyedEdges() const { return mDestroyedEdges.size(); }
//PX_FORCE_INLINE const EdgeIndex* getDestroyedEdges() const { return mDestroyedEdges.begin(); }
// PT: this is not actually used externally. Still used internally in IslandSim.
//PX_FORCE_INLINE PxU32 getNbDirtyEdges(IG::Edge::EdgeType type) const { return mDirtyEdges[type].size(); }
//PX_FORCE_INLINE const EdgeIndex* getDirtyEdges(IG::Edge::EdgeType type) const { return mDirtyEdges[type].begin(); }
PX_FORCE_INLINE PxU32 getNbEdges() const { return mEdges.size(); }
PX_FORCE_INLINE const Edge& getEdge(EdgeIndex edgeIndex) const { return mEdges[edgeIndex]; }
PX_FORCE_INLINE Edge& getEdge(EdgeIndex edgeIndex) { return mEdges[edgeIndex]; }
PX_FORCE_INLINE PxU32 getNbNodes() const { return mNodes.size(); }
PX_FORCE_INLINE const Node& getNode(const PxNodeIndex& nodeIndex) const { return mNodes[nodeIndex.index()]; }
PX_FORCE_INLINE PxU32 getActiveNodeIndex(const PxNodeIndex& nodeIndex) const { return mActiveNodeIndex[nodeIndex.index()]; }
PX_FORCE_INLINE const PxU32* getActiveNodeIndex() const { return mActiveNodeIndex.begin(); }
//PX_FORCE_INLINE PxU32 getNbActiveNodeIndex() const { return mActiveNodeIndex.size(); }
PX_FORCE_INLINE PxU32 getNbIslands() const { return mIslandStaticTouchCount.size(); }
PX_FORCE_INLINE const PxU32* getIslandStaticTouchCount() const { return mIslandStaticTouchCount.begin(); }
// Static touch count of the island containing the given node.
PX_FORCE_INLINE PxU32 getIslandStaticTouchCount(const PxNodeIndex& nodeIndex) const
{
PX_ASSERT(mIslandIds[nodeIndex.index()] != IG_INVALID_ISLAND);
return mIslandStaticTouchCount[mIslandIds[nodeIndex.index()]];
}
PX_FORCE_INLINE const IG::IslandId* getIslandIds() const { return mIslandIds.begin(); }
PX_FORCE_INLINE PxU64 getContextId() const { return mContextId; }
void setKinematic(PxNodeIndex nodeIndex);
void setDynamic(PxNodeIndex nodeIndex);
bool checkInternalConsistency() const;
// GPU-solver hooks: only toggle the ready-for-sleeping flag, nothing else.
PX_INLINE void activateNode_ForGPUSolver(PxNodeIndex index)
{
IG::Node& node = mNodes[index.index()];
node.clearIsReadyForSleeping(); //Clear the "isReadyForSleeping" flag. Just in case it was set
}
PX_INLINE void deactivateNode_ForGPUSolver(PxNodeIndex index)
{
IG::Node& node = mNodes[index.index()];
node.setIsReadyForSleeping();
}
// PT: these three functions added for multithreaded implementation of Sc::Scene::islandInsertion
void preallocateConnections(EdgeIndex handle);
bool addConnectionPreallocated(PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Edge::EdgeType edgeType, EdgeIndex handle);
void addDelayedDirtyEdges(PxU32 nbHandles, const EdgeIndex* handles);
// PT: called by SimpleIslandManager. Made public to remove friendship, make the API clearer, and unit-testable.
void addConnection(PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Edge::EdgeType edgeType, EdgeIndex handle);
void wakeIslands(); // PT: this is always followed by a call to processNewEdges(). Merge the two?
void wakeIslands2();
void processNewEdges();
// PT: called by ThirdPassTask::runInternal. Made public to remove friendship, make the API clearer, and unit-testable.
void removeDestroyedEdges(); // PT: this is always followed by a call to processLostEdges(). Merge the two?
void processLostEdges(const PxArray<PxNodeIndex>& destroyedNodes, bool allowDeactivation, bool permitKinematicDeactivation, PxU32 dirtyNodeLimit);
private:
void wakeIslandsInternal(bool flag);
void insertNewEdges();
void removeConnectionInternal(EdgeIndex edgeIndex);
void addConnectionToGraph(EdgeIndex index);
void removeConnectionFromGraph(EdgeIndex edgeIndex);
//Merges 2 islands together. The returned id is the id of the merged island
IslandId mergeIslands(IslandId island0, IslandId island1, PxNodeIndex node0, PxNodeIndex node1);
void mergeIslandsInternal(Island& island0, Island& island1, IslandId islandId0, IslandId islandId1, PxNodeIndex node0, PxNodeIndex node1);
void unwindRoute(PxU32 traversalIndex, PxNodeIndex lastNode, PxU32 hopCount, IslandId id);
void activateIslandInternal(const Island& island);
void activateIsland(IslandId island);
void deactivateIsland(IslandId island);
#if IG_SANITY_CHECKS
bool canFindRoot(PxNodeIndex startNode, PxNodeIndex targetNode, PxArray<PxNodeIndex>* visitedNodes);
#endif
bool tryFastPath(PxNodeIndex startNode, PxNodeIndex targetNode, IslandId islandId);
bool findRoute(PxNodeIndex startNode, PxNodeIndex targetNode, IslandId islandId);
#if PX_DEBUG
bool isPathTo(PxNodeIndex startNode, PxNodeIndex targetNode) const;
#endif
void activateNodeInternal(PxNodeIndex index);
void deactivateNodeInternal(PxNodeIndex index);
PX_FORCE_INLINE void makeEdgeActive(EdgeInstanceIndex index, bool testEdgeType);
IslandId addNodeToIsland(PxNodeIndex nodeIndex1, PxNodeIndex nodeIndex2, IslandId islandId2, bool active1, bool active2);
/* PX_FORCE_INLINE void notifyReadyForSleeping(const PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
//PX_ASSERT(node.isActive());
node.setIsReadyForSleeping();
}
PX_FORCE_INLINE void notifyNotReadyForSleeping(const PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
PX_ASSERT(node.isActive() || node.isActivating());
node.clearIsReadyForSleeping();
}*/
// Appends the island to the active list and records its slot in mActiveIndex.
PX_FORCE_INLINE void markIslandActive(IslandId islandId)
{
Island& island = mIslands[islandId];
PX_ASSERT(!mIslandAwake.test(islandId));
PX_ASSERT(island.mActiveIndex == IG_INVALID_ISLAND);
mIslandAwake.set(islandId);
island.mActiveIndex = mActiveIslands.size();
mActiveIslands.pushBack(islandId);
}
// Swap-removes the island from the active list (last entry moves into its slot).
PX_FORCE_INLINE void markIslandInactive(IslandId islandId)
{
Island& island = mIslands[islandId];
PX_ASSERT(mIslandAwake.test(islandId));
PX_ASSERT(island.mActiveIndex != IG_INVALID_ISLAND);
PX_ASSERT(mActiveIslands[island.mActiveIndex] == islandId);
IslandId replaceId = mActiveIslands[mActiveIslands.size()-1];
PX_ASSERT(mIslandAwake.test(replaceId));
Island& replaceIsland = mIslands[replaceId];
replaceIsland.mActiveIndex = island.mActiveIndex;
mActiveIslands[island.mActiveIndex] = replaceId;
mActiveIslands.forceSize_Unsafe(mActiveIslands.size()-1);
island.mActiveIndex = IG_INVALID_ISLAND;
mIslandAwake.reset(islandId);
}
// Adds a kinematic node to the active-kinematics list if it is not already tracked.
PX_FORCE_INLINE void markKinematicActive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(node.isKinematic());
if(node.mActiveRefCount == 0 && mActiveNodeIndex[index] == PX_INVALID_NODE)
{
//PX_ASSERT(mActiveNodeIndex[index] == PX_INVALID_NODE);
//node.mActiveNodeIndex = mActiveKinematicNodes.size();
mActiveNodeIndex[index] = mActiveKinematicNodes.size();
mActiveKinematicNodes.pushBack(nodeIndex);
}
}
// Swap-removes a kinematic node from the active-kinematics list once nothing references it.
PX_FORCE_INLINE void markKinematicInactive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PX_ASSERT(mActiveKinematicNodes[mActiveNodeIndex[index]].index() == index);
if(node.mActiveRefCount == 0)
{
//Only remove from active kinematic list if it has no active contacts referencing it *and* it is asleep
if(mActiveNodeIndex[index] != PX_INVALID_NODE)
{
//Need to verify active node index because there is an edge case where a node could be woken, then put to
//sleep in the same frame. This would mean that it would not have an active index at this stage.
PxNodeIndex replaceIndex = mActiveKinematicNodes.back();
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == mActiveKinematicNodes.size() - 1);
mActiveNodeIndex[replaceIndex.index()] = mActiveNodeIndex[index];
mActiveKinematicNodes[mActiveNodeIndex[index]] = replaceIndex;
mActiveKinematicNodes.forceSize_Unsafe(mActiveKinematicNodes.size() - 1);
mActiveNodeIndex[index] = PX_INVALID_NODE;
}
}
}
// Appends a dynamic node to its per-type active list and records its slot.
PX_FORCE_INLINE void markActive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(!node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] == PX_INVALID_NODE);
mActiveNodeIndex[index] = mActiveNodes[node.mType].size();
mActiveNodes[node.mType].pushBack(nodeIndex);
}
// Removes a dynamic node from its per-type active list. If the node is within the
// "initially active" prefix, it is first swapped to that prefix's boundary (which
// shrinks), then swap-removed from the array tail.
PX_FORCE_INLINE void markInactive(PxNodeIndex nodeIndex)
{
const PxU32 index = nodeIndex.index();
const Node& node = mNodes[index];
PX_ASSERT(!node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PxArray<PxNodeIndex>& activeNodes = mActiveNodes[node.mType];
PX_ASSERT(activeNodes[mActiveNodeIndex[index]].index() == index);
const PxU32 initialActiveNodeCount = mInitialActiveNodeCount[node.mType];
if(mActiveNodeIndex[index] < initialActiveNodeCount)
{
//It's in the initial active node set. We retain a list of active nodes, where the existing active nodes
//are at the beginning of the array and the newly activated nodes are at the end of the array...
//The solution is to move the node to the end of the initial active node list in this case
PxU32 activeNodeIndex = mActiveNodeIndex[index];
PxNodeIndex replaceIndex = activeNodes[initialActiveNodeCount - 1];
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == initialActiveNodeCount - 1);
mActiveNodeIndex[index] = mActiveNodeIndex[replaceIndex.index()];
mActiveNodeIndex[replaceIndex.index()] = activeNodeIndex;
activeNodes[activeNodeIndex] = replaceIndex;
activeNodes[mActiveNodeIndex[index]] = nodeIndex;
mInitialActiveNodeCount[node.mType]--;
}
PX_ASSERT(!node.isKinematic());
PX_ASSERT(mActiveNodeIndex[index] != PX_INVALID_NODE);
PX_ASSERT(activeNodes[mActiveNodeIndex[index]].index() == index);
PxNodeIndex replaceIndex = activeNodes.back();
PX_ASSERT(mActiveNodeIndex[replaceIndex.index()] == activeNodes.size() - 1);
mActiveNodeIndex[replaceIndex.index()] = mActiveNodeIndex[index];
activeNodes[mActiveNodeIndex[index]] = replaceIndex;
activeNodes.forceSize_Unsafe(activeNodes.size() - 1);
mActiveNodeIndex[index] = PX_INVALID_NODE;
}
// Puts an edge in the activating list, bumps the active count (and the GPU contact
// bitmap), and takes an active reference on both endpoint nodes; kinematic endpoints
// are pushed into the active-kinematics list if this is their first reference.
PX_FORCE_INLINE void markEdgeActive(EdgeIndex index, PxNodeIndex nodeIndex1, PxNodeIndex nodeIndex2)
{
Edge& edge = mEdges[index];
PX_ASSERT((edge.mEdgeState & Edge::eACTIVATING) == 0);
edge.mEdgeState |= Edge::eACTIVATING;
mActivatedEdges[edge.mEdgeType].pushBack(index);
mActiveEdgeCount[edge.mEdgeType]++;
//Set the active bit...
if(mGpuData && edge.mEdgeType == Edge::eCONTACT_MANAGER)
mGpuData->mActiveContactEdges.set(index);
const PxU32 index1 = nodeIndex1.index();
const PxU32 index2 = nodeIndex2.index();
if (index1 != PX_INVALID_NODE && index2 != PX_INVALID_NODE)
{
PX_ASSERT((!mNodes[index1].isKinematic()) || (!mNodes[index2].isKinematic()) || edge.getEdgeType() == IG::Edge::eCONTACT_MANAGER);
{
Node& node = mNodes[index1];
if(node.mActiveRefCount == 0 && node.isKinematic() && !node.isActiveOrActivating())
markKinematicActive(nodeIndex1); //Add to active kinematic list
node.mActiveRefCount++;
}
{
Node& node = mNodes[index2];
if(node.mActiveRefCount == 0 && node.isKinematic() && !node.isActiveOrActivating())
markKinematicActive(nodeIndex2); //Add to active kinematic list
node.mActiveRefCount++;
}
}
}
void removeEdgeFromActivatingList(EdgeIndex index);
// Unlinks an edge from its island's per-type intrusive list and fixes up
// first/last pointers and the count.
PX_FORCE_INLINE void removeEdgeFromIsland(Island& island, EdgeIndex edgeIndex)
{
Edge& edge = mEdges[edgeIndex];
if(edge.mNextIslandEdge != IG_INVALID_EDGE)
{
PX_ASSERT(mEdges[edge.mNextIslandEdge].mPrevIslandEdge == edgeIndex);
mEdges[edge.mNextIslandEdge].mPrevIslandEdge = edge.mPrevIslandEdge;
}
else
{
PX_ASSERT(island.mLastEdge[edge.mEdgeType] == edgeIndex);
island.mLastEdge[edge.mEdgeType] = edge.mPrevIslandEdge;
}
if(edge.mPrevIslandEdge != IG_INVALID_EDGE)
{
PX_ASSERT(mEdges[edge.mPrevIslandEdge].mNextIslandEdge == edgeIndex);
mEdges[edge.mPrevIslandEdge].mNextIslandEdge = edge.mNextIslandEdge;
}
else
{
PX_ASSERT(island.mFirstEdge[edge.mEdgeType] == edgeIndex);
island.mFirstEdge[edge.mEdgeType] = edge.mNextIslandEdge;
}
island.mEdgeCount[edge.mEdgeType]--;
edge.mNextIslandEdge = edge.mPrevIslandEdge = IG_INVALID_EDGE;
}
// Appends an (unlinked) edge to the tail of its island's per-type intrusive list.
PX_FORCE_INLINE void addEdgeToIsland(Island& island, EdgeIndex edgeIndex)
{
Edge& edge = mEdges[edgeIndex];
PX_ASSERT(edge.mNextIslandEdge == IG_INVALID_EDGE && edge.mPrevIslandEdge == IG_INVALID_EDGE);
if(island.mLastEdge[edge.mEdgeType] != IG_INVALID_EDGE)
{
PX_ASSERT(mEdges[island.mLastEdge[edge.mEdgeType]].mNextIslandEdge == IG_INVALID_EDGE);
mEdges[island.mLastEdge[edge.mEdgeType]].mNextIslandEdge = edgeIndex;
}
else
{
PX_ASSERT(island.mFirstEdge[edge.mEdgeType] == IG_INVALID_EDGE);
island.mFirstEdge[edge.mEdgeType] = edgeIndex;
}
edge.mPrevIslandEdge = island.mLastEdge[edge.mEdgeType];
island.mLastEdge[edge.mEdgeType] = edgeIndex;
island.mEdgeCount[edge.mEdgeType]++;
}
// Unlinks a node from its island's intrusive node list and fixes up
// root/last pointers and the per-type count.
PX_FORCE_INLINE void removeNodeFromIsland(Island& island, PxNodeIndex nodeIndex)
{
Node& node = mNodes[nodeIndex.index()];
if(node.mNextNode.isValid())
{
PX_ASSERT(mNodes[node.mNextNode.index()].mPrevNode.index() == nodeIndex.index());
mNodes[node.mNextNode.index()].mPrevNode = node.mPrevNode;
}
else
{
PX_ASSERT(island.mLastNode.index() == nodeIndex.index());
island.mLastNode = node.mPrevNode;
}
if(node.mPrevNode.isValid())
{
PX_ASSERT(mNodes[node.mPrevNode.index()].mNextNode.index() == nodeIndex.index());
mNodes[node.mPrevNode.index()].mNextNode = node.mNextNode;
}
else
{
PX_ASSERT(island.mRootNode.index() == nodeIndex.index());
island.mRootNode = node.mNextNode;
}
island.mNodeCount[node.mType]--;
node.mNextNode = node.mPrevNode = PxNodeIndex();
}
};
}
}
#endif

View File

@@ -0,0 +1,49 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_KERNEL_WRANGLER_H
#define PXS_KERNEL_WRANGLER_H
#include "foundation/PxUserAllocated.h"
namespace physx
{
class PxCudaContextManager;
class KernelWrangler;
// Thin holder pairing the CUDA kernel wrangler with its context manager.
// Plain public members; ownership is managed by the code that populates them.
class PxsKernelWranglerManager : public PxUserAllocated
{
public:
PX_FORCE_INLINE KernelWrangler* getKernelWrangler() { return mKernelWrangler; }
PX_FORCE_INLINE PxCudaContextManager* getCudaContextManager() { return mCudaContextManager; }
KernelWrangler* mKernelWrangler;
PxCudaContextManager* mCudaContextManager;
};
}
#endif

View File

@@ -0,0 +1,183 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MATERIAL_COMBINER_H
#define PXS_MATERIAL_COMBINER_H
#include "PxsMaterialCore.h"
namespace physx
{
// Combines two material scalars according to the given PxCombineMode.
// Any unrecognized mode yields zero.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal combineScalars(PxReal a, PxReal b, PxI32 combineMode)
{
	if(combineMode == PxCombineMode::eAVERAGE)
		return 0.5f * (a + b);
	if(combineMode == PxCombineMode::eMIN)
		return PxMin(a,b);
	if(combineMode == PxCombineMode::eMULTIPLY)
		return a * b;
	if(combineMode == PxCombineMode::eMAX)
		return PxMax(a,b);
	return PxReal(0);
}
// Identical combine rules to combineScalars() above (average/min/multiply/max,
// zero for any other mode); kept as a separate entry point for existing callers.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal PxsCombinePxReal(PxReal val0, PxReal val1, PxI32 combineMode)
{
	return combineScalars(val0, val1, combineMode);
}
// Combines the properties of two materials into the effective values used for a
// contact between them.
//
// Outputs:
//   combinedStaticFriction / combinedDynamicFriction - isotropic friction values
//       (zero when either material disables friction)
//   combinedRestitution  - combined restitution; negative values encode compliant contacts
//   combinedMaterialFlags - union of both materials' flags (plus eDISABLE_STRONG_FRICTION
//       when friction is disabled)
//   combinedDamping - combined compliant-contact damping
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxsCombineMaterials(const PxsMaterialData& mat0Data, const PxsMaterialData& mat1Data,
	PxReal& combinedStaticFriction, PxReal& combinedDynamicFriction,
	PxReal& combinedRestitution, PxU32& combinedMaterialFlags, PxReal& combinedDamping)
{
	const PxReal r0 = mat0Data.restitution;
	const PxReal r1 = mat1Data.restitution;
	const bool compliant0 = r0 < 0.0f;	// negative restitution marks a compliant material
	const bool compliant1 = r1 < 0.0f;
	const bool exactlyOneCompliant = compliant0 ^ compliant1;
	const bool bothCompliant = compliant0 & compliant1;
	const bool compliantAcc0 = !!(mat0Data.flags & PxMaterialFlag::eCOMPLIANT_ACCELERATION_SPRING);
	const bool compliantAcc1 = !!(mat1Data.flags & PxMaterialFlag::eCOMPLIANT_ACCELERATION_SPRING);
	const bool exactlyOneAccCompliant = compliantAcc0 ^ compliantAcc1;

	// combine restitution
	{
		// For rigid-rigid or compliant-compliant interactions, follow the user's choice of combine mode but make sure it stays negative for multiply.
		// For rigid-compliant interactions, we go with the compliant behavior.
		// For forceCompliant-accelerationCompliant, we go with the accelerationCompliant behavior
		if(bothCompliant && exactlyOneAccCompliant)
		{
			combinedRestitution = compliantAcc0 ? r0 : r1;
		}
		else
		{
			const PxCombineMode::Enum combineMode =
				exactlyOneCompliant ? PxCombineMode::eMIN
									: PxMax(mat0Data.getRestitutionCombineMode(), mat1Data.getRestitutionCombineMode());
			// Multiplying two negative (compliant) restitutions would flip the
			// sign positive, so restore the negative sign in that case.
			const PxReal flipSign = (bothCompliant && (combineMode == PxCombineMode::eMULTIPLY)) ? -1.0f : 1.0f;
			combinedRestitution = flipSign * combineScalars(r0, r1, combineMode);
		}
	}

	// combine damping
	{
		// For rigid-rigid or compliant-compliant interactions, follow the user's choice of combine mode.
		// For rigid-compliant interactions, we go with the compliant behavior.
		// For forceCompliant-accelerationCompliant, we go with the accelerationCompliant behavior
		const PxReal d0 = mat0Data.damping;
		const PxReal d1 = mat1Data.damping;
		if(bothCompliant && exactlyOneAccCompliant)
		{
			combinedDamping = compliantAcc0 ? d0 : d1;
		}
		else
		{
			const PxCombineMode::Enum combineMode =
				exactlyOneCompliant ? PxCombineMode::eMAX
									: PxMax(mat0Data.getDampingCombineMode(), mat1Data.getDampingCombineMode());
			combinedDamping = combineScalars(d0, d1, combineMode);
		}
	}

	// combine isotropic friction
	{
		const PxU32 combineFlags = (mat0Data.flags | mat1Data.flags);	// eventually set DisStrongFric flag, lower all others
		if(!(combineFlags & PxMaterialFlag::eDISABLE_FRICTION))
		{
			// fixed typo: was "fictionCombineMode"
			const PxI32 frictionCombineMode = PxMax(mat0Data.getFrictionCombineMode(), mat1Data.getFrictionCombineMode());
			const PxReal dynFriction = PxsCombinePxReal(mat0Data.dynamicFriction, mat1Data.dynamicFriction, frictionCombineMode);
			const PxReal staFriction = PxsCombinePxReal(mat0Data.staticFriction, mat1Data.staticFriction, frictionCombineMode);

			// isotropic case: clamp dynamic friction to >= 0 and make sure static
			// friction never drops below dynamic friction.
			const PxReal fDynFriction = PxMax(dynFriction, 0.0f);
#if PX_CUDA_COMPILER
			const PxReal fStaFriction = (staFriction - fDynFriction) >= 0 ? staFriction : fDynFriction;
#else
			const PxReal fStaFriction = physx::intrinsics::fsel(staFriction - fDynFriction, staFriction, fDynFriction);
#endif
			combinedDynamicFriction = fDynFriction;
			combinedStaticFriction = fStaFriction;
			combinedMaterialFlags = combineFlags;
		}
		else
		{
			combinedMaterialFlags = combineFlags | PxMaterialFlag::eDISABLE_STRONG_FRICTION;
			combinedDynamicFriction = 0.0f;
			combinedStaticFriction = 0.0f;
		}
	}
}
}
#endif

View File

@@ -0,0 +1,51 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_MEMORY_MANAGER_H
#define PXS_MEMORY_MANAGER_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
class PxVirtualAllocatorCallback;
// Abstract factory for the allocator callbacks used by the SDK's pinned/virtual
// allocator-backed arrays.
class PxsMemoryManager : public PxUserAllocated
{
public:
	virtual ~PxsMemoryManager(){}

	// Allocator used for host (CPU) memory.
	virtual PxVirtualAllocatorCallback* getHostMemoryAllocator() = 0;
	// Allocator used for device memory. NOTE(review): presumably only meaningful
	// for the GPU implementation (see comment below) — confirm against the
	// concrete implementations.
	virtual PxVirtualAllocatorCallback* getDeviceMemoryAllocator() = 0;
};
// PT: this is for CPU, see createPxgMemoryManager for GPU
PxsMemoryManager* createDefaultMemoryManager();
}
#endif

View File

@@ -0,0 +1,60 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_NPHASE_COMMON_H
#define PXS_NPHASE_COMMON_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxAssert.h"
namespace physx
{
// Base class providing the id <-> (bucket, index) encoding shared by
// contact-manager containers. An id packs the owning container's bucket id into
// the low MaxBucketBits bits and the element index into the remaining high bits.
struct PxsContactManagerBase
{
	// Top bit, used to tag ids of newly created contact managers.
	static const PxU32 NEW_CONTACT_MANAGER_MASK = 0x80000000;
	// Number of low bits reserved for the bucket id (so at most 128 buckets).
	static const PxU32 MaxBucketBits = 7;

	const PxU32 mBucketId;	// bucket this container owns; baked into every id it hands out

	PxsContactManagerBase(const PxU32 bucketId) : mBucketId(bucketId)
	{
		PX_ASSERT(bucketId < (1 << MaxBucketBits));
	}

	// Packs an element index and this container's bucket id into a single id.
	// NOTE(review): the assert bound uses (MaxBucketBits - 1), i.e. index < 2^26,
	// but the shift by MaxBucketBits only preserves indices < 2^25 (or 2^24 if the
	// NEW_CONTACT_MANAGER_MASK bit must stay clear) — confirm whether the extra
	// bit of headroom in the assert is intentional.
	PX_FORCE_INLINE PxU32 computeId(const PxU32 index) const { PX_ASSERT(index < PxU32(1 << (32 - (MaxBucketBits - 1)))); return (index << MaxBucketBits) | (mBucketId); }

	// Recovers the element index from a packed id.
	static PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 computeIndexFromId(const PxU32 id) { return id >> MaxBucketBits; }
	// Recovers the bucket id from a packed id.
	static PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 computeBucketIndexFromId(const PxU32 id) { return id & ((1 << MaxBucketBits) - 1); }

private:
	PX_NOCOPY(PxsContactManagerBase)
};
}
#endif

View File

@@ -0,0 +1,191 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_NPHASE_IMPLEMENTATION_CONTEXT_H
#define PXS_NPHASE_IMPLEMENTATION_CONTEXT_H
#include "PxvNphaseImplementationContext.h"
#include "PxsContactManagerState.h"
#include "PxcNpCache.h"
#include "foundation/PxPinnedArray.h"
class PxsCMDiscreteUpdateTask;
namespace physx
{
// Per-bucket storage for contact-manager data: per-pair outputs, the mapping
// back to the owning PxsContactManager, narrow-phase caches, and the mirror
// buffers handed to the GPU pipeline. All arrays are indexed consistently by
// the pair's position in this container.
struct PxsContactManagers : PxsContactManagerBase
{
	PxArray<PxsContactManagerOutput> mOutputContactManagers;	// per-pair narrow-phase results
	PxArray<PxsContactManager*> mContactManagerMapping;			// pair slot -> owning contact manager
	PxArray<Gu::Cache> mCaches;									// per-pair narrow-phase caches

	// PT: these buffers should be in pinned memory but may not be if pinned allocation failed.
	PxPinnedArraySafe<const Sc::ShapeInteraction*> mShapeInteractionsGPU;
	PxFloatArrayPinnedSafe mRestDistancesGPU;
	PxPinnedArraySafe<PxsTorsionalFrictionData> mTorsionalPropertiesGPU;

	// 'callback' is the (pinned) allocator used for the GPU mirror buffers;
	// the CPU-side arrays use default allocation with debug names.
	PxsContactManagers(const PxU32 bucketId, PxVirtualAllocatorCallback* callback) : PxsContactManagerBase(bucketId),
		mOutputContactManagers	("mOutputContactManagers"),
		mContactManagerMapping	("mContactManagerMapping"),
		mCaches					("mCaches"),
		mShapeInteractionsGPU	(callback),
		mRestDistancesGPU		(callback),
		mTorsionalPropertiesGPU	(callback)
	{
	}

	// Resets all arrays to size 0 without releasing their capacity
	// (forceSize_Unsafe does not destroy elements or free memory).
	void clear()
	{
		mOutputContactManagers.forceSize_Unsafe(0);
		mContactManagerMapping.forceSize_Unsafe(0);
		mCaches.forceSize_Unsafe(0);
		mShapeInteractionsGPU.forceSize_Unsafe(0);
		mRestDistancesGPU.forceSize_Unsafe(0);
		mTorsionalPropertiesGPU.forceSize_Unsafe(0);
	}

private:
	PX_NOCOPY(PxsContactManagers)
};
// CPU (fallback) implementation of the narrow-phase context. Maintains two
// PxsContactManagers containers: mNarrowPhasePairs holds established pairs,
// mNewNarrowPhasePairs holds pairs registered this frame; new pairs are merged
// into the main container via appendContactManagers()/appendNewLostPairs().
class PxsNphaseImplementationContext : public PxvNphaseImplementationFallback
{
	PX_NOCOPY(PxsNphaseImplementationContext)
public:
	// 'index' is the bucket id baked into every npIndex handed out by the two
	// pair containers; 'callback' is the allocator for the GPU mirror buffers;
	// 'gpu' enables tracking of found/lost patch managers.
	PxsNphaseImplementationContext(PxsContext& context, IG::IslandSim* islandSim, PxVirtualAllocatorCallback* callback, PxU32 index, bool gpu) :
		PxvNphaseImplementationFallback	(context),
		mNarrowPhasePairs				(index, callback),
		mNewNarrowPhasePairs			(index, callback),
		mModifyCallback					(NULL),
		mIslandSim						(islandSim),
		mGPU							(gpu)
	{}

	// PxvNphaseImplementationContext
	virtual void destroy() PX_OVERRIDE PX_FINAL;
	virtual void updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation,
		PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShape) PX_OVERRIDE PX_FINAL;
	virtual void postBroadPhaseUpdateContactManager(PxBaseTask*) PX_OVERRIDE PX_FINAL {}
	virtual void secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void fetchUpdateContactManager() PX_OVERRIDE PX_FINAL {}
	virtual void registerContactManager(PxsContactManager* cm, const Sc::ShapeInteraction* shapeInteraction, PxI32 touching, PxU32 numPatches) PX_OVERRIDE PX_FINAL;
//	virtual void registerContactManagers(PxsContactManager** cm, Sc::ShapeInteraction** shapeInteractions, PxU32 nbContactManagers, PxU32 maxContactManagerId);
	virtual void unregisterContactManager(PxsContactManager* cm) PX_OVERRIDE PX_FINAL;
	virtual void refreshContactManager(PxsContactManager* cm) PX_OVERRIDE PX_FINAL;

	// Shape/aggregate/material registration is not needed by the CPU fallback — all no-ops.
	virtual void registerShape(const PxNodeIndex& /*nodeIndex*/, const PxsShapeCore& /*shapeCore*/, const PxU32 /*transformCacheID*/, PxActor* /*actor*/, const bool /*isDeformableSurface*/) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterShape(const PxsShapeCore& /*shapeCore*/, const PxU32 /*transformCacheID*/, const bool /*isDeformableSurface*/) PX_OVERRIDE PX_FINAL {}
	virtual void registerAggregate(const PxU32 /*transformCacheID*/) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsDeformableSurfaceMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsDeformableVolumeMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void registerMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void unregisterMaterial(const PxsPBDMaterialCore&) PX_OVERRIDE PX_FINAL {}
	virtual void updateShapeMaterial(const PxsShapeCore&) PX_OVERRIDE PX_FINAL {}
	virtual void startNarrowPhaseTasks() PX_OVERRIDE PX_FINAL {}

	// Merges mNewNarrowPhasePairs into mNarrowPhasePairs.
	virtual void appendContactManagers() PX_OVERRIDE PX_FINAL;
	virtual PxsContactManagerOutput& getNewContactManagerOutput(PxU32 npIndex) PX_OVERRIDE PX_FINAL;
	virtual PxsContactManagerOutputIterator getContactManagerOutputs() PX_OVERRIDE PX_FINAL;
	virtual void setContactModifyCallback(PxContactModifyCallback* callback) PX_OVERRIDE PX_FINAL { mModifyCallback = callback; }
	virtual void acquireContext() PX_OVERRIDE PX_FINAL {}
	virtual void releaseContext() PX_OVERRIDE PX_FINAL {}
	virtual void preallocateNewBuffers(PxU32 /*nbNewPairs*/, PxU32 /*maxIndex*/) PX_OVERRIDE PX_FINAL { /*TODO - implement if it's useful to do so*/}
	virtual void lock() PX_OVERRIDE PX_FINAL { mContactManagerMutex.lock(); }
	virtual void unlock() PX_OVERRIDE PX_FINAL { mContactManagerMutex.unlock(); }

	// Found/lost patch tracking is only populated when mGPU is set.
	virtual PxsContactManagerOutputCounts* getLostFoundPatchOutputCounts() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLostOutputCounts.begin() : NULL; }
	virtual PxsContactManager** getLostFoundPatchManagers() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLost.begin() : NULL; }
	virtual PxU32 getNbLostFoundPatchManagers() PX_OVERRIDE PX_FINAL { return mGPU ? mCmFoundLost.size() : 0; }

	// No GPU-resident buffers in the CPU fallback.
	virtual PxsContactManagerOutput* getGPUContactManagerOutputBase() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual PxReal* getGPURestDistances() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual Sc::ShapeInteraction** getGPUShapeInteractions() PX_OVERRIDE PX_FINAL { return NULL; }
	virtual PxsTorsionalFrictionData* getGPUTorsionalData() PX_OVERRIDE PX_FINAL { return NULL; }
	//~PxvNphaseImplementationContext

	// PxvNphaseImplementationFallback
	virtual void processContactManager(PxReal dt, PxsContactManagerOutput* cmOutputs, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void processContactManagerSecondPass(PxReal dt, PxBaseTask* continuation) PX_OVERRIDE PX_FINAL;
	virtual void unregisterContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void refreshContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void appendContactManagersFallback(PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual void removeContactManagersFallback(PxsContactManagerOutput* cmOutputs) PX_OVERRIDE PX_FINAL;
	virtual const Sc::ShapeInteraction*const* getShapeInteractionsGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mShapeInteractionsGPU.begin(); }
	virtual const PxReal* getRestDistancesGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mRestDistancesGPU.begin(); }
	virtual const PxsTorsionalFrictionData* getTorsionalDataGPU() const PX_OVERRIDE PX_FINAL { return mNarrowPhasePairs.mTorsionalPropertiesGPU.begin(); }
	//~PxvNphaseImplementationFallback

	PxArray<PxU32> mRemovedContactManagers;			// npIndices freed this frame
	PxsContactManagers mNarrowPhasePairs;			// established pairs
	PxsContactManagers mNewNarrowPhasePairs;		// pairs registered this frame, appended after narrow phase
	PxContactModifyCallback* mModifyCallback;
	IG::IslandSim* mIslandSim;
	PxMutex mContactManagerMutex;					// guards lock()/unlock() sections
	PxArray<PxsCMDiscreteUpdateTask*> mCmTasks;
	PxArray<PxsContactManagerOutputCounts> mCmFoundLostOutputCounts;	// only filled when mGPU
	PxArray<PxsContactManager*> mCmFoundLost;							// only filled when mGPU
	const bool mGPU;

private:
	void unregisterContactManagerInternal(PxU32 npIndex, PxsContactManagers& managers, PxsContactManagerOutput* cmOutputs);

	// Unregisters the pair at 'index' and shrinks the output array by one
	// (forceSize_Unsafe does not destroy elements or free memory).
	PX_FORCE_INLINE void unregisterAndForceSize(PxsContactManagers& cms, PxU32 index)
	{
		unregisterContactManagerInternal(index, cms, cms.mOutputContactManagers.begin());
		cms.mOutputContactManagers.forceSize_Unsafe(cms.mOutputContactManagers.size()-1);
	}

	void appendNewLostPairs();
};
}
#endif

View File

@@ -0,0 +1,122 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_PARTICLE_BUFFER_H
#define PXS_PARTICLE_BUFFER_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxUserAllocated.h"
#include "PxParticleSystemFlag.h"
namespace physx
{
class PxCudaContextManager;
struct PxParticleVolume;
struct PxParticleRigidFilterPair;
struct PxParticleRigidAttachment;
// Abstract base interface for particle data buffers. Accessors come in pairs:
// the D suffix presumably denotes device-side pointers and the H suffix
// host-side mirrors (see allocHostBuffers) — confirm against the implementation.
class PxsParticleBuffer
{
public:
	// Releases the buffer; instances are destroyed through release(), not delete
	// (the destructor is protected).
	virtual void release() = 0;

	virtual PxVec4* getPositionInvMassesD() const = 0;
	virtual PxVec4* getVelocitiesD() const = 0;
	virtual PxU32* getPhasesD() const = 0;
	virtual PxParticleVolume* getParticleVolumesD() const = 0;

	virtual PxVec4* getPositionInvMassesH() const = 0;
	virtual PxVec4* getVelocitiesH() const = 0;
	virtual PxU32* getPhasesH() const = 0;
	virtual PxParticleVolume* getParticleVolumesH() const = 0;

	// Active counts may be smaller than the allocated maxima.
	virtual void setNbActiveParticles(PxU32 nbActiveParticles) = 0;
	virtual PxU32 getNbActiveParticles() const = 0;
	virtual PxU32 getMaxParticles() const = 0;
	virtual PxU32 getNbParticleVolumes() const = 0;
	virtual void setNbParticleVolumes(PxU32 nbParticleVolumes) = 0;
	virtual PxU32 getMaxParticleVolumes() const = 0;

	virtual void setRigidFilters(PxParticleRigidFilterPair* filters, PxU32 nbFilters) = 0;
	virtual void setRigidAttachments(PxParticleRigidAttachment* attachments, PxU32 nbAttachments) = 0;

	virtual PxU32 getFlatListStartIndex() const = 0;
	// Marks buffer contents as dirty so they get picked up by the simulation.
	virtual void raiseFlags(PxParticleBufferFlag::Enum flags) = 0;
	virtual PxU32 getUniqueId() const = 0;
	// Allocates the host-side (H accessor) mirrors.
	virtual void allocHostBuffers() = 0;

protected:
	virtual ~PxsParticleBuffer() {}
};
// Particle buffer extended with diffuse-particle data (positions/lifetimes,
// velocities and spawn parameters).
class PxsParticleAndDiffuseBuffer : public PxsParticleBuffer
{
public:
	// xyz = position, w presumably remaining lifetime — confirm against implementation.
	virtual PxVec4* getDiffusePositionLifeTimeD() const = 0;
	virtual PxVec4* getDiffuseVelocitiesD() const = 0;
	virtual PxU32 getNbActiveDiffuseParticles() const = 0;
	virtual void setMaxActiveDiffuseParticles(PxU32 maxActiveDiffuseParticles) = 0;
	virtual PxU32 getMaxDiffuseParticles() const = 0;
	virtual void setDiffuseParticleParams(const PxDiffuseParticleParams& params) = 0;
	virtual const PxDiffuseParticleParams& getDiffuseParticleParams() const = 0;

protected:
	virtual ~PxsParticleAndDiffuseBuffer() {}
};
// Particle buffer extended with cloth topology: rest positions, triangles and
// springs, plus the partitioned cloth description consumed by the solver.
class PxsParticleClothBuffer : public PxsParticleBuffer
{
public:
	virtual PxVec4* getRestPositionsD() = 0;
	virtual PxU32* getTrianglesD() const = 0;
	virtual void setNbTriangles(PxU32 nbTriangles) = 0;
	virtual PxU32 getNbTriangles() const = 0;
	virtual PxU32 getNbSprings() const = 0;
	virtual PxParticleSpring* getSpringsD() = 0;
	virtual void setCloths(PxPartitionedParticleCloth& cloths) = 0;

protected:
	virtual ~PxsParticleClothBuffer() {}
};
// Particle buffer extended with rigid-aggregate data (per-rigid offsets into
// the particle arrays, stiffness coefficients and rigid-body transforms).
class PxsParticleRigidBuffer : public PxsParticleBuffer
{
public:
	virtual PxU32* getRigidOffsetsD() const = 0;
	virtual PxReal* getRigidCoefficientsD() const = 0;
	virtual PxVec4* getRigidLocalPositionsD() const = 0;
	virtual PxVec4* getRigidTranslationsD() const = 0;
	virtual PxVec4* getRigidRotationsD() const = 0;
	virtual PxVec4* getRigidLocalNormalsD() const = 0;
	virtual void setNbRigids(PxU32 nbRigids) = 0;
	virtual PxU32 getNbRigids() const = 0;

protected:
	virtual ~PxsParticleRigidBuffer() {}
};
}
#endif

View File

@@ -0,0 +1,131 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_PARTITION_EDGE_H
#define PXS_PARTITION_EDGE_H
// PT: this is a temporary place for code related to PartitionEdge. This seems to be a GPU-only class
// but for some reason some CPU bits do need it. Ideally it would be fully contained inside the GPU DLL.
#include "PxsIslandSim.h"
#include "PxcNpWorkUnit.h"
namespace physx
{
// PT: TODO: mNextPatch is almost always null so it would make sense to store that cold data somewhere else, e.g:
// - use one bit to mark the general case where mNextPatch is null
// - store non-null mNextPatch in a hashmap indexed by mUniqueIndex (no need to reserve the full memory for it)
//
// The only annoying bit is that the mechanism needed to actually walk the linked list (i.e. the hashmap) needs to be
// available in processPartitionEdges below, so that's more GPU stuff exposed to the CPU code. But I guess we crossed
// that line a while ago when the heads of the LLs moved to the low-level island manager anyway. And in fact we could
// put the two in the same structure eventually, like a PartitionEdgeManager.
//
// In any case the benefit of the change would be a smaller PartitionEdge. Going further, the nodes can be retrieved from
// the edge index, so if mNextPatch also disappears then only the unique ID remains, which.... can be derived from
// the PartitionEdge address?! So it would be just the "edge index" with some bits encoded in it, just 4 bytes.
//
// This is per-edge data so we could also merge this with CPUExternalData, which already contains the node indices, and
// has an implicit unique index as the index into mEdgeNodeIndices. But maybe we cannot because there can be multiple
// PartitionEdge for the same source edge (hence the linked list).
//
// Another idea would be to store the edge index instead. You would need access to the edge manager in CPU code but no
// hashmap or new structure is needed.
// One solver-partition entry for an island-manager edge. The island-manager
// edge index is stored shifted left by NB_BITS in mEdgeIndex, with the low
// NB_BITS used as flag storage (see the Enum below); getEdgeIndex() undoes the
// shift. Multiple PartitionEdges can exist for one source edge (one per contact
// patch), linked through mNextPatch.
struct PartitionEdge
{
	// Flag bits packed into the low NB_BITS of mEdgeIndex.
	enum Enum
	{
		HAS_INFINITE_MASS0	= (1<<0),
		HAS_INFINITE_MASS1	= (1<<1),
		HAS_THRESHOLD		= (1<<2),
		IS_CONTACT			= (1<<3),
		SPECIAL_HANDLED		= (1<<4),
		NB_BITS				= 5
	};

	PxNodeIndex mNode0;			//! The node index for node 0. Can be obtained from the edge index alternatively
	PxNodeIndex mNode1;			//! The node index for node 1. Can be obtained from the edge index alternatively
	PartitionEdge* mNextPatch;	//! for the contact manager has more than 1 patch, we have next patch's edge and previous patch's edge to connect to this edge
private:
	IG::EdgeIndex mEdgeIndex;	//! The edge index into the island manager, shifted by NB_BITS with flags in the low bits. Used to identify the contact manager/constraint
public:
	PxU32 mUniqueIndex;			//! a unique ID for this edge

	// Strips the flag bits to recover the island-manager edge index.
	PX_FORCE_INLINE IG::EdgeIndex getEdgeIndex() const { return mEdgeIndex >> NB_BITS; }

	PX_FORCE_INLINE PxU32 isArticulation0() const { return mNode0.isArticulation(); }
	PX_FORCE_INLINE PxU32 isArticulation1() const { return mNode1.isArticulation(); }

	// Flag accessors; note they return the raw masked bit (non-zero == set), not 0/1.
	PX_FORCE_INLINE PxU32 hasInfiniteMass0() const { return mEdgeIndex & HAS_INFINITE_MASS0; }
	PX_FORCE_INLINE PxU32 hasInfiniteMass1() const { return mEdgeIndex & HAS_INFINITE_MASS1; }
	PX_FORCE_INLINE void setInfiniteMass0() { mEdgeIndex |= HAS_INFINITE_MASS0; }
	PX_FORCE_INLINE void setInfiniteMass1() { mEdgeIndex |= HAS_INFINITE_MASS1; }
	PX_FORCE_INLINE void setHasThreshold() { mEdgeIndex |= HAS_THRESHOLD; }
	PX_FORCE_INLINE PxU32 hasThreshold() const { return mEdgeIndex & HAS_THRESHOLD; }
	PX_FORCE_INLINE void setIsContact() { mEdgeIndex |= IS_CONTACT; }
	PX_FORCE_INLINE PxU32 isContact() const { return mEdgeIndex & IS_CONTACT; }
	PX_FORCE_INLINE void setSpecialHandled() { mEdgeIndex |= SPECIAL_HANDLED; }
	PX_FORCE_INLINE void clearSpecialHandled() { mEdgeIndex &= ~SPECIAL_HANDLED; }
	PX_FORCE_INLINE PxU32 isSpecialHandled() const { return mEdgeIndex & SPECIAL_HANDLED; }

	//KS - This constructor explicitly does not set mUniqueIndex. It is filled in by the pool allocator and this constructor
	//is called afterwards. We do not want to stomp the uniqueIndex value
	PartitionEdge(IG::EdgeIndex index) :
		mNextPatch(NULL),
		mEdgeIndex(index << NB_BITS)
	{
		PX_ASSERT(!(index & 0xf8000000));	// PT: reserve 5 bits for internal flags
	}
};
PX_COMPILE_TIME_ASSERT(sizeof(PartitionEdge)<=32);	// PT: 2 of them per cache-line
// Mirrors the work unit's narrow-phase index into the GPU edge-node-index table
// for every PartitionEdge patch linked to that edge. Skipped entirely when there
// is no GPU data or the pair's contact response is disabled.
static PX_FORCE_INLINE void processPartitionEdges(const IG::GPUExternalData* gpuData, const PxcNpWorkUnit& unit)
{
	if(!gpuData || (unit.mFlags & PxcNpWorkUnitFlag::eDISABLE_RESPONSE))
		return;

	PxU32* edgeNodeIndices = gpuData->getEdgeNodeIndexPtr();
	if(!edgeNodeIndices)	// PT: only non-null for GPU version
		return;

	// Walk the per-patch linked list for this edge.
	for(const PartitionEdge* edge = gpuData->getFirstPartitionEdge(unit.mEdgeIndex); edge; edge = edge->mNextPatch)
		edgeNodeIndices[edge->mUniqueIndex] = unit.mNpIndex;
}
}
#endif

View File

@@ -0,0 +1,236 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_RIGID_BODY_H
#define PXS_RIGID_BODY_H
#include "PxvDynamics.h"
#include "CmSpatialVector.h"
#include "foundation/PxMutex.h"
namespace physx
{
struct PxsCCDBody;
PX_ALIGN_PREFIX(16)
// Low-level rigid body used by the solver/CCD pipeline. It is a thin wrapper around a
// PxsBodyCore (mCore): most accessors below simply forward to the core's data. The class
// additionally caches the previous-frame transform for CCD and per-frame activation /
// freeze bookkeeping. The trailing //N comments on the data members track the running
// byte offset of each member within the 16-byte-aligned layout (checked by the
// PX_COMPILE_TIME_ASSERT after the class).
class PxsRigidBody
{
public:
// Bit flags stored in mInternalFlags.
enum PxsRigidBodyFlag
{
eFROZEN = 1 << 0, //This flag indicates that the stabilization is enabled and the body is
//"frozen". By "frozen", we mean that the body's transform is unchanged
//from the previous frame. This permits various optimizations.
eFREEZE_THIS_FRAME = 1 << 1,
eUNFREEZE_THIS_FRAME = 1 << 2,
eACTIVATE_THIS_FRAME = 1 << 3,
eDEACTIVATE_THIS_FRAME = 1 << 4,
// PT: this flag is now only used on the GPU. For the CPU the data is now stored directly in PxsBodyCore.
eDISABLE_GRAVITY_GPU = 1 << 5,
eSPECULATIVE_CCD = 1 << 6,
eENABLE_GYROSCOPIC = 1 << 7,
eRETAIN_ACCELERATION = 1 << 8,
eFIRST_BODY_COPY_GPU = 1 << 9, // Flag to raise to indicate that the body is DMA'd to the GPU for the first time
eVELOCITY_COPY_GPU = 1 << 10 // Flag to raise to indicate that linear and angular velocities should be DMA'd to the GPU
};
// Binds this body to its core; snapshots the current pose into mLastTransform so the
// first CCD pass has a valid "previous" transform. 'core' must outlive this object.
PX_FORCE_INLINE PxsRigidBody(PxsBodyCore* core, PxReal freeze_count) :
mLastTransform (core->body2World),
mInternalFlags (0),
mSolverIterationCounts (core->solverIterationCounts),
mCCD (NULL),
mCore (core),
mSleepLinVelAcc (PxVec3(0.0f)),
mFreezeCount (freeze_count),
mSleepAngVelAcc (PxVec3(0.0f)),
mAccelScale (1.0f)
{}
PX_FORCE_INLINE ~PxsRigidBody() {}
// Accessors forwarding to the shared PxsBodyCore.
PX_FORCE_INLINE const PxTransform& getPose() const { PX_ASSERT(mCore->body2World.isSane()); return mCore->body2World; }
PX_FORCE_INLINE const PxVec3& getLinearVelocity() const { PX_ASSERT(mCore->linearVelocity.isFinite()); return mCore->linearVelocity; }
PX_FORCE_INLINE const PxVec3& getAngularVelocity() const { PX_ASSERT(mCore->angularVelocity.isFinite()); return mCore->angularVelocity; }
PX_FORCE_INLINE void setVelocity(const PxVec3& linear,
const PxVec3& angular) { PX_ASSERT(linear.isFinite()); PX_ASSERT(angular.isFinite());
mCore->linearVelocity = linear;
mCore->angularVelocity = angular; }
PX_FORCE_INLINE void setLinearVelocity(const PxVec3& linear) { PX_ASSERT(linear.isFinite()); mCore->linearVelocity = linear; }
PX_FORCE_INLINE void setAngularVelocity(const PxVec3& angular) { PX_ASSERT(angular.isFinite()); mCore->angularVelocity = angular; }
// Zero out velocity components according to the core's per-axis lock flags (defined below the class).
PX_FORCE_INLINE void constrainLinearVelocity();
PX_FORCE_INLINE void constrainAngularVelocity();
PX_FORCE_INLINE PxU32 getIterationCounts() { return mCore->solverIterationCounts; }
PX_FORCE_INLINE PxReal getReportThreshold() const { return mCore->contactReportThreshold; }
// Pose saved by the last saveLastCCDTransform() call; used as the sweep start for CCD.
PX_FORCE_INLINE const PxTransform& getLastCCDTransform() const { return mLastTransform; }
PX_FORCE_INLINE void saveLastCCDTransform() { mLastTransform = mCore->body2World; }
// Kinematic bodies are encoded with zero inverse mass.
PX_FORCE_INLINE bool isKinematic() const { return mCore->inverseMass == 0.0f; }
PX_FORCE_INLINE void setPose(const PxTransform& pose) { mCore->body2World = pose; }
PX_FORCE_INLINE void setPosition(const PxVec3& position) { mCore->body2World.p = position; }
PX_FORCE_INLINE PxReal getInvMass() const { return mCore->inverseMass; }
PX_FORCE_INLINE PxVec3 getInvInertia() const { return mCore->inverseInertia; }
// NOTE: reciprocals of the stored inverse values - divides by zero for kinematic bodies.
PX_FORCE_INLINE PxReal getMass() const { return 1.0f/mCore->inverseMass; }
PX_FORCE_INLINE PxVec3 getInertia() const { return PxVec3(1.0f/mCore->inverseInertia.x,
1.0f/mCore->inverseInertia.y,
1.0f/mCore->inverseInertia.z); }
PX_FORCE_INLINE PxsBodyCore& getCore() { return *mCore; }
PX_FORCE_INLINE const PxsBodyCore& getCore() const { return *mCore; }
// Per-frame event queries: each returns the (non-zero) flag bit when raised, 0 otherwise.
PX_FORCE_INLINE PxU32 isActivateThisFrame() const { return PxU32(mInternalFlags & eACTIVATE_THIS_FRAME); }
PX_FORCE_INLINE PxU32 isDeactivateThisFrame() const { return PxU32(mInternalFlags & eDEACTIVATE_THIS_FRAME); }
PX_FORCE_INLINE PxU32 isFreezeThisFrame() const { return PxU32(mInternalFlags & eFREEZE_THIS_FRAME); }
PX_FORCE_INLINE PxU32 isUnfreezeThisFrame() const { return PxU32(mInternalFlags & eUNFREEZE_THIS_FRAME); }
PX_FORCE_INLINE void clearFreezeFlag() { mInternalFlags &= ~eFREEZE_THIS_FRAME; }
PX_FORCE_INLINE void clearUnfreezeFlag() { mInternalFlags &= ~eUNFREEZE_THIS_FRAME; }
// Clears all four per-frame event flags in one go (freeze/unfreeze/activate/deactivate).
PX_FORCE_INLINE void clearAllFrameFlags() { mInternalFlags &= ~(eFREEZE_THIS_FRAME | eUNFREEZE_THIS_FRAME | eACTIVATE_THIS_FRAME | eDEACTIVATE_THIS_FRAME); }
PX_FORCE_INLINE void resetSleepFilter() { mSleepAngVelAcc = mSleepLinVelAcc = PxVec3(0.0f); }
// PT: implemented in PxsCCD.cpp:
void advanceToToi(PxReal toi, PxReal dt, bool clip);
void advancePrevPoseToToi(PxReal toi);
// PxTransform getAdvancedTransform(PxReal toi) const;
Cm::SpatialVector getPreSolverVelocities() const;
PxTransform mLastTransform; //28
PxU16 mInternalFlags; //30
PxU16 mSolverIterationCounts; //32
PxsCCDBody* mCCD; //40 // only valid during CCD
PxsBodyCore* mCore; //48
PxVec3 mSleepLinVelAcc; //60
PxReal mFreezeCount; //64
PxVec3 mSleepAngVelAcc; //76
PxReal mAccelScale; //80
}
PX_ALIGN_SUFFIX(16);
// The layout above must stay a multiple of 16 bytes (see PX_ALIGN_PREFIX(16)).
PX_COMPILE_TIME_ASSERT(0 == (sizeof(PxsRigidBody) & 0x0f));
void PxsRigidBody::constrainLinearVelocity()
{
const PxU32 lockFlags = mCore->lockFlags;
if(lockFlags)
{
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_X)
mCore->linearVelocity.x = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_Y)
mCore->linearVelocity.y = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_LINEAR_Z)
mCore->linearVelocity.z = 0.0f;
}
}
void PxsRigidBody::constrainAngularVelocity()
{
const PxU32 lockFlags = mCore->lockFlags;
if(lockFlags)
{
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_X)
mCore->angularVelocity.x = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_Y)
mCore->angularVelocity.y = 0.0f;
if(lockFlags & PxRigidDynamicLockFlag::eLOCK_ANGULAR_Z)
mCore->angularVelocity.z = 0.0f;
}
}
// External acceleration injected into a rigid body for one simulation step.
// Units/frame are not visible here - presumably world-space accelerations
// consumed by the solver; confirm against PxsExternalAccelerationProvider users.
struct PxsRigidBodyExternalAcceleration
{
	PxVec3 linearAcceleration;
	PxVec3 angularAcceleration;

	// Default: no external acceleration on either channel.
	PxsRigidBodyExternalAcceleration() :
		linearAcceleration	(PxVec3(0.0f)),
		angularAcceleration	(PxVec3(0.0f))
	{
	}

	PxsRigidBodyExternalAcceleration(const PxVec3& linearAcc, const PxVec3& angularAcc) :
		linearAcceleration	(linearAcc),
		angularAcceleration	(angularAcc)
	{
	}
};
// Thread-shared store of per-body external accelerations, grown lazily under a lock.
// Readers (hasAccelerations/get) are lock-free and gate on mArraySize, which is only
// advanced after the resize has completed.
struct PxsExternalAccelerationProvider
{
PxArray<PxsRigidBodyExternalAcceleration> mAccelerations;
PxMutex mLock;
// Published size; readers test this instead of mAccelerations.size() so they never
// observe a size that was bumped before the reallocation finished.
// NOTE(review): this relies on 'volatile' for cross-thread visibility/ordering, which
// the C++ memory model does not guarantee - looks like it assumes x86-ish semantics;
// confirm, or consider an atomic.
volatile PxU32 mArraySize; //Required because of multi threading
PxsExternalAccelerationProvider() : mArraySize(0)
{ }
// Stores 'value' at 'index', growing the array to 'maxNumBodies' on first use.
// Double-checked pattern: the unlocked size test is re-done under the lock.
PX_FORCE_INLINE void setValue(PxsRigidBodyExternalAcceleration& value, PxU32 index, PxU32 maxNumBodies)
{
if (mArraySize < maxNumBodies)
{
PxMutex::ScopedLock lock(mLock);
if (mArraySize < maxNumBodies) //Test again because only after the lock we are sure that only one thread is active at a time
{
mAccelerations.resize(maxNumBodies);
mArraySize = maxNumBodies; //Only now the resize is complete - mAccelerations.size() might already change before the array actually allocated the new memory
}
}
PX_ASSERT(index < mArraySize);
mAccelerations[index] = value;
}
// Lock-free: true once any setValue() call has published a non-zero size.
PX_FORCE_INLINE bool hasAccelerations() const
{
return mArraySize > 0;
}
// Lock-free read; 'index' must be below the published size.
PX_FORCE_INLINE const PxsRigidBodyExternalAcceleration& get(PxU32 index) const
{
PX_ASSERT(index < mArraySize);
return mAccelerations[index];
}
// Empties the store. First call clears the elements (capacity presumably kept by
// PxArray::clear()); a later call with size already 0 releases the backing memory
// via reset().
PX_FORCE_INLINE void clearAll()
{
if (mArraySize > 0)
{
mAccelerations.clear();
mArraySize = 0;
}
else if (mAccelerations.capacity() > 0)
mAccelerations.reset();
}
};
}
#endif

View File

@@ -0,0 +1,262 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_SIMPLE_ISLAND_GEN_H
#define PXS_SIMPLE_ISLAND_GEN_H
#include "foundation/PxUserAllocated.h"
#include "PxsIslandSim.h"
#include "CmTask.h"
/*
PT: runs first part of the island gen's second pass in parallel with the Pxg-level constraint partitioning.
mIslandGen task spawns Pxg constraint partitioning task(s).
mIslandGen runs processNarrowPhaseTouchEvents() in parallel with Pxg.
///////////////////////////////////////////////////////////////////////////////
Previous design:
mPostIslandGen runs as a continuation task after mIslandGen and Pxg.
mPostIslandGen mainly runs mSetEdgesConnectedTask, which:
- calls mSimpleIslandManager->setEdgeConnected()
- calls mSimpleIslandManager->secondPassIslandGen()
- calls wakeObjectsUp()
///////////////////////////////////////////////////////////////////////////////
New design:
postIslandGen is not a task anymore (mPostIslandGen does not exist).
postIslandGen is directly called at the end of mIslandGen.
So it now runs in parallel with Pxg.
mIslandGen and Pxg continue to mSolver task.
postIslandGen mainly runs mSetEdgesConnectedTask, which:
- calls mSimpleIslandManager->setEdgeConnected()
- calls mSimpleIslandManager->secondPassIslandGenPart1()
mSolver now first runs the parts that don't overlap with Pxg:
- calls mSimpleIslandManager->secondPassIslandGenPart2()
- calls wakeObjectsUp()
///////////////////////////////////////////////////////////////////////////////
Before:
mIslandGen->processNarrowPhaseTouchEvents |mPostIslandGen |mSolver
=>PxgConstraintPartition |=>setEdgesConnected->secondPassIslandGen->wakeObjectsUp|
After:
mIslandGen->processNarrowPhaseTouchEvents->postIslandGen |secondPassIslandGenPart2->wakeObjectsUp->mSolver
=>PxgConstraintPartition =>setEdgesConnected->secondPassIslandGenPart1 |
*/
#define USE_SPLIT_SECOND_PASS_ISLAND_GEN 1
namespace physx
{
class PxsContactManager;
// PT: TODO: fw declaring an Sc class here is not good
namespace Sc
{
class Interaction;
}
namespace Dy
{
struct Constraint;
}
namespace IG
{
class SimpleIslandManager;
// Task wrapper for the third island-gen pass over one IslandSim (two instances exist in
// SimpleIslandManager: one for the speculative sim, one for the accurate sim).
// runInternal() is implemented in the .cpp - presumably drives the pass on mIslandSim;
// confirm there.
class ThirdPassTask : public Cm::Task
{
SimpleIslandManager& mIslandManager;
IslandSim& mIslandSim;
public:
ThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager, IslandSim& islandSim);
virtual void runInternal();
// Name shown in task profiling/tracing.
virtual const char* getName() const
{
return "ThirdPassIslandGenTask";
}
private:
PX_NOCOPY(ThirdPassTask)
};
// Continuation task run after both ThirdPassTask instances (see the friend declarations
// in SimpleIslandManager). runInternal() is implemented in the .cpp.
class PostThirdPassTask : public Cm::Task
{
SimpleIslandManager& mIslandManager;
public:
PostThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager);
virtual void runInternal();
// Name shown in task profiling/tracing.
virtual const char* getName() const
{
return "PostThirdPassTask";
}
private:
PX_NOCOPY(PostThirdPassTask)
};
// Per-edge payload storage: each entry is a type-erased pointer to either a
// PxsContactManager (contact edge) or a Dy::Constraint (constraint edge).
// The accessors are unchecked reinterpret_casts - the caller must already know
// the edge's type before picking one.
class AuxCpuData
{
public:
PX_FORCE_INLINE PxsContactManager* getContactManager(IG::EdgeIndex edgeId) const { return reinterpret_cast<PxsContactManager*>(mConstraintOrCm[edgeId]); }
PX_FORCE_INLINE Dy::Constraint* getConstraint(IG::EdgeIndex edgeId) const { return reinterpret_cast<Dy::Constraint*>(mConstraintOrCm[edgeId]); }
Cm::BlockArray<void*> mConstraintOrCm; //! Pointers to either the constraint or Cm for this pair
};
// Central island-generation manager. Owns the node/edge handle books, the two island
// sims (accurate + speculative) and the third-pass tasks, and maps edges back to their
// contact manager / constraint / interaction payloads. See the file-top comment for the
// staged first/second/third pass flow.
class SimpleIslandManager : public PxUserAllocated
{
HandleManager<PxU32> mNodeHandles; //! Handle manager for nodes
HandleManager<EdgeIndex> mEdgeHandles; //! Handle manager for edges
//An array of destroyed nodes
PxArray<PxNodeIndex> mDestroyedNodes;
// Per-edge high-level interaction, indexed by EdgeIndex (see getInteractionFromEdgeIndex).
Cm::BlockArray<Sc::Interaction*> mInteractions;
//Edges destroyed this frame
PxArray<EdgeIndex> mDestroyedEdges;
GPUExternalData mGpuData;
CPUExternalData mCpuData;
AuxCpuData mAuxCpuData;
// One bit per edge: set while the edge is "connected" (see isEdgeConnected /
// setEdgeConnected / setEdgeDisconnected).
PxBitMap mConnectedMap;
// PT: TODO: figure out why we still need both
IslandSim mAccurateIslandManager;
IslandSim mSpeculativeIslandManager;
ThirdPassTask mSpeculativeThirdPassTask;
ThirdPassTask mAccurateThirdPassTask;
PostThirdPassTask mPostThirdPassTask;
PxU32 mMaxDirtyNodesPerFrame;
const PxU64 mContextID;
const bool mGPU;
public:
SimpleIslandManager(bool useEnhancedDeterminism, bool gpu, PxU64 contextID);
~SimpleIslandManager();
// Node lifetime. 'object' is the owner-side payload associated with the node.
PxNodeIndex addNode(bool isActive, bool isKinematic, Node::NodeType type, void* object);
void removeNode(const PxNodeIndex index);
// PT: these two functions added for multithreaded implementation of Sc::Scene::islandInsertion
void preallocateContactManagers(PxU32 nb, EdgeIndex* handles);
bool addPreallocatedContactManager(EdgeIndex handle, PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType);
// Edge creation for contact pairs and constraints.
EdgeIndex addContactManager(PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType);
EdgeIndex addConstraint(Dy::Constraint* constraint, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction);
PX_FORCE_INLINE PxIntBool isEdgeConnected(EdgeIndex edgeIndex) const { return mConnectedMap.test(edgeIndex); }
void activateNode(PxNodeIndex index);
void deactivateNode(PxNodeIndex index);
void putNodeToSleep(PxNodeIndex index);
void removeConnection(EdgeIndex edgeIndex);
// Island-gen passes; the second pass is split in two parts so Part1 can overlap GPU
// work (see USE_SPLIT_SECOND_PASS_ISLAND_GEN and the file-top comment).
void firstPassIslandGen();
void additionalSpeculativeActivation();
void secondPassIslandGen();
void secondPassIslandGenPart1();
void secondPassIslandGenPart2();
void thirdPassIslandGen(PxBaseTask* continuation);
PX_INLINE void clearDestroyedPartitionEdges()
{
mGpuData.mDestroyedPartitionEdges.forceSize_Unsafe(0);
}
void setEdgeConnected(EdgeIndex edgeIndex, Edge::EdgeType edgeType);
void setEdgeDisconnected(EdgeIndex edgeIndex);
void setEdgeRigidCM(const EdgeIndex edgeIndex, PxsContactManager* cm);
void clearEdgeRigidCM(const EdgeIndex edgeIndex);
void setKinematic(PxNodeIndex nodeIndex);
void setDynamic(PxNodeIndex nodeIndex);
PX_FORCE_INLINE IslandSim& getSpeculativeIslandSim() { return mSpeculativeIslandManager; }
PX_FORCE_INLINE const IslandSim& getSpeculativeIslandSim() const { return mSpeculativeIslandManager; }
PX_FORCE_INLINE IslandSim& getAccurateIslandSim() { return mAccurateIslandManager; }
PX_FORCE_INLINE const IslandSim& getAccurateIslandSim() const { return mAccurateIslandManager; }
PX_FORCE_INLINE const AuxCpuData& getAuxCpuData() const { return mAuxCpuData; }
PX_FORCE_INLINE PxU32 getNbEdgeHandles() const { return mEdgeHandles.getTotalHandles(); }
PX_FORCE_INLINE PxU32 getNbNodeHandles() const { return mNodeHandles.getTotalHandles(); }
void deactivateEdge(const EdgeIndex edge);
// Convenience forwards to mAuxCpuData / mInteractions (same unchecked-cast caveat as AuxCpuData).
PX_FORCE_INLINE PxsContactManager* getContactManager(IG::EdgeIndex edgeId) const { return reinterpret_cast<PxsContactManager*>(mAuxCpuData.mConstraintOrCm[edgeId]); }
PX_FORCE_INLINE Dy::Constraint* getConstraint(IG::EdgeIndex edgeId) const { return reinterpret_cast<Dy::Constraint*>(mAuxCpuData.mConstraintOrCm[edgeId]); }
PX_FORCE_INLINE Sc::Interaction* getInteractionFromEdgeIndex(IG::EdgeIndex edgeId) const { return mInteractions[edgeId]; }
PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }
bool checkInternalConsistency();
private:
friend class ThirdPassTask;
friend class PostThirdPassTask;
bool validateDeactivations() const;
// Shared implementation behind addContactManager/addConstraint.
EdgeIndex addEdge(void* edge, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction);
EdgeIndex resizeEdgeArrays(EdgeIndex handle, bool flag);
PX_NOCOPY(SimpleIslandManager)
};
}
}
#endif

View File

@@ -0,0 +1,429 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_SIMULATION_CONTROLLER_H
#define PXS_SIMULATION_CONTROLLER_H
#include "PxDirectGPUAPI.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxTransform.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxPinnedArray.h"
#include "foundation/PxUserAllocated.h"
#include "PxScene.h"
#include "PxParticleSystem.h"
#include "PxArticulationTendonData.h"
#include "PxNodeIndex.h"
namespace physx
{
namespace Dy
{
class Context;
struct Constraint;
class FeatherstoneArticulation;
struct ArticulationJointCore;
class ParticleSystemCore;
class ParticleSystem;
#if PX_SUPPORT_GPU_PHYSX
class DeformableSurface;
class DeformableVolume;
#endif
}
namespace Bp
{
class BoundsArray;
class BroadPhase;
class AABBManagerBase;
}
namespace IG
{
class IslandSim;
}
namespace Sc
{
class BodySim;
class ShapeSimBase;
}
class PxsTransformCache;
class PxvNphaseImplementationContext;
class PxBaseTask;
class PxsContext;
class PxsRigidBody;
class PxsKernelWranglerManager;
class PxsHeapMemoryAllocatorManager;
class PxgParticleSystemCore;
struct PxConeLimitedConstraint;
struct PxsShapeCore;
class PxPhysXGpu;
struct PxgSolverConstraintManagerConstants;
struct PxsExternalAccelerationProvider;
// Callback interface the simulation controller uses to reach back into the scene layer
// (implemented elsewhere; both methods are pure virtual).
class PxsSimulationControllerCallback : public PxUserAllocated
{
public:
// Update Sc-level body/shape mirrors; 'continuation' is the task to chain after the work.
virtual void updateScBodyAndShapeSim(PxBaseTask* continuation) = 0;
virtual PxU32 getNbCcdBodies() = 0;
virtual ~PxsSimulationControllerCallback() {}
};
#if PX_SUPPORT_OMNI_PVD
// Callbacks used to mirror direct-GPU API writes into OmniPVD recording
// (only compiled in when OmniPVD support is enabled).
class PxsSimulationControllerOVDCallbacks : public PxUserAllocated
{
public:
// Record a batch write applied to rigid dynamics; 'dataVec' layout depends on 'dataType'.
virtual void processRigidDynamicSet(const PxsRigidBody* const * rigids, const void* dataVec, const PxRigidDynamicGPUIndex* gpuIndices, PxRigidDynamicGPUAPIWriteType::Enum dataType, PxU32 nbElements) = 0;
// Record a batch write applied to articulations; the max* counts size the per-articulation data blocks.
virtual void processArticulationSet(const Dy::FeatherstoneArticulation* const * simBodyVec, const void* dataVec, const PxArticulationGPUIndex* indexVec, PxArticulationGPUAPIWriteType::Enum dataType, PxU32 nbElements,
PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments) = 0;
// Returns the number of elements in a data block as well as the size of the datablock, see PxArticulationGPUAPIWriteType::Enum for where the sizes etc are derived
PX_FORCE_INLINE void getArticulationDataElements(PxArticulationGPUAPIWriteType::Enum dataType, PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments,
PxU32& nbSubElements, PxU32& blockSize) const;
virtual ~PxsSimulationControllerOVDCallbacks() {}
};
#endif
class PxsSimulationController : public PxUserAllocated
{
public:
PxsSimulationController(PxsSimulationControllerCallback* callback, PxIntBool gpu) : mCallback(callback), mGPU(gpu) {}
virtual ~PxsSimulationController(){}
virtual void addPxgShape(Sc::ShapeSimBase* /*shapeSimBase*/, const PxsShapeCore* /*shapeCore*/, PxNodeIndex /*nodeIndex*/, PxU32 /*index*/){}
virtual void setPxgShapeBodyNodeIndex(PxNodeIndex /*nodeIndex*/, PxU32 /*index*/) {}
virtual void removePxgShape(PxU32 /*index*/){}
virtual void addDynamic(PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*nodeIndex*/){}
virtual void addDynamics(PxsRigidBody** /*rigidBody*/, const PxU32* /*nodeIndex*/, PxU32 /*nbBodies*/) {}
virtual void addArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void releaseArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void releaseDeferredArticulationIds() {}
#if PX_SUPPORT_GPU_PHYSX
virtual void addSoftBody(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void releaseSoftBody(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void releaseDeferredSoftBodyIds() {}
virtual void activateSoftbody(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void deactivateSoftbody(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void activateSoftbodySelfCollision(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void deactivateSoftbodySelfCollision(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void setSoftBodyWakeCounter(Dy::DeformableVolume* /*deformableVolume*/) {}
virtual void addParticleFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::ParticleSystem* /*particleSystem*/,
PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/) {}
virtual void removeParticleFilter(Dy::DeformableVolume* /*deformableVolume*/,
const Dy::ParticleSystem* /*particleSystem*/, PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/) {}
virtual PxU32 addParticleAttachment(Dy::DeformableVolume* /*deformableVolume*/, const Dy::ParticleSystem* /*particleSystem*/,
PxU32 /*particleId*/, PxU32 /*userBufferId*/, PxU32 /*tetId*/, const PxVec4& /*barycentrics*/, const bool /*isActive*/) { return 0; }
virtual void removeParticleAttachment(Dy::DeformableVolume* /*deformableVolume*/, PxU32 /*handle*/) {}
virtual void addRigidFilter(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/) {}
virtual void removeRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/) {}
virtual PxU32 addRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/, const PxNodeIndex& /*softBodyNodeIndex*/,
PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/, const PxVec3& /*actorSpacePose*/,
PxConeLimitedConstraint* /*constraint*/, const bool /*isActive*/, bool /*doConversion*/) { return 0; }
virtual void removeRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/, PxU32 /*handle*/) {}
virtual void addTetRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetId*/) {}
virtual PxU32 addTetRigidAttachment(Dy::DeformableVolume* /*deformableVolume*/,
PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetIdx*/,
const PxVec4& /*barycentrics*/, const PxVec3& /*actorSpacePose*/, PxConeLimitedConstraint* /*constraint*/,
const bool /*isActive*/, bool /*doConversion*/) { return 0; }
virtual void removeTetRigidFilter(Dy::DeformableVolume* /*deformableVolume*/,
const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*tetId*/) {}
virtual void addSoftBodyFilter(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/,
PxU32 /*tetIdx1*/) {}
virtual void removeSoftBodyFilter(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/,
PxU32 /*tetId1*/) {}
virtual void addSoftBodyFilters(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32* /*tetIndices0*/, PxU32* /*tetIndices1*/,
PxU32 /*tetIndicesSize*/) {}
virtual void removeSoftBodyFilters(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32* /*tetIndices0*/, PxU32* /*tetIndices1*/,
PxU32 /*tetIndicesSize*/) {}
virtual PxU32 addSoftBodyAttachment(Dy::DeformableVolume* /*deformableVolume0*/, Dy::DeformableVolume* /*deformableVolume1*/, PxU32 /*tetIdx0*/, PxU32 /*tetIdx1*/,
const PxVec4& /*tetBarycentric0*/, const PxVec4& /*tetBarycentric1*/,
PxConeLimitedConstraint* /*constraint*/, PxReal /*constraintOffset*/, const bool /*isActive*/, bool /*doConversion*/) { return 0; }
virtual void removeSoftBodyAttachment(Dy::DeformableVolume* /*deformableVolume0*/, PxU32 /*handle*/) {}
virtual void addClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/, PxU32 /*tetIdx*/) {}
virtual void removeClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/, PxU32 /*tetIdx*/) {}
virtual void addVertClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*vertIdx*/, PxU32 /*tetIdx*/) {}
virtual void removeVertClothFilter(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*vertIdx*/, PxU32 /*tetIdx*/) {}
virtual PxU32 addClothAttachment(Dy::DeformableVolume* /*deformableVolume*/, Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*triIdx*/,
const PxVec4& /*triBarycentric*/, PxU32 /*tetIdx*/, const PxVec4& /*tetBarycentric*/,
PxConeLimitedConstraint* /*constraint*/, PxReal /*constraintOffset*/,
const bool /*isActive*/, bool /*doConversion*/) { return 0; }
virtual void removeClothAttachment(Dy::DeformableVolume* /*deformableVolume*/,PxU32 /*handle*/) {}
virtual void addFEMCloth(Dy::DeformableSurface*, const PxNodeIndex&) {}
virtual void releaseFEMCloth(Dy::DeformableSurface*) {}
virtual void releaseDeferredFEMClothIds() {}
virtual void activateCloth(Dy::DeformableSurface*) {}
virtual void deactivateCloth(Dy::DeformableSurface*) {}
virtual void setClothWakeCounter(Dy::DeformableSurface*) {}
virtual PxU32 addRigidAttachment(Dy::DeformableSurface* /*cloth*/, const PxNodeIndex& /*clothNodeIndex*/,
PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*vertIndex*/, const PxVec3& /*actorSpacePose*/,
PxConeLimitedConstraint* /*constraint*/, const bool /*isActive*/) { return 0; }
virtual void removeRigidAttachment(Dy::DeformableSurface* /*cloth*/, PxU32 /*handle*/) {}
virtual void addTriRigidFilter(Dy::DeformableSurface* /*deformableSurface*/,
const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/) {}
virtual void removeTriRigidFilter(Dy::DeformableSurface* /*deformableSurface*/,
const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/) {}
virtual PxU32 addTriRigidAttachment(Dy::DeformableSurface* /*deformableSurface*/,
PxsRigidBody* /*rigidBody*/, const PxNodeIndex& /*rigidNodeIndex*/, PxU32 /*triIdx*/, const PxVec4& /*barycentrics*/,
const PxVec3& /*actorSpacePose*/, PxConeLimitedConstraint* /*constraint*/,
const bool /*isActive*/) { return 0; }
virtual void removeTriRigidAttachment(Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*handle*/) {}
virtual void addClothFilter(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/) {}
virtual void removeClothFilter(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/) {}
virtual PxU32 addTriClothAttachment(Dy::DeformableSurface* /*deformableSurface0*/, Dy::DeformableSurface* /*deformableSurface1*/, PxU32 /*triIdx0*/, PxU32 /*triIdx1*/,
const PxVec4& /*triBarycentric0*/, const PxVec4& /*triBarycentric1*/, const bool /*addToActive*/) { return 0; }
virtual void removeTriClothAttachment(Dy::DeformableSurface* /*deformableSurface*/, PxU32 /*handle*/) {}
virtual void addParticleSystem(Dy::ParticleSystem* /*particleSystem*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void releaseParticleSystem(Dy::ParticleSystem* /*particleSystem*/) {}
virtual void releaseDeferredParticleSystemIds() {}
#endif
virtual void setEnableOVDReadback(bool) {}
virtual bool getEnableOVDReadback() const { return false; }
virtual void setEnableOVDCollisionReadback(bool) {}
virtual bool getEnableOVDCollisionReadback() const { return false; }
#if PX_SUPPORT_OMNI_PVD
virtual void setOVDCallbacks(PxsSimulationControllerOVDCallbacks& /*ovdCallbacks*/) {}
#endif
virtual void updateDynamic(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void addJoint(const Dy::Constraint&) {}
virtual void updateJoint(const PxU32 /*edgeIndex*/, Dy::Constraint* /*constraint*/){}
virtual void updateBodies(PxsRigidBody** /*rigidBodies*/, PxU32* /*nodeIndices*/, const PxU32 /*nbBodies*/, PxsExternalAccelerationProvider* /*externalAccelerations*/) {}
// virtual void updateBody(PxsRigidBody* /*rigidBody*/, const PxU32 /*nodeIndex*/) {}
virtual void updateBodies(PxBaseTask* /*continuation*/){}
virtual void updateShapes(PxBaseTask* /*continuation*/) {}
virtual void preIntegrateAndUpdateBound(PxBaseTask* /*continuation*/, const PxVec3 /*gravity*/, const PxReal /*dt*/){}
virtual void updateParticleSystemsAndSoftBodies(){}
virtual void sortContacts(){}
virtual void update(PxBitMapPinned& /*changedHandleMap*/){}
virtual void updateArticulation(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
// --- Articulation update hooks: no-op defaults in this base class (presumably overridden by the GPU controller - see note below) ---
virtual void updateArticulationJoint(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
// virtual void updateArticulationTendon(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void updateArticulationExtAccel(Dy::FeatherstoneArticulation* /*articulation*/, const PxNodeIndex& /*nodeIndex*/) {}
virtual void updateArticulationAfterIntegration(PxsContext* /*llContext*/, Bp::AABBManagerBase* /*aabbManager*/,
	PxArray<Sc::BodySim*>& /*ccdBodies*/, PxBaseTask* /*continuation*/, IG::IslandSim& /*islandSim*/, float /*dt*/) {}
virtual void mergeChangedAABBMgHandle() {}
virtual void gpuDmabackData(PxsTransformCache& /*cache*/, Bp::BoundsArray& /*boundArray*/, PxBitMapPinned& /*changedAABBMgrHandles*/, bool /*enableDirectGPUAPI*/){}
// Pure virtual: implementations must sync Sc-level body/shape sim state from the low-level caches.
virtual void updateScBodyAndShapeSim(PxsTransformCache& cache, Bp::BoundsArray& boundArray, PxBaseTask* continuation) = 0;
// --- Activation/freeze bookkeeping queries: defaults report "nothing" (NULL / 0) ---
virtual PxU32* getActiveBodies() { return NULL; }
virtual PxU32* getDeactiveBodies() { return NULL; }
virtual void** getRigidBodies() { return NULL; }
virtual PxU32 getNbBodies() { return 0; }
virtual PxU32* getUnfrozenShapes() { return NULL; }
virtual PxU32* getFrozenShapes() { return NULL; }
virtual Sc::ShapeSimBase** getShapeSims() { return NULL; }
virtual PxU32 getNbFrozenShapes() { return 0; }
virtual PxU32 getNbUnfrozenShapes() { return 0; }
virtual PxU32 getNbShapes() { return 0; }
virtual void clear() { }
virtual void setBounds(Bp::BoundsArray* /*boundArray*/){}
virtual void reserve(const PxU32 /*nbBodies*/) {}
// Default: no remap table available - any node index maps to PX_INVALID_U32.
virtual PxU32 getArticulationRemapIndex(const PxU32 /*nodeIndex*/) { return PX_INVALID_U32;}
//KS - the methods below here should probably be wrapped in if PX_SUPPORT_GPU_PHYSX
// PT: isn't the whole class only needed for GPU anyway?
// AD: Yes.
virtual void setDeformableSurfaceGpuPostSolveCallback(PxPostSolveCallback* /*postSolveCallback*/) { }
virtual void setDeformableVolumeGpuPostSolveCallback(PxPostSolveCallback* /*postSolveCallback*/) { }
// NEW DIRECT-GPU API
// All direct-GPU entry points default to "unsupported" (return false). The CUevent
// pairs let callers sequence these transfers against their own GPU work.
virtual bool getRigidDynamicData(void* /*data*/, const PxRigidDynamicGPUIndex* /*gpuIndices*/, PxRigidDynamicGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, float /*oneOverDt*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
virtual bool setRigidDynamicData(const void* /*data*/, const PxRigidDynamicGPUIndex* /*gpuIndices*/, PxRigidDynamicGPUAPIWriteType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
virtual bool getArticulationData(void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
virtual bool setArticulationData(const void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIWriteType::Enum /*dataType*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
virtual bool computeArticulationData(void* /*data*/, const PxArticulationGPUIndex* /*gpuIndices*/, PxArticulationGPUAPIComputeType::Enum /*operation*/, PxU32 /*nbElements*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) { return false; }
virtual bool evaluateSDFDistances(PxVec4* /*localGradientAndSDFConcatenated*/, const PxShapeGPUIndex* /*shapeIndices*/, const PxVec4* /*localSamplePointsConcatenated*/, const PxU32* /*samplePointCountPerShape*/, PxU32 /*nbElements*/, PxU32 /*maxPointCount*/, CUevent /*startEvent = NULL*/, CUevent /*finishEvent = NULL*/) { return false; }
virtual bool copyContactData(void* /*data*/, PxU32* /*numContactPairs*/, const PxU32 /*maxContactPairs*/, CUevent /*startEvent*/, CUevent /*copyEvent*/) { return false; }
virtual PxArticulationGPUAPIMaxCounts getArticulationGPUAPIMaxCounts() const { return PxArticulationGPUAPIMaxCounts(); }
virtual bool getD6JointData(void* /*data*/, const PxD6JointGPUIndex* /*gpuIndices*/, PxD6JointGPUAPIReadType::Enum /*dataType*/, PxU32 /*nbElements*/, PxF32 /*oneOverDt*/, CUevent /*startEvent*/, CUevent /*finishEvent*/) const { return false; }
// END NEW DIRECT-GPU API
// DEPRECATED DIRECT-GPU API
PX_DEPRECATED virtual void copySoftBodyDataDEPRECATED(void** /*data*/, void* /*dataSizes*/, void* /*softBodyIndices*/, PxSoftBodyGpuDataFlag::Enum /*flag*/, const PxU32 /*nbCopySoftBodies*/, const PxU32 /*maxSize*/, CUevent /*copyEvent*/) {}
PX_DEPRECATED virtual void applySoftBodyDataDEPRECATED(void** /*data*/, void* /*dataSizes*/, void* /*softBodyIndices*/, PxSoftBodyGpuDataFlag::Enum /*flag*/, const PxU32 /*nbUpdatedSoftBodies*/, const PxU32 /*maxSize*/, CUevent /*applyEvent*/, CUevent /*signalEvent*/) {}
PX_DEPRECATED virtual void applyParticleBufferDataDEPRECATED(const PxU32* /*indices*/, const PxGpuParticleBufferIndexPair* /*indexPairs*/, const PxParticleBufferFlags* /*flags*/, PxU32 /*nbUpdatedBuffers*/, CUevent /*waitEvent*/, CUevent /*signalEvent*/) {}
// END DEPRECATED DIRECT-GPU API
// Default: shape is unknown to this controller.
virtual PxU32 getInternalShapeIndex(const PxsShapeCore& /*shapeCore*/) { return PX_INVALID_U32; }
virtual void syncParticleData() {}
virtual void updateBoundsAndShapes(Bp::AABBManagerBase& /*aabbManager*/, bool /*useDirectApi*/){}
#if PX_SUPPORT_GPU_PHYSX
// Deformable activation queries: defaults report no deformables present.
virtual PxU32 getNbDeactivatedDeformableSurfaces() const { return 0; }
virtual PxU32 getNbActivatedDeformableSurfaces() const { return 0; }
virtual Dy::DeformableSurface** getDeactivatedDeformableSurfaces() const { return NULL; }
virtual Dy::DeformableSurface** getActivatedDeformableSurfaces() const { return NULL; }
virtual PxU32 getNbDeactivatedDeformableVolumes() const { return 0; }
virtual PxU32 getNbActivatedDeformableVolumes() const { return 0; }
virtual Dy::DeformableVolume** getDeactivatedDeformableVolumes() const { return NULL; }
virtual Dy::DeformableVolume** getActivatedDeformableVolumes() const { return NULL; }
virtual const PxReal* getDeformableVolumeWakeCounters() const { return NULL; }
virtual bool hasDeformableSurfaces() const { return false; }
virtual bool hasDeformableVolumes() const { return false; }
#endif
protected:
	PxsSimulationControllerCallback* mCallback;	// owner-supplied callback hooks
public:
	const PxIntBool mGPU; // PT: true for GPU version, used to quickly skip calls for CPU version
};
#if PX_SUPPORT_OMNI_PVD
// Computes, for one articulation write channel, the number of sub-elements a single
// articulation contributes (nbSubElements) and the total byte size of one
// articulation's block (blockSize = nbSubElements * sizeof(one sub-element)).
PX_FORCE_INLINE void PxsSimulationControllerOVDCallbacks::getArticulationDataElements(PxArticulationGPUAPIWriteType::Enum dataType, PxU32 maxLinks, PxU32 maxDofs, PxU32 maxFixedTendons, PxU32 maxTendonJoints, PxU32 maxSpatialTendons, PxU32 maxSpatialTendonAttachments,
	PxU32& nbSubElements, PxU32& blockSize) const
{
	PxU32 elementCount = 0;	// sub-elements per articulation for this channel
	PxU32 elementBytes = 0;	// size of one sub-element

	switch(dataType)
	{
	// Per-dof scalar channels.
	case PxArticulationGPUAPIWriteType::eJOINT_POSITION:
	case PxArticulationGPUAPIWriteType::eJOINT_VELOCITY:
	case PxArticulationGPUAPIWriteType::eJOINT_FORCE:
	case PxArticulationGPUAPIWriteType::eJOINT_TARGET_VELOCITY:
	case PxArticulationGPUAPIWriteType::eJOINT_TARGET_POSITION:
		elementCount = maxDofs;
		elementBytes = sizeof(PxReal);
		break;
	// One transform per articulation root.
	case PxArticulationGPUAPIWriteType::eROOT_GLOBAL_POSE:
		elementCount = 1;
		elementBytes = sizeof(PxTransform);
		break;
	// One vector per articulation root.
	case PxArticulationGPUAPIWriteType::eROOT_LINEAR_VELOCITY:
	case PxArticulationGPUAPIWriteType::eROOT_ANGULAR_VELOCITY:
		elementCount = 1;
		elementBytes = sizeof(PxVec3);
		break;
	// One vector per link.
	case PxArticulationGPUAPIWriteType::eLINK_FORCE:
	case PxArticulationGPUAPIWriteType::eLINK_TORQUE:
		elementCount = maxLinks;
		elementBytes = sizeof(PxVec3);
		break;
	case PxArticulationGPUAPIWriteType::eFIXED_TENDON:
		elementCount = maxFixedTendons;
		elementBytes = sizeof(PxGpuFixedTendonData);
		break;
	// Tendon-joint data is laid out as a dense tendons x joints grid.
	case PxArticulationGPUAPIWriteType::eFIXED_TENDON_JOINT:
		elementCount = maxFixedTendons * maxTendonJoints;
		elementBytes = sizeof(PxGpuTendonJointCoefficientData);
		break;
	case PxArticulationGPUAPIWriteType::eSPATIAL_TENDON:
		elementCount = maxSpatialTendons;
		elementBytes = sizeof(PxGpuSpatialTendonData);
		break;
	// Attachment data is laid out as a dense tendons x attachments grid.
	case PxArticulationGPUAPIWriteType::eSPATIAL_TENDON_ATTACHMENT:
		elementCount = maxSpatialTendons * maxSpatialTendonAttachments;
		elementBytes = sizeof(PxGpuTendonAttachmentData);
		break;
	default:
		// Unknown write type: assert and report a zero-sized block.
		PX_ALWAYS_ASSERT();
		break;
	}

	nbSubElements = elementCount;
	blockSize = elementCount * elementBytes;
}
#endif
}
#endif

View File

@@ -0,0 +1,142 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXS_TRANSFORM_CACHE_H
#define PXS_TRANSFORM_CACHE_H
#include "CmIDPool.h"
#include "foundation/PxBitMap.h"
#include "foundation/PxTransform.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxPinnedArray.h"
#define PX_DEFAULT_CACHE_SIZE 512
namespace physx
{
// Flag bits stored alongside each cached transform (see PxsCachedTransform::flags).
struct PxsTransformFlag
{
	enum Flags
	{
		eFROZEN = (1 << 0)	// entry is frozen; tested via PxsCachedTransform::isFrozen()
	};
};
// One entry of the transform cache: a pose plus PxsTransformFlag bits,
// 16-byte aligned (the padding keeps entries aligned for bulk/GPU copies - TODO confirm).
struct PX_ALIGN_PREFIX(16) PxsCachedTransform
{
	PxTransform transform;	// cached pose for this entry
	PxU32 flags;			// PxsTransformFlag::Flags bits
	// Non-zero iff the eFROZEN bit is set.
	PX_FORCE_INLINE PxU32 isFrozen() const { return flags & PxsTransformFlag::eFROZEN; }
}
PX_ALIGN_SUFFIX(16);
// Growable cache of per-entry transforms + flags, backed by pinned memory
// (PxCachedTransformArrayPinned) so it can be transferred to the GPU.
// Tracks a "used size" (highest initialized index + 1) separately from the
// array capacity, and a dirty bit for change detection.
class PxsTransformCache : public PxUserAllocated
{
	typedef PxU32 RefCountType;
public:
	// The allocator callback provides the (pinned) memory for the transform array.
	// The cache starts empty and marked as changed.
	PxsTransformCache(PxVirtualAllocatorCallback& allocatorCallback) : mTransformCache(PxVirtualAllocator(&allocatorCallback)), mHasAnythingChanged(true)
	{
		mUsedSize = 0;
	}

	// Makes 'index' a valid slot: grows the backing store to the next power of two
	// if needed and bumps the used-entry count. Newly exposed slots remain
	// uninitialized until setTransformCache() writes them.
	void initEntry(PxU32 index)
	{
		PxU32 oldCapacity = mTransformCache.capacity();
		if (index >= oldCapacity)
		{
			PxU32 newCapacity = PxNextPowerOfTwo(index);
			mTransformCache.reserve(newCapacity);
			mTransformCache.forceSize_Unsafe(newCapacity);
		}
		mUsedSize = PxMax(mUsedSize, index + 1u);
	}

	// Writes one entry and marks the cache dirty. 'index' must have been
	// prepared via initEntry().
	PX_FORCE_INLINE void setTransformCache(const PxTransform& transform, PxU32 flags, PxU32 index, PxU32 /*indexFrom*/)
	{
		mTransformCache[index].transform = transform;
		mTransformCache[index].flags = flags;
		mHasAnythingChanged = true;
	}

	PX_FORCE_INLINE const PxsCachedTransform& getTransformCache(PxU32 index) const
	{
		return mTransformCache[index];
	}

	PX_FORCE_INLINE PxsCachedTransform& getTransformCache(PxU32 index)
	{
		return mTransformCache[index];
	}

	// Translates all cached positions by 'shift' (used for origin shifting).
	PX_FORCE_INLINE void shiftTransforms(const PxVec3& shift)
	{
		// Fix: iterate only the used range [0, mUsedSize). The previous version
		// looped up to capacity(), reading and writing slots that were never
		// initialized by setTransformCache() - wasted work on garbage data.
		for (PxU32 i = 0; i < mUsedSize; i++)
		{
			mTransformCache[i].transform.p += shift;
		}
		mHasAnythingChanged = true;
	}

	// Number of used entries (highest initialized index + 1), not the capacity.
	PX_FORCE_INLINE PxU32 getTotalSize() const
	{
		return mUsedSize;
	}

	PX_FORCE_INLINE const PxsCachedTransform* getTransforms() const
	{
		return mTransformCache.begin();
	}

	PX_FORCE_INLINE PxsCachedTransform* getTransforms()
	{
		return mTransformCache.begin();
	}

	// Direct access to the pinned array (e.g. for DMA setup).
	PX_FORCE_INLINE PxCachedTransformArrayPinned* getCachedTransformArray()
	{
		return &mTransformCache;
	}

	// Dirty-bit management: set on any write/shift, cleared by the consumer.
	PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; }
	PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; }
	PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; }

protected:
	PxCachedTransformArrayPinned mTransformCache;	// pinned storage, sized to capacity
private:
	PxU32 mUsedSize;		// number of entries exposed via initEntry()
	bool mHasAnythingChanged;	// true if any entry changed since resetChangedState()
};
}
#endif

View File

@@ -0,0 +1,205 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXV_NPHASE_IMPLEMENTATION_CONTEXT_H
#define PXV_NPHASE_IMPLEMENTATION_CONTEXT_H
#include "PxSceneDesc.h"
#include "PxsContactManagerState.h"
#include "foundation/PxArray.h"
#include "PxsNphaseCommon.h"
// PT: TODO: forward decl don't work easily with templates, to revisit
#include "PxsMaterialCore.h"
#include "PxsDeformableSurfaceMaterialCore.h"
#include "PxsDeformableVolumeMaterialCore.h"
#include "PxsPBDMaterialCore.h"
namespace physx
{
namespace IG
{
class IslandSim;
typedef PxU32 EdgeIndex;
}
namespace Cm
{
class FanoutTask;
}
namespace Sc
{
class ShapeInteraction;
}
class PxNodeIndex;
class PxBaseTask;
class PxsContext;
struct PxsShapeCore;
class PxsContactManager;
struct PxsContactManagerOutput;
struct PxsTorsionalFrictionData;
class PxsContactManagerOutputIterator
{
PxU32 mOffsets[1<<PxsContactManagerBase::MaxBucketBits];
PxsContactManagerOutput* mOutputs;
public:
PxsContactManagerOutputIterator() : mOutputs(NULL)
{
}
PxsContactManagerOutputIterator(const PxU32* offsets, PxU32 nbOffsets, PxsContactManagerOutput* outputs) : mOutputs(outputs)
{
PX_ASSERT(nbOffsets <= (1<<PxsContactManagerBase::MaxBucketBits));
for(PxU32 a = 0; a < nbOffsets; ++a)
{
mOffsets[a] = offsets[a];
}
}
PX_FORCE_INLINE PxsContactManagerOutput& getContactManagerOutput(PxU32 id)
{
PX_ASSERT((id & PxsContactManagerBase::NEW_CONTACT_MANAGER_MASK) == 0);
PxU32 bucketId = PxsContactManagerBase::computeBucketIndexFromId(id);
PxU32 cmOutId = PxsContactManagerBase::computeIndexFromId(id);
return mOutputs[mOffsets[bucketId] + cmOutId];
}
PxU32 getIndex(PxU32 id) const
{
PX_ASSERT((id & PxsContactManagerBase::NEW_CONTACT_MANAGER_MASK) == 0);
PxU32 bucketId = PxsContactManagerBase::computeBucketIndexFromId(id);
PxU32 cmOutId = PxsContactManagerBase::computeIndexFromId(id);
return mOffsets[bucketId] + cmOutId;
}
};
// Abstract interface of a narrow-phase implementation (CPU fallback or GPU).
// Drives the per-frame contact-manager update pipeline and mirrors shape and
// material state into the narrow phase.
class PxvNphaseImplementationContext
{
	PX_NOCOPY(PxvNphaseImplementationContext)

	PxsContext& mContext;	// low-level context this narrow phase belongs to
public:
	PxvNphaseImplementationContext(PxsContext& context): mContext(context) {}
	virtual ~PxvNphaseImplementationContext() {}
	virtual void destroy() = 0;

	// --- Per-frame update pipeline ---
	virtual void updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation, PxBaseTask* firstPassContinuation, Cm::FanoutTask* updateBoundAndShapeTask) = 0;
	virtual void postBroadPhaseUpdateContactManager(PxBaseTask* continuation) = 0;
	virtual void secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation) = 0;
	virtual void fetchUpdateContactManager() = 0;

	// --- Contact-manager registration ---
	virtual void registerContactManager(PxsContactManager* cm, const Sc::ShapeInteraction* interaction, PxI32 touching, PxU32 patchCount) = 0;
//	virtual void registerContactManagers(PxsContactManager** cm, Sc::ShapeInteraction** shapeInteractions, PxU32 nbContactManagers, PxU32 maxContactManagerId) = 0;
	virtual void unregisterContactManager(PxsContactManager* cm) = 0;
	virtual void refreshContactManager(PxsContactManager* cm) = 0;

	// --- Shape/aggregate registration ---
	virtual void registerShape(const PxNodeIndex& nodeIndex, const PxsShapeCore& shapeCore, const PxU32 transformCacheID, PxActor* actor, const bool isDeformableSurface = false) = 0;
	virtual void unregisterShape(const PxsShapeCore& shapeCore, const PxU32 transformCacheID, const bool isDeformableSurface = false) = 0;
	virtual void registerAggregate(const PxU32 transformCacheID) = 0;

	// --- Material registries, one register/update/unregister triple per material family ---
	virtual void registerMaterial(const PxsMaterialCore& materialCore) = 0;
	virtual void updateMaterial(const PxsMaterialCore& materialCore) = 0;
	virtual void unregisterMaterial(const PxsMaterialCore& materialCore) = 0;
	virtual void registerMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
	virtual void updateMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
	virtual void unregisterMaterial(const PxsDeformableSurfaceMaterialCore& materialCore) = 0;
	virtual void registerMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
	virtual void updateMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
	virtual void unregisterMaterial(const PxsDeformableVolumeMaterialCore& materialCore) = 0;
	virtual void registerMaterial(const PxsPBDMaterialCore& materialCore) = 0;
	virtual void updateMaterial(const PxsPBDMaterialCore& materialCore) = 0;
	virtual void unregisterMaterial(const PxsPBDMaterialCore& materialCore) = 0;
	virtual void updateShapeMaterial(const PxsShapeCore& shapeCore) = 0;

	// --- Task kickoff and result access ---
	virtual void startNarrowPhaseTasks() = 0;
	virtual void appendContactManagers() = 0;
	virtual PxsContactManagerOutput& getNewContactManagerOutput(PxU32 index) = 0;
	virtual PxsContactManagerOutputIterator getContactManagerOutputs() = 0;
	virtual void setContactModifyCallback(PxContactModifyCallback* callback) = 0;

	// --- Context/locking helpers ---
	virtual void acquireContext() = 0;
	virtual void releaseContext() = 0;
	virtual void preallocateNewBuffers(PxU32 nbNewPairs, PxU32 maxIndex) = 0;
	virtual void lock() = 0;
	virtual void unlock() = 0;

	// --- Lost/found patch reporting ---
	virtual PxsContactManagerOutputCounts* getLostFoundPatchOutputCounts() = 0;
	virtual PxsContactManager** getLostFoundPatchManagers() = 0;
	virtual PxU32 getNbLostFoundPatchManagers() = 0;

	//GPU-specific buffers. Return null for CPU narrow phase
	virtual PxsContactManagerOutput* getGPUContactManagerOutputBase() = 0;
	virtual PxReal* getGPURestDistances() = 0;
	virtual Sc::ShapeInteraction** getGPUShapeInteractions() = 0;
	virtual PxsTorsionalFrictionData* getGPUTorsionalData() = 0;
};
// Extension of the narrow-phase interface used by the CPU fallback path:
// variants of the pipeline entry points that take explicit output buffers, plus
// read-only access to the buffers the GPU path would otherwise own.
class PxvNphaseImplementationFallback: public PxvNphaseImplementationContext
{
	PX_NOCOPY(PxvNphaseImplementationFallback)
public:
	PxvNphaseImplementationFallback(PxsContext& context) : PxvNphaseImplementationContext(context) {}
	virtual ~PxvNphaseImplementationFallback() {}
	virtual void unregisterContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) = 0;
	virtual void processContactManager(PxReal dt, PxsContactManagerOutput* cmOutputs, PxBaseTask* continuation) = 0;
	virtual void processContactManagerSecondPass(PxReal dt, PxBaseTask* continuation) = 0;
	virtual void refreshContactManagerFallback(PxsContactManager* cm, PxsContactManagerOutput* cmOutputs) = 0;
	virtual void appendContactManagersFallback(PxsContactManagerOutput* outputs) = 0;
	virtual void removeContactManagersFallback(PxsContactManagerOutput* cmOutputs) = 0;
	virtual const Sc::ShapeInteraction*const* getShapeInteractionsGPU() const = 0;
	virtual const PxReal* getRestDistancesGPU() const = 0;
	virtual const PxsTorsionalFrictionData* getTorsionalDataGPU() const = 0;
};
PxvNphaseImplementationFallback* createNphaseImplementationContext(PxsContext& context, IG::IslandSim* islandSim, PxVirtualAllocatorCallback* allocator, bool gpuDynamics);
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,64 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxsContactManager.h"
using namespace physx;
// Constructs a contact manager bound to pool slot 'index'. Only the members
// that must be valid before the pipeline first touches the manager are
// initialized here (see the original note below on why not all of them are).
PxsContactManager::PxsContactManager(PxU32 index) : mFlags(0), mCmIndex(index)
{
	// PT: TODO: any reason why we don't initialize all members here, e.g. shapeCore pointers?
	// PT: it might be because of the way we preallocate contact managers in the pipeline, and release the ones
	// we filtered out. Maybe properly initializing everything "for no reason" in that case is costly.
	// Still, it is unclear why we initialize *some* of the members there then.
	mNpUnit.mRigidCore0 = NULL;
	mNpUnit.mRigidCore1 = NULL;
	mNpUnit.mRestDistance = 0;
	mNpUnit.mFrictionDataPtr = NULL;
	mNpUnit.mFrictionPatchCount = 0;
	mNpUnit.mFlags = 0;
	// Both bodies at full dominance by default (symmetric pair).
	mNpUnit.setDominance0(1u);
	mNpUnit.setDominance1(1u);
}
// No owned resources to release; cached data lifetime is managed elsewhere.
PxsContactManager::~PxsContactManager()
{
}
void PxsContactManager::setCCD(bool enable)
{
PxU32 flags = mFlags & (~PXS_CM_CCD_CONTACT);
if (enable)
flags |= PXS_CM_CCD_LINEAR;
else
flags &= ~PXS_CM_CCD_LINEAR;
mFlags = flags;
}

View File

@@ -0,0 +1,599 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "PxvConfig.h"
#include "PxcContactCache.h"
#include "PxsRigidBody.h"
#include "PxsContactManager.h"
#include "PxsContext.h"
#include "PxPhysXConfig.h"
#include "foundation/PxBitMap.h"
#include "CmFlushPool.h"
#include "PxsMaterialManager.h"
#include "PxSceneDesc.h"
#include "PxsCCD.h"
#include "PxvGeometry.h"
#include "PxvManager.h"
#include "PxsSimpleIslandManager.h"
#if PX_SUPPORT_GPU_PHYSX
#include "PxPhysXGpu.h"
#endif
#include "PxcNpContactPrepShared.h"
#include "PxcNpCache.h"
using namespace physx;
// Low-level context constructor: wires up the thread-context and object pools,
// caches scene-level configuration flags, and pre-sizes the contact-data
// block pool from the scene descriptor.
PxsContext::PxsContext(const PxSceneDesc& desc, PxTaskManager* taskManager, Cm::FlushPool& taskPool, PxCudaContextManager* cudaContextManager, PxU32 poolSlabSize, PxU64 contextID) :
	mNpThreadContextPool			(this),
	mContactManagerPool				("mContactManagerPool", poolSlabSize),
	mManifoldPool					("mManifoldPool", poolSlabSize),
	mSphereManifoldPool				("mSphereManifoldPool", poolSlabSize),
	mContactModifyCallback			(NULL),
	mNpImplementationContext		(NULL),
	mNpFallbackImplementationContext(NULL),
	mTaskManager					(taskManager),
	mTaskPool						(taskPool),
	mCudaContextManager				(cudaContextManager),
	mPCM							(desc.flags & PxSceneFlag::eENABLE_PCM),
	mContactCache					(false),
	mCreateAveragePoint				(desc.flags & PxSceneFlag::eENABLE_AVERAGE_POINT),
	mContextID						(contextID)
{
	clearManagerTouchEvents();
	mVisualizationCullingBox.setEmpty();

	// All debug-visualization scales start at zero (visualization disabled).
	PxMemZero(mVisualizationParams, sizeof(PxReal) * PxVisualizationParameter::eNUM_VALUES);

	mNpMemBlockPool.init(desc.nbContactDataBlocks, desc.maxNbContactDataBlocks);
}
// Releases the transform cache and tears down the contact-manager pool
// explicitly, in a defined order (see comment below).
PxsContext::~PxsContext()
{
	PX_DELETE(mTransformCache);
	mContactManagerPool.destroy(); //manually destroy the contact manager pool, otherwise pool deletion order is random and we can get into trouble with references into other pools needed during destruction.
}
// =========================== Create methods
namespace physx
{
// Lookup table indexed as [geomType0][geomType1]: whether the PCM pipeline
// creates a persistent-manifold cache for the given geometry-type pair
// (consumed by PxsContext::createCache). Rows/columns follow PxGeometryType order.
const bool gEnablePCMCaching[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//eSPHERE,
	{
		false,				//eSPHERE
		false,				//ePLANE
		false,				//eCAPSULE
		false,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		true,				//eTRIANGLEMESH
		true,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//ePLANE
	{
		false,				//eSPHERE
		false,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//eCAPSULE,
	{
		false,				//eSPHERE
		true,				//ePLANE
		false,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		true,				//eTRIANGLEMESH
		true,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//eBOX,
	{
		false,				//eSPHERE
		true,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		true,				//eTRIANGLEMESH
		true,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//eCONVEX,
	{
		false,				//eSPHERE
		false,				//ePLANE
		false,				//eCAPSULE
		false,				//eBOX
		false,				//eCONVEX
		false,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		false,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		false,				//eCUSTOM
	},

	//eCONVEXMESH,
	{
		true,				//eSPHERE
		true,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		true,				//eTRIANGLEMESH
		true,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//ePARTICLESYSTEM
	{
		false,				//eSPHERE
		false,				//ePLANE
		false,				//eCAPSULE
		false,				//eBOX
		false,				//eCONVEX
		false,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		false,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		false,				//eCUSTOM
	},

	//eSOFTBODY
	{
		false,				//eSPHERE
		false,				//ePLANE
		false,				//eCAPSULE
		false,				//eBOX
		false,				//eCONVEX
		false,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		false,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		false,				//eCUSTOM
	},

	//eTRIANGLEMESH,
	{
		true,				//eSPHERE
		false,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//eHEIGHTFIELD,
	{
		true,				//eSPHERE
		false,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		true,				//eSOFTBODY,
		false,				//eTRIANGLEMESH
		false,				//eHEIGHTFIELD
		true,				//eCUSTOM
	},

	//eCUSTOM,
	{
		true,				//eSPHERE
		true,				//ePLANE
		true,				//eCAPSULE
		true,				//eBOX
		false,				//eCONVEX
		true,				//eCONVEXMESH
		false,				//ePARTICLESYSTEM
		false,				//eSOFTBODY,
		true,				//eTRIANGLEMESH
		true,				//eHEIGHTFIELD
		true,				//eCUSTOM
	}
};

// Guard against the table falling out of sync with PxGeometryType.
PX_COMPILE_TIME_ASSERT(sizeof(gEnablePCMCaching) / sizeof(gEnablePCMCaching[0]) == PxGeometryType::eGEOMETRY_COUNT);
}
// Allocates the context's transform cache, backed by the supplied allocator
// callback (presumably pinned memory for GPU transfers - see PxsTransformCache).
void PxsContext::createTransformCache(PxVirtualAllocatorCallback& allocatorCallback)
{
	mTransformCache = PX_NEW(PxsTransformCache)(allocatorCallback);
}
// Returns a ready-to-use contact manager: either the caller-supplied one or a
// fresh one from the pool. Returns NULL (with a one-time warning) if the pool
// is exhausted. Only pool-allocated managers are registered as active.
PxsContactManager* PxsContext::createContactManager(PxsContactManager* contactManager, bool useCCD)
{
	const bool allocatedFromPool = (contactManager == NULL);
	PxsContactManager* manager = allocatedFromPool ? mContactManagerPool.get() : contactManager;

	if(!manager)
	{
		PX_WARN_ONCE("Reached limit of contact pairs.");
		return NULL;
	}

	// Recycled managers may carry stale narrow-phase state.
	manager->getWorkUnit().clearCachedState();

	if(allocatedFromPool)
		setActiveContactManager(manager, useCCD);

	return manager;
}
// Initializes the narrow-phase cache for a geometry-type pair. With PCM enabled
// and the pair whitelisted in gEnablePCMCaching, convex-vs-convex pairs get a
// pooled persistent manifold (sphere-specialized when either side is a sphere),
// mesh/heightfield pairs get a multi-manifold marker, and all other pairs get
// an empty cache. Without PCM the cache is left untouched.
void PxsContext::createCache(Gu::Cache& cache, PxGeometryType::Enum geomType0, PxGeometryType::Enum geomType1)
{
	if(!mPCM)
		return;

	if(!gEnablePCMCaching[geomType0][geomType1])
	{
		//cache.manifold = 0;
		cache.mCachedData = NULL;
		cache.mManifoldFlags = 0;
		return;
	}

	const bool bothSimpleConvex = (geomType0 <= PxGeometryType::eCONVEXMESH) && (geomType1 <= PxGeometryType::eCONVEXMESH);
	if(!bothSimpleConvex)
	{
		//ML: raised 1 to indicate the manifold is multiManifold which is for contact gen in mesh/height field
		//cache.manifold = 1;
		cache.setMultiManifold(NULL);
		return;
	}

	Gu::PersistentContactManifold* manifold;
	if(geomType0 == PxGeometryType::eSPHERE || geomType1 == PxGeometryType::eSPHERE)
	{
		// Sphere pairs use the smaller, specialized manifold pool.
		manifold = mSphereManifoldPool.allocate();
		PX_PLACEMENT_NEW(manifold, Gu::SpherePersistentContactManifold());
	}
	else
	{
		manifold = mManifoldPool.allocate();
		PX_PLACEMENT_NEW(manifold, Gu::LargePersistentContactManifold());
	}
	cache.setManifold(manifold);
	cache.getManifold().clearManifold();
}
// Returns a contact manager to the pool and clears the per-index event bits
// associated with it.
void PxsContext::destroyContactManager(PxsContactManager* cm)
{
	const PxU32 index = cm->getIndex();

	// Only CCD-enabled managers have a bit in the CCD bitmap.
	if(cm->getCCD())
		mActiveContactManagersWithCCD.growAndReset(index);
	//mActiveContactManager.growAndReset(index);
	mContactManagerTouchEvent.growAndReset(index);

	mContactManagerPool.put(cm);
}
// Releases a narrow-phase cache. Single persistent manifolds are returned to
// the matching pool (sphere vs large, discriminated by capacity); multi-manifold
// markers own no pooled memory. The cache fields are reset either way.
void PxsContext::destroyCache(Gu::Cache& cache)
{
	if(!cache.isManifold())
		return;

	if(!cache.isMultiManifold())
	{
		Gu::PersistentContactManifold& manifold = cache.getManifold();
		const bool isSphereManifold = (manifold.mCapacity == GU_SPHERE_MANIFOLD_CACHE_SIZE);
		if(isSphereManifold)
			mSphereManifoldPool.deallocate(static_cast<Gu::SpherePersistentContactManifold*>(&manifold));
		else
			mManifoldPool.deallocate(static_cast<Gu::LargePersistentContactManifold*>(&manifold));
	}

	cache.mCachedData = NULL;
	cache.mManifoldFlags = 0;
}
// Hands a caller-owned scratch memory block to the scratch allocator for this
// simulation step.
void PxsContext::setScratchBlock(void* addr, PxU32 size)
{
	mScratchAllocator.setBlock(addr, size);
}
// Applies a scene origin shift: moves all cached transforms and the
// visualization culling box by -shift. The contact-cache point shifting below
// is currently compiled out (#if 0) - kept for reference.
void PxsContext::shiftOrigin(const PxVec3& shift)
{
	// transform cache
	mTransformCache->shiftTransforms(-shift);

#if 0
	// Disabled: would also shift the cached contact points stored in each active
	// contact manager's pair cache (non-manifold caches only).
	if (getContactCacheFlag())
	{
		//Iterate all active contact managers
		PxBitMap::Iterator it(mActiveContactManager);
		PxU32 index = it.getNext();
		while(index != PxBitMap::Iterator::DONE)
		{
			PxsContactManager* cm = mContactManagerPool.findByIndexFast(index);

			PxcNpWorkUnit& npwUnit = cm->getWorkUnit();

			// contact cache
			if(!npwUnit.pairCache.isManifold())
			{
				PxU8* contactCachePtr = npwUnit.pairCache.mCachedData;
				if (contactCachePtr)
				{
					PxcLocalContactsCache* lcc;
					PxU8* contacts = PxcNpCacheRead(npwUnit.pairCache, lcc);
#if PX_DEBUG
					PxcLocalContactsCache testCache;
					PxU32 testBytes;
					const PxU8* testPtr = PxcNpCacheRead2(npwUnit.pairCache, testCache, testBytes);
#endif
					lcc->mTransform0.p -= shift;
					lcc->mTransform1.p -= shift;

					const PxU32 nbContacts = lcc->mNbCachedContacts;
					const bool sameNormal = lcc->mSameNormal;
					const bool useFaceIndices = lcc->mUseFaceIndices;

					for(PxU32 i=0; i < nbContacts; i++)
					{
						if (i != nbContacts-1)
							PxPrefetchLine(contacts, 128);

						if(!i || !sameNormal)
							contacts += sizeof(PxVec3);

						PxVec3* cachedPoint = reinterpret_cast<PxVec3*>(contacts);
						*cachedPoint -= shift;
						contacts += sizeof(PxVec3);
						contacts += sizeof(PxReal);

						if(useFaceIndices)
							contacts += 2 * sizeof(PxU32);
					}
#if PX_DEBUG
					PX_ASSERT(contacts == (testPtr + testBytes));
#endif
				}
			}

			index = it.getNext();
		}
	}
#endif

	// adjust visualization culling box
	if(!mVisualizationCullingBox.isEmpty())
	{
		mVisualizationCullingBox.minimum -= shift;
		mVisualizationCullingBox.maximum -= shift;
	}
}
// Swaps the narrow-phase cache streams in the memory-block pool (double-buffered
// per step).
void PxsContext::swapStreams()
{
	mNpMemBlockPool.swapNpCacheStreams();
}
// Merge phase after discrete narrow phase: appends newly created contact
// managers and folds every worker thread's local results (touch counts,
// sim stats, change-touch bitmaps, max patch counts) into the context totals.
void PxsContext::mergeCMDiscreteUpdateResults(PxBaseTask* /*continuation*/)
{
	PX_PROFILE_ZONE("Sim.narrowPhaseMerge", mContextID);

	mNpImplementationContext->appendContactManagers();

	//Note: the iterator extracts all the items and returns them to the cache on destruction(for thread safety).
	PxcThreadCoherentCacheIterator<PxcNpThreadContext, PxcNpContext> threadContextIt(mNpThreadContextPool);

	for(PxcNpThreadContext* threadContext = threadContextIt.getNext(); threadContext; threadContext = threadContextIt.getNext())
	{
		// Accumulate per-thread lost/new touch counts.
		mCMTouchEventCount[PXS_LOST_TOUCH_COUNT] += threadContext->getLocalLostTouchCount();
		mCMTouchEventCount[PXS_NEW_TOUCH_COUNT] += threadContext->getLocalNewTouchCount();

#if PX_ENABLE_SIM_STATS
		for(PxU32 i=0;i<PxGeometryType::eGEOMETRY_COUNT;i++)
		{
#if PX_DEBUG
			// Pair stats are stored upper-triangular (i <= j); the lower triangle must stay empty.
			for(PxU32 j=0; j<i; j++)
				PX_ASSERT(!threadContext->mDiscreteContactPairs[i][j]);
#endif
			for(PxU32 j=i; j<PxGeometryType::eGEOMETRY_COUNT; j++)
			{
				const PxU32 nb = threadContext->mDiscreteContactPairs[i][j];
				const PxU32 nbModified = threadContext->mModifiedContactPairs[i][j];
				mSimStats.mNbDiscreteContactPairs[i][j] += nb;
				mSimStats.mNbModifiedContactPairs[i][j] += nbModified;
				mSimStats.mNbDiscreteContactPairsTotal += nb;
			}
		}
		mSimStats.mNbDiscreteContactPairsWithCacheHits += threadContext->mNbDiscreteContactPairsWithCacheHits;
		mSimStats.mNbDiscreteContactPairsWithContacts += threadContext->mNbDiscreteContactPairsWithContacts;
		mSimStats.mTotalCompressedContactSize += threadContext->mCompressedCacheSize;
		//KS - this data is not available yet
		//mSimStats.mTotalConstraintSize += threadContext->mConstraintSize;
		threadContext->clearStats();
#else
		PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
		// OR the thread-local change-touch bits into the context-wide bitmap.
		mContactManagerTouchEvent.combineInPlace<PxBitMap::OR>(threadContext->getLocalChangeTouch());
		//mContactManagerPatchChangeEvent.combineInPlace<PxBitMap::OR>(threadContext->getLocalPatchChangeMap());

		// Track the largest patch count seen this step, then reset the thread-local value.
		mMaxPatches = PxMax(mMaxPatches, threadContext->mMaxPatches);
		threadContext->mMaxPatches = 0;
	}
}
// Delegates the contact-manager update to the active narrow-phase implementation context.
void PxsContext::updateContactManager(PxReal dt, bool hasContactDistanceChanged, PxBaseTask* continuation, PxBaseTask* firstPassContinuation,
	Cm::FanoutTask* updateBoundAndShapeTask)
{
	PX_ASSERT(mNpImplementationContext);
	mNpImplementationContext->updateContactManager(dt, hasContactDistanceChanged, continuation, firstPassContinuation, updateBoundAndShapeTask);
}
// Delegates the second narrow-phase pass to the active implementation context.
void PxsContext::secondPassUpdateContactManager(PxReal dt, PxBaseTask* continuation)
{
PX_ASSERT(mNpImplementationContext);
mNpImplementationContext->secondPassUpdateContactManager(dt, continuation);
}
// Fetches narrow-phase results from the implementation context, then merges the
// per-thread outputs into context-wide state (see mergeCMDiscreteUpdateResults).
void PxsContext::fetchUpdateContactManager()
{
PX_ASSERT(mNpImplementationContext);
mNpImplementationContext->fetchUpdateContactManager();
mergeCMDiscreteUpdateResults(NULL);
}
void PxsContext::resetThreadContexts()
{
//Note: the iterator extracts all the items and returns them to the cache on destruction(for thread safety).
PxcThreadCoherentCacheIterator<PxcNpThreadContext, PxcNpContext> threadContextIt(mNpThreadContextPool);
PxcNpThreadContext* threadContext = threadContextIt.getNext();
while(threadContext != NULL)
{
threadContext->reset(mContactManagerTouchEvent.size());
threadContext = threadContextIt.getNext();
}
}
// Copies the current touch-event counters into whichever optional output slots are non-NULL.
// Always returns true.
bool PxsContext::getManagerTouchEventCount(PxU32* newTouch, PxU32* lostTouch, PxU32* ccdTouch) const
{
	if(ccdTouch)
		*ccdTouch = mCMTouchEventCount[PXS_CCD_RETOUCH_COUNT];
	if(lostTouch)
		*lostTouch = mCMTouchEventCount[PXS_LOST_TOUCH_COUNT];
	if(newTouch)
		*newTouch = mCMTouchEventCount[PXS_NEW_TOUCH_COUNT];
	return true;
}
// Walks the touch-event bitmap and routes each flagged contact manager into one of three
// caller-provided arrays: new touches, lost touches, or CCD re-touches. On entry the counts
// hold the arrays' capacities (asserted in debug builds); on return they hold the number of
// events actually written.
void PxsContext::fillManagerTouchEvents(PxvContactManagerTouchEvent* newTouch, PxU32& newTouchCount,
PxvContactManagerTouchEvent* lostTouch, PxU32& lostTouchCount,
PxvContactManagerTouchEvent* ccdTouch, PxU32& ccdTouchCount)
{
PX_PROFILE_ZONE("PxsContext::fillManagerTouchEvents", mContextID);
// Keep the original cursors to compute the written counts at the end.
const PxvContactManagerTouchEvent* newTouchStart = newTouch;
const PxvContactManagerTouchEvent* lostTouchStart = lostTouch;
const PxvContactManagerTouchEvent* ccdTouchStart = ccdTouch;
// End pointers are only used for capacity asserts in debug builds.
const PxvContactManagerTouchEvent* newTouchEnd = newTouch + newTouchCount;
const PxvContactManagerTouchEvent* lostTouchEnd = lostTouch + lostTouchCount;
const PxvContactManagerTouchEvent* ccdTouchEnd = ccdTouch + ccdTouchCount;
PX_UNUSED(newTouchEnd);
PX_UNUSED(lostTouchEnd);
PX_UNUSED(ccdTouchEnd);
const PxU32* bits = mContactManagerTouchEvent.getWords();
if(bits)
{
// PT: ### bitmap iterator pattern
const PxU32 lastSetBit = mContactManagerTouchEvent.findLast();
for(PxU32 w = 0; w <= lastSetBit >> 5; ++w)
{
// b &= b-1 clears the lowest set bit each iteration.
for(PxU32 b = bits[w]; b; b &= b-1)
{
const PxU32 index = PxU32(w<<5|PxLowestSetBit(b));
PxsContactManager* cm = mContactManagerPool.findByIndexFast(index);
if(cm->getTouchStatus())
{
if(!cm->getHasCCDRetouch())
{
PX_ASSERT(newTouch < newTouchEnd);
newTouch->setCMTouchEventUserData(cm->getShapeInteraction());
newTouch++;
}
else
{
// Touching pair re-found by CCD: report it in the CCD array and clear the flag.
PX_ASSERT(ccdTouch);
PX_ASSERT(ccdTouch < ccdTouchEnd);
ccdTouch->setCMTouchEventUserData(cm->getShapeInteraction());
cm->clearCCDRetouch();
ccdTouch++;
}
}
else
{
PX_ASSERT(lostTouch < lostTouchEnd);
lostTouch->setCMTouchEventUserData(cm->getShapeInteraction());
lostTouch++;
}
}
}
}
// Return the number of events written to each array.
newTouchCount = PxU32(newTouch - newTouchStart);
lostTouchCount = PxU32(lostTouch - lostTouchStart);
ccdTouchCount = PxU32(ccdTouch - ccdTouchStart);
}
// Marks the start of a simulation update; clears accumulated simulation statistics
// when stats collection is compiled in.
void PxsContext::beginUpdate()
{
#if PX_ENABLE_SIM_STATS
mSimStats.clearAll();
#else
PX_CATCH_UNDEFINED_ENABLE_SIM_STATS
#endif
}

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PxsMemoryManager.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxArray.h"
using namespace physx;
namespace
{
// Host-memory allocator that simply forwards to the foundation PX_ALLOC/PX_FREE macros.
// The file/line/name debug parameters of allocate() are ignored.
class PxsDefaultMemoryAllocator : public PxVirtualAllocatorCallback
{
public:
virtual void* allocate(size_t size, int, const char*, int) PX_OVERRIDE PX_FINAL { return PX_ALLOC(size, "unused"); }
virtual void deallocate(void* ptr) PX_OVERRIDE PX_FINAL { PX_FREE(ptr); }
};
// Default memory manager: host allocations use PxsDefaultMemoryAllocator above;
// no device (GPU) allocator is provided (getDeviceMemoryAllocator() returns NULL).
class PxsDefaultMemoryManager : public PxsMemoryManager
{
public:
// PxsMemoryManager
virtual PxVirtualAllocatorCallback* getHostMemoryAllocator() PX_OVERRIDE PX_FINAL { return &mDefaultMemoryAllocator; }
virtual PxVirtualAllocatorCallback* getDeviceMemoryAllocator() PX_OVERRIDE PX_FINAL { return NULL; }
//~PxsMemoryManager
PxsDefaultMemoryAllocator mDefaultMemoryAllocator;
};
}
// Factory for the default (host-only) memory manager. The caller owns the returned object.
PxsMemoryManager* physx::createDefaultMemoryManager()
{
return PX_NEW(PxsDefaultMemoryManager);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,431 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "PxsSimpleIslandManager.h"
#include "foundation/PxSort.h"
#include "PxsContactManager.h"
#include "CmTask.h"
#include "DyVArticulation.h"
using namespace physx;
using namespace IG;
///////////////////////////////////////////////////////////////////////////////
// Binds the task to its owning island manager and the island sim it will process.
ThirdPassTask::ThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager, IslandSim& islandSim) : Cm::Task(contextID), mIslandManager(islandManager), mIslandSim(islandSim)
{
}
// Third island-gen pass for one island sim: drop destroyed edges, then process
// lost edges against the manager's pending destroyed-node list.
void ThirdPassTask::runInternal()
{
PX_PROFILE_ZONE("Basic.thirdPassIslandGen", mContextID);
mIslandSim.removeDestroyedEdges();
mIslandSim.processLostEdges(mIslandManager.mDestroyedNodes, true, true, mIslandManager.mMaxDirtyNodesPerFrame);
}
///////////////////////////////////////////////////////////////////////////////
// Binds the post-third-pass cleanup task to its owning island manager.
PostThirdPassTask::PostThirdPassTask(PxU64 contextID, SimpleIslandManager& islandManager) : Cm::Task(contextID), mIslandManager(islandManager)
{
}
// Runs after both third-pass tasks: recycles the node and edge handles queued for
// destruction during the frame, then sanity-checks the deactivation lists (debug).
void PostThirdPassTask::runInternal()
{
PX_PROFILE_ZONE("Basic.postThirdPassIslandGen", mContextID);
for (PxU32 a = 0; a < mIslandManager.mDestroyedNodes.size(); ++a)
mIslandManager.mNodeHandles.freeHandle(mIslandManager.mDestroyedNodes[a].index());
mIslandManager.mDestroyedNodes.clear();
for (PxU32 a = 0; a < mIslandManager.mDestroyedEdges.size(); ++a)
mIslandManager.mEdgeHandles.freeHandle(mIslandManager.mDestroyedEdges[a]);
mIslandManager.mDestroyedEdges.clear();
PX_ASSERT(mIslandManager.validateDeactivations());
}
///////////////////////////////////////////////////////////////////////////////
// Sets up the paired island managers: the accurate one (GPU-aware when 'gpu' is set)
// and the speculative one (always CPU-only), plus the third-pass tasks that drive them.
SimpleIslandManager::SimpleIslandManager(bool useEnhancedDeterminism, bool gpu, PxU64 contextID) :
mDestroyedNodes ("mDestroyedNodes"),
mDestroyedEdges ("mDestroyedEdges"),
mAccurateIslandManager (mCpuData, gpu ? &mGpuData : NULL, contextID),
mSpeculativeIslandManager (mCpuData, NULL, contextID),
mSpeculativeThirdPassTask (contextID, *this, mSpeculativeIslandManager),
mAccurateThirdPassTask (contextID, *this, mAccurateIslandManager),
mPostThirdPassTask (contextID, *this),
mContextID (contextID),
mGPU (gpu)
{
// GPU builds pre-size the first-partition-edge array.
if(gpu)
mGpuData.mFirstPartitionEdges.resize(1024);
// Enhanced determinism processes every dirty node each frame; otherwise cap the per-frame work.
mMaxDirtyNodesPerFrame = useEnhancedDeterminism ? 0xFFFFFFFF : 1000u;
}
// All members clean up via their own destructors; nothing extra to release here.
SimpleIslandManager::~SimpleIslandManager()
{
}
// Allocates a fresh node handle and registers the node with both island managers.
// Returns the new node's index.
PxNodeIndex SimpleIslandManager::addNode(bool isActive, bool isKinematic, Node::NodeType type, void* object)
{
	const PxNodeIndex newIndex(mNodeHandles.getHandle());
	mSpeculativeIslandManager.addNode(isActive, isKinematic, type, newIndex, object);
	mAccurateIslandManager.addNode(isActive, isKinematic, type, newIndex, object);
	return newIndex;
}
// Queues a node for destruction; the handle itself is recycled later in PostThirdPassTask
// (or secondPassIslandGenPart2), after the island sims have processed the removal.
void SimpleIslandManager::removeNode(const PxNodeIndex index)
{
PX_ASSERT(mNodeHandles.isValidHandle(index.index()));
mDestroyedNodes.pushBack(index);
}
// Allocates an edge handle, grows the edge-indexed arrays on demand, and records the
// edge's endpoints, payload pointer and interaction. Returns the new edge index.
EdgeIndex SimpleIslandManager::addEdge(void* edge, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction)
{
	const EdgeIndex edgeHandle = mEdgeHandles.getHandle();
	const PxU32 slot = 2 * edgeHandle;	// two node indices are stored per edge
	if(slot == mCpuData.mEdgeNodeIndices.size())
	{
		PX_PROFILE_ZONE("ReserveEdges", mContextID);
		const PxU32 grownSize = slot + 2048;
		mCpuData.mEdgeNodeIndices.resize(grownSize);
		// PT: grownSize is for mEdgeNodeIndices which holds two indices per edge.
		// Regular edge-indexed buffers only need half that size.
		mAuxCpuData.mConstraintOrCm.resize(grownSize / 2);
		mInteractions.resize(grownSize / 2);
	}
	mCpuData.mEdgeNodeIndices[slot] = nodeHandle1;
	mCpuData.mEdgeNodeIndices[slot + 1] = nodeHandle2;
	mAuxCpuData.mConstraintOrCm[edgeHandle] = edge;
	mInteractions[edgeHandle] = interaction;
	return edgeHandle;
}
// Grows the edge-connectivity bitmap (and, on GPU, the first-partition-edge array) when the
// new handle reaches current capacity, then initializes the handle's connected bit:
// cleared for contact managers (flag == true), set for constraints (flag == false).
EdgeIndex SimpleIslandManager::resizeEdgeArrays(EdgeIndex handle, bool flag)
{
	if(handle == mConnectedMap.size())
		mConnectedMap.resize(2 * (handle + 1));
	if(mGPU && handle == mGpuData.mFirstPartitionEdges.capacity())
		mGpuData.mFirstPartitionEdges.resize(2 * (handle + 1));
	if(flag)
		mConnectedMap.reset(handle); // PT: for contact manager
	else
		mConnectedMap.set(handle); // PT: for constraint
	return handle;
}
///////////////////////////////////////////////////////////////////////////////
// PT: the two functions below are to replicate SimpleIslandManager::addContactManager() multi-threaded
// PT: the two functions below are to replicate SimpleIslandManager::addContactManager() multi-threaded
// Serial preallocation step: grabs 'nb' edge handles into 'handles' and grows every
// edge-indexed buffer up-front so addPreallocatedContactManager() can later fill the
// slots from multiple threads without resizing.
void SimpleIslandManager::preallocateContactManagers(PxU32 nb, EdgeIndex* handles)
{
// PT: part from SimpleIslandManager::addContactManager / addEdge
EdgeIndex maxHandle = 0;
{
{
PX_PROFILE_ZONE("getHandles", mContextID);
// Track the largest handle so all buffers can be sized once, below.
for(PxU32 i=0;i<nb;i++)
{
const EdgeIndex handle = mEdgeHandles.getHandle(); // PT: TODO: better version
handles[i] = handle;
if(handle>maxHandle)
maxHandle = handle;
}
}
const PxU32 nodeIds = 2 * maxHandle;
if (mCpuData.mEdgeNodeIndices.size() <= nodeIds)
{
PX_PROFILE_ZONE("ReserveEdges", mContextID);
const PxU32 newSize = nodeIds + 2048;
mCpuData.mEdgeNodeIndices.resize(newSize);
// mEdgeNodeIndices holds two indices per edge; edge-indexed buffers need half that size.
mAuxCpuData.mConstraintOrCm.resize(newSize/2);
mInteractions.resize(newSize/2);
}
}
// PT: part from SimpleIslandManager::addContactManager / mSpeculativeIslandManager.addConnection()
mSpeculativeIslandManager.preallocateConnections(maxHandle);
// PT: part from SimpleIslandManager::addContactManager / resizeEdgeArrays
// PT: TODO: refactor with regular code
if(mConnectedMap.size() <= maxHandle)
mConnectedMap.resize(2 * (maxHandle + 1));
if(mGPU && mGpuData.mFirstPartitionEdges.capacity() <= maxHandle)
mGpuData.mFirstPartitionEdges.resize(2 * (maxHandle + 1));
}
// Thread-safe counterpart of addContactManager() for handles reserved by
// preallocateContactManagers(): fills the preallocated edge slots, registers the
// connection with the speculative manager, and clears the connected bit atomically.
// Returns the status of the speculative connection add.
bool SimpleIslandManager::addPreallocatedContactManager(EdgeIndex handle, PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType)
{
// PT: part of SimpleIslandManager::addEdge that can be multi-threaded
{
const PxU32 nodeIds = 2 * handle;
mCpuData.mEdgeNodeIndices[nodeIds] = nodeHandle1;
mCpuData.mEdgeNodeIndices[nodeIds + 1] = nodeHandle2;
mAuxCpuData.mConstraintOrCm[handle] = manager;
mInteractions[handle] = interaction;
}
// PT: part of mSpeculativeIslandManager.addConnection() that can be multi-threaded
bool status = mSpeculativeIslandManager.addConnectionPreallocated(nodeHandle1, nodeHandle2, edgeType, handle);
if (manager)
manager->getWorkUnit().mEdgeIndex = handle;
// PT: part of SimpleIslandManager::addContactManager / resizeEdgeArrays() for contact manager
{
// PT: this is effectively just: mConnectedMap.reset(handle); // PT: for contact manager
// So just this, with atomics: map[index >> 5] &= ~(1 << (index & 31));
PxU32* map = mConnectedMap.getWords() + (handle >> 5);
PxAtomicAnd(reinterpret_cast<volatile PxI32*>(map), ~(1 << (handle & 31)));
}
return status;
}
///////////////////////////////////////////////////////////////////////////////
// Creates a contact-manager edge. Only the speculative island manager is connected here;
// the accurate one is wired up later through setEdgeConnected(). Returns the edge index.
EdgeIndex SimpleIslandManager::addContactManager(PxsContactManager* manager, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction, Edge::EdgeType edgeType)
{
	const EdgeIndex newEdge = addEdge(manager, nodeHandle1, nodeHandle2, interaction);
	mSpeculativeIslandManager.addConnection(nodeHandle1, nodeHandle2, edgeType, newEdge);
	if(manager)
		manager->getWorkUnit().mEdgeIndex = newEdge;
	return resizeEdgeArrays(newEdge, true);
}
// Creates a constraint edge and registers it with both island managers straight away
// (constraints start connected). Returns the edge index.
EdgeIndex SimpleIslandManager::addConstraint(Dy::Constraint* constraint, PxNodeIndex nodeHandle1, PxNodeIndex nodeHandle2, Sc::Interaction* interaction)
{
	const EdgeIndex newEdge = addEdge(constraint, nodeHandle1, nodeHandle2, interaction);
	mAccurateIslandManager.addConnection(nodeHandle1, nodeHandle2, Edge::eCONSTRAINT, newEdge);
	mSpeculativeIslandManager.addConnection(nodeHandle1, nodeHandle2, Edge::eCONSTRAINT, newEdge);
	return resizeEdgeArrays(newEdge, false);
}
// Activates the node in both island managers.
void SimpleIslandManager::activateNode(PxNodeIndex index)
{
mAccurateIslandManager.activateNode(index);
mSpeculativeIslandManager.activateNode(index);
}
// Deactivates the node in both island managers.
void SimpleIslandManager::deactivateNode(PxNodeIndex index)
{
mAccurateIslandManager.deactivateNode(index);
mSpeculativeIslandManager.deactivateNode(index);
}
// Puts the node to sleep in both island managers.
void SimpleIslandManager::putNodeToSleep(PxNodeIndex index)
{
mAccurateIslandManager.putNodeToSleep(index);
mSpeculativeIslandManager.putNodeToSleep(index);
}
// Removes an edge: queues it for destruction (handle recycling is deferred to
// PostThirdPassTask), detaches it from the speculative manager, and from the accurate
// manager only if its connected bit is set. Clears the edge's payload/interaction slots.
void SimpleIslandManager::removeConnection(EdgeIndex edgeIndex)
{
if(edgeIndex == IG_INVALID_EDGE)
return;
mDestroyedEdges.pushBack(edgeIndex);
mSpeculativeIslandManager.removeConnection(edgeIndex);
// The accurate manager only knows about this edge while the connected bit is set.
if(mConnectedMap.test(edgeIndex))
{
mAccurateIslandManager.removeConnection(edgeIndex);
mConnectedMap.reset(edgeIndex);
}
mAuxCpuData.mConstraintOrCm[edgeIndex] = NULL;
mInteractions[edgeIndex] = NULL;
}
// First island-gen pass, run on the speculative island manager only: clear last frame's
// deactivations, wake islands, then process new and lost edges. Node handles queued in
// mDestroyedNodes are NOT freed here (that happens in secondPassIslandGenPart2).
void SimpleIslandManager::firstPassIslandGen()
{
PX_PROFILE_ZONE("Basic.firstPassIslandGen", mContextID);
mSpeculativeIslandManager.clearDeactivations();
mSpeculativeIslandManager.wakeIslands();
mSpeculativeIslandManager.processNewEdges();
mSpeculativeIslandManager.removeDestroyedEdges();
mSpeculativeIslandManager.processLostEdges(mDestroyedNodes, false, false, mMaxDirtyNodesPerFrame);
}
// Extra speculative wake-up pass (wakeIslands2) after the first island-gen pass.
void SimpleIslandManager::additionalSpeculativeActivation()
{
mSpeculativeIslandManager.wakeIslands2();
}
// Second island-gen pass on the accurate island manager; split into two parts so callers
// can overlap part 1 with other work (see the part1/part2 comments below).
void SimpleIslandManager::secondPassIslandGen()
{
PX_PROFILE_ZONE("Basic.secondPassIslandGen", mContextID);
secondPassIslandGenPart1();
secondPassIslandGenPart2();
}
// PT: first part of secondPassIslandGen().
// We can put in this function any code that does not modify data we read in PxgIncrementalPartition::processLostFoundPatches().
// The two will overlap / run in parallel.
// PT: first part of secondPassIslandGen().
// We can put in this function any code that does not modify data we read in PxgIncrementalPartition::processLostFoundPatches().
// The two will overlap / run in parallel.
void SimpleIslandManager::secondPassIslandGenPart1()
{
PX_PROFILE_ZONE("Basic.secondPassIslandGenPart1", mContextID);
mAccurateIslandManager.wakeIslands();
mAccurateIslandManager.processNewEdges();
}
// PT: second part of secondPassIslandGen(). Will run serially after PxgIncrementalPartition::processLostFoundPatches().
// PT: second part of secondPassIslandGen(). Will run serially after PxgIncrementalPartition::processLostFoundPatches().
// Processes destroyed/lost edges on the accurate manager, then recycles the node handles
// queued by removeNode(). Destroyed-edge handles stay queued for PostThirdPassTask.
void SimpleIslandManager::secondPassIslandGenPart2()
{
PX_PROFILE_ZONE("Basic.secondPassIslandGenPart2", mContextID);
// PT: TODO: analyze remaining code below, maybe we can move more of it to Part1
mAccurateIslandManager.removeDestroyedEdges();
mAccurateIslandManager.processLostEdges(mDestroyedNodes, false, false, mMaxDirtyNodesPerFrame);
for(PxU32 a = 0; a < mDestroyedNodes.size(); ++a)
mNodeHandles.freeHandle(mDestroyedNodes[a].index());
mDestroyedNodes.clear();
//mDestroyedEdges.clear();
}
// Kicks off the third island-gen pass as tasks: the speculative and accurate third-pass
// tasks run independently, and both continue into mPostThirdPassTask, which recycles the
// destroyed node/edge handles before 'continuation' runs.
void SimpleIslandManager::thirdPassIslandGen(PxBaseTask* continuation)
{
mAccurateIslandManager.clearDeactivations();
// Chain: {speculative, accurate} third-pass tasks -> mPostThirdPassTask -> continuation.
mPostThirdPassTask.setContinuation(continuation);
mSpeculativeThirdPassTask.setContinuation(&mPostThirdPassTask);
mAccurateThirdPassTask.setContinuation(&mPostThirdPassTask);
mSpeculativeThirdPassTask.removeReference();
mAccurateThirdPassTask.removeReference();
mPostThirdPassTask.removeReference();
//PX_PROFILE_ZONE("Basic.thirdPassIslandGen", mContextID);
//mSpeculativeIslandManager.removeDestroyedEdges();
//mSpeculativeIslandManager.processLostEdges(mDestroyedNodes, true, true);
//mAccurateIslandManager.removeDestroyedEdges();
//mAccurateIslandManager.processLostEdges(mDestroyedNodes, true, true);
}
bool SimpleIslandManager::validateDeactivations() const
{
//This method sanity checks the deactivations produced by third-pass island gen. Specifically, it ensures that any bodies that
//the speculative IG wants to deactivate are also candidates for deactivation in the accurate island gen. In practice, both should be the case. If this fails, something went wrong...
const PxNodeIndex* const nodeIndices = mSpeculativeIslandManager.getNodesToDeactivate(Node::eRIGID_BODY_TYPE);
const PxU32 nbNodesToDeactivate = mSpeculativeIslandManager.getNbNodesToDeactivate(Node::eRIGID_BODY_TYPE);
for(PxU32 i = 0; i < nbNodesToDeactivate; ++i)
{
//Node is active in accurate sim => mismatch between accurate and inaccurate sim!
const Node& node = mAccurateIslandManager.getNode(nodeIndices[i]);
const Node& speculativeNode = mSpeculativeIslandManager.getNode(nodeIndices[i]);
//KS - we need to verify that the bodies in the "deactivating" list are still candidates for deactivation. There are cases where they may not no longer be candidates, e.g. if the application
//put bodies to sleep and activated them
if(node.isActive() && !speculativeNode.isActive())
return false;
}
return true;
}
// Both paired island managers must individually pass their internal consistency checks.
bool SimpleIslandManager::checkInternalConsistency()
{
	if(!mAccurateIslandManager.checkInternalConsistency())
		return false;
	return mSpeculativeIslandManager.checkInternalConsistency();
}
// Promotes an edge into the accurate island manager; no-op when already connected.
void SimpleIslandManager::setEdgeConnected(EdgeIndex edgeIndex, Edge::EdgeType edgeType)
{
	if(mConnectedMap.test(edgeIndex))
		return;
	const PxNodeIndex node0 = mCpuData.mEdgeNodeIndices[2 * edgeIndex];
	const PxNodeIndex node1 = mCpuData.mEdgeNodeIndices[2 * edgeIndex + 1];
	mAccurateIslandManager.addConnection(node0, node1, edgeType, edgeIndex);
	mConnectedMap.set(edgeIndex);
}
// Detaches an edge from the accurate island manager; no-op when already disconnected.
void SimpleIslandManager::setEdgeDisconnected(EdgeIndex edgeIndex)
{
	if(!mConnectedMap.test(edgeIndex))
		return;
	//PX_ASSERT(!mAccurateIslandManager.getEdge(edgeIndex).isInDirtyList());
	mAccurateIslandManager.removeConnection(edgeIndex);
	mConnectedMap.reset(edgeIndex);
}
// GPU only: queues the edge's first partition edge (created/updated by the gpu solver)
// for destruction and clears its slot. No-op on CPU or when no partition edge is stored.
void SimpleIslandManager::deactivateEdge(const EdgeIndex edgeIndex)
{
	if(!mGPU)
		return;
	if(mGpuData.mFirstPartitionEdges[edgeIndex])
	{
		mGpuData.mDestroyedPartitionEdges.pushBack(mGpuData.mFirstPartitionEdges[edgeIndex]);
		mGpuData.mFirstPartitionEdges[edgeIndex] = NULL;
	}
}
// Stores the contact manager as the edge's payload and back-links the edge index
// into the contact manager's work unit.
void SimpleIslandManager::setEdgeRigidCM(const EdgeIndex edgeIndex, PxsContactManager* cm)
{
mAuxCpuData.mConstraintOrCm[edgeIndex] = cm;
cm->getWorkUnit().mEdgeIndex = edgeIndex;
}
// Clears the edge's contact-manager payload and deactivates any GPU partition edge for it.
void SimpleIslandManager::clearEdgeRigidCM(const EdgeIndex edgeIndex)
{
mAuxCpuData.mConstraintOrCm[edgeIndex] = NULL;
deactivateEdge(edgeIndex);
}
// Marks the node kinematic in both island managers.
void SimpleIslandManager::setKinematic(PxNodeIndex nodeIndex)
{
mAccurateIslandManager.setKinematic(nodeIndex);
mSpeculativeIslandManager.setKinematic(nodeIndex);
}
// Marks the node dynamic in both island managers.
void SimpleIslandManager::setDynamic(PxNodeIndex nodeIndex)
{
mAccurateIslandManager.setDynamic(nodeIndex);
mSpeculativeIslandManager.setDynamic(nodeIndex);
}