feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,44 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// Clear all OMNI_PVD_* generation macros so that a PVD class/attribute
// definition file can be included again with a different macro expansion set.
// This matches the "CmOmniPvdAutoGenClearDefines.h" step in the usage pattern
// described in CmOmniPvdAutoGenCreateRegistrationStruct.h (include the
// struct-generating or registration-generating header, then the definition
// file, then this header to reset the macros).
//
#undef OMNI_PVD_CLASS_BEGIN
#undef OMNI_PVD_CLASS_DERIVED_BEGIN
#undef OMNI_PVD_CLASS_UNTYPED_BEGIN
#undef OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN
#undef OMNI_PVD_CLASS_END
#undef OMNI_PVD_ENUM_BEGIN
#undef OMNI_PVD_ENUM_END
#undef OMNI_PVD_ATTRIBUTE
#undef OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE
#undef OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE
#undef OMNI_PVD_ATTRIBUTE_STRING
#undef OMNI_PVD_ATTRIBUTE_UNIQUE_LIST
#undef OMNI_PVD_ATTRIBUTE_FLAG
#undef OMNI_PVD_ENUM_VALUE_EXPLICIT
#undef OMNI_PVD_ENUM_VALUE

View File

@@ -0,0 +1,455 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// The macro logic in this header (and the headers CmOmniPvdAutoGenRegisterData.h
// and CmOmniPvdAutoGenSetData.h) is meant as a helper to automatically generate a
// structure that stores all PVD class and attribute handles for a module, handles the
// registration logic and adds methods for object creation, setting attribute
// values etc. At the core of the generation logic is a user defined header file
// that describes the classes and attributes as follows:
//
// OMNI_PVD_CLASS_BEGIN(MyClass1)
// OMNI_PVD_ATTRIBUTE(MyClass1, myAttr1, PxReal, OmniPvdDataType::eFLOAT32)
// OMNI_PVD_ATTRIBUTE(MyClass1, myAttr2, PxReal, OmniPvdDataType::eFLOAT32)
// OMNI_PVD_CLASS_END(MyClass1)
//
// OMNI_PVD_CLASS_UNTYPED_BEGIN(MyClass2)
// OMNI_PVD_ATTRIBUTE(MyClass2, myAttr1, PxU32, OmniPvdDataType::eUINT32)
// OMNI_PVD_CLASS_END(MyClass2)
//
// The structure to create from this description will look somewhat like this:
//
// struct MyModulePvdObjectsDescriptor
// {
//
// struct PvdMyClass1
// {
// typedef MyClass1 ObjectType;
// static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectRef) { return reinterpret_cast<OmniPvdObjectHandle>(&objectRef); }
//
// OmniPvdClassHandle classHandle;
//
// void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const
// {
// writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL);
// }
//
// static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef)
// {
// writer.destroyObject(contextHandle, getObjectHandle(objectRef));
// }
//
// OmniPvdAttributeHandle myAttr1;
// void set_myAttr1_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxReal& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr1, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eFLOAT32>());
// }
//
// OmniPvdAttributeHandle myAttr2;
// void set_myAttr2_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxReal& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr2, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eFLOAT32>());
// }
// };
// PvdMyClass1 pvdMyClass1;
//
//
// struct PvdMyClass2
// {
// typedef OmniPvdObjectHandle ObjectType;
// static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectHandle) { return objectHandle; }
//
// OmniPvdClassHandle classHandle;
//
// void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const
// {
// writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL);
// }
//
// static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef)
// {
// writer.destroyObject(contextHandle, getObjectHandle(objectRef));
// }
//
// OmniPvdAttributeHandle myAttr1;
// void set_myAttr1_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxU32& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr1, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eUINT32>());
// }
// };
// PvdMyClass2 pvdMyClass2;
//
//
// void myRegisterDataMethod(OmniPvdWriter& writer)
// {
// pvdMyClass1.classHandle = writer.registerClass("MyClass1");
// pvdMyClass1.myAttr1 = writer.registerAttribute(pvdMyClass1.classHandle, "myAttr1", OmniPvdDataType::eFLOAT32, 1);
// pvdMyClass1.myAttr2 = writer.registerAttribute(pvdMyClass1.classHandle, "myAttr2", OmniPvdDataType::eFLOAT32, 1);
//
// pvdMyClass2.classHandle = writer.registerClass("MyClass2");
// pvdMyClass2.myAttr1 = writer.registerAttribute(pvdMyClass2.classHandle, "myAttr1", OmniPvdDataType::eUINT32, 1);
// }
//
// };
//
// Assuming the class and attribute definitions are in a file called MyModulePvdObjectDefinitions.h,
// the described structure can be generated like this:
//
// struct MyModulePvdObjectsDescriptor
// {
//
// #include "CmOmniPvdAutoGenCreateRegistrationStruct.h"
// #include "MyModulePvdObjectDefinitions.h"
// #include "CmOmniPvdAutoGenClearDefines.h"
//
// // custom registration data related members that are not auto-generated can go here, for example
//
//
// void myRegisterDataMethod(OmniPvdWriter& writer)
// {
// #define OMNI_PVD_WRITER_VAR writer
//
// #include "CmOmniPvdAutoGenRegisterData.h"
// #include "MyModulePvdObjectDefinitions.h"
// #include "CmOmniPvdAutoGenClearDefines.h"
//
// // custom registration code that is not auto-generated can go here too
//
// #undef OMNI_PVD_WRITER_VAR
// }
// };
//
// As can be seen, CmOmniPvdAutoGenCreateRegistrationStruct.h is responsible for generating the structs,
// members and setter methods. CmOmniPvdAutoGenRegisterData.h is responsible for generating the registration
// code (note that defining OMNI_PVD_WRITER_VAR is important in this context since it is used inside
// CmOmniPvdAutoGenRegisterData.h)
//
// Note that it is the user's responsibility to include the necessary headers before applying these helpers
// (for example, OmniPvdDefines.h etc.).
//
// Last but not least, the helpers in CmOmniPvdAutoGenSetData.h provide a way to use this structure to
// set values of attributes, create class instances etc. An example usage is shown below:
//
// OmniPvdContextHandle contextHandle; // assuming this holds the context the objects belong to
// MyClass1 myClass1Instance;
// PxReal value; // assuming this holds the value to set the attribute to
//
// OMNI_PVD_CREATE(contextHandle, MyClass1, myClass1Instance);
// OMNI_PVD_SET(contextHandle, MyClass1, myAttr1, myClass1Instance, value);
//
// To use these helper macros, the following things need to be defined before including CmOmniPvdAutoGenSetData.h:
//
// #define OMNI_PVD_GET_WRITER(writer)
// OmniPvdWriter* writer = GetPvdWriterForMyModule();
//
// #define OMNI_PVD_GET_REGISTRATION_DATA(regData)
// MyModulePvdObjectsDescriptor* regData = GetPvdObjectsDescForMyModule();
//
// #include "CmOmniPvdAutoGenSetData.h"
//
// GetPvdWriterForMyModule() and GetPvdObjectsDescForMyModule() just stand for the logic the user needs
// to provide to access the OmniPvdWriter object and the generated description structure. In the given example,
// the variables "writer" and "regData" need to be assigned but the code to do so will be user specific.
//
//
//
// Shared body injected into every generated Pvd* struct: the registered PVD
// class handle plus createInstance/destroyInstance helpers. It relies on the
// enclosing struct having already defined ObjectType and getObjectHandle()
// (done by the OMNI_PVD_CLASS_BEGIN / OMNI_PVD_CLASS_UNTYPED_BEGIN macros
// below). createInstance needs classHandle and is therefore non-static;
// destroyInstance only needs the object handle and can be static.
//
#define OMNI_PVD_CLASS_INTERNALS \
\
OmniPvdClassHandle classHandle; \
\
void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const \
{ \
writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL); \
} \
\
static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) \
{ \
writer.destroyObject(contextHandle, getObjectHandle(objectRef)); \
}
//
// Define a PVD class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: name of the class to register in PVD (note: has to be an existing C++ class)
//
// The object handle of an instance is the address of the C++ object
// (reinterpret_cast to OmniPvdObjectHandle). The macro deliberately leaves the
// generated struct open; OMNI_PVD_CLASS_END emits the closing brace and the
// member variable.
//
#define OMNI_PVD_CLASS_BEGIN(classID) \
\
struct Pvd##classID \
{ \
typedef classID ObjectType; \
\
static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectRef) { return reinterpret_cast<OmniPvdObjectHandle>(&objectRef); } \
\
OMNI_PVD_CLASS_INTERNALS
//
// Define a PVD class that is derived from another class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: see OMNI_PVD_CLASS_BEGIN
// baseClassID: the name of the class to derive from
//
// baseClassID is not needed for struct generation and is ignored here; the
// inheritance link is established during registration (see the corresponding
// macro in CmOmniPvdAutoGenRegisterData.h, which passes the base class handle
// to registerClass).
//
#define OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_BEGIN(classID)
//
// Define an untyped PVD class, i.e., a class whose instances are identified by
// an explicitly provided OmniPvdObjectHandle instead of the address of a C++
// object (ObjectType is OmniPvdObjectHandle and getObjectHandle is the identity).
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: name of the class to register in PVD (note: the class does not need to match an actually existing
// class but still needs to follow C++ naming conventions)
//
#define OMNI_PVD_CLASS_UNTYPED_BEGIN(classID) \
\
struct Pvd##classID \
{ \
typedef OmniPvdObjectHandle ObjectType; \
\
static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectHandle) { return objectHandle; } \
\
OMNI_PVD_CLASS_INTERNALS
//
// Define an untyped PVD class that is derived from another class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: see OMNI_PVD_CLASS_UNTYPED_BEGIN
// baseClassID: the name of the class to derive from (ignored here, used during
//              registration — see CmOmniPvdAutoGenRegisterData.h)
//
#define OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_UNTYPED_BEGIN(classID)
//
// See OMNI_PVD_CLASS_BEGIN for more info.
//
// Closes the struct opened by a *_BEGIN macro and declares the member
// variable instance ("pvd" + classID) that the setter macros in
// CmOmniPvdAutoGenSetData.h access via regData->pvd##classID.
//
#define OMNI_PVD_CLASS_END(classID) \
\
}; \
Pvd##classID pvd##classID;
//
// Define a PVD enum class.
//
// Note: has to be paired with OMNI_PVD_ENUM_END
//
// enumID: name of the enum class (has to follow C++ naming conventions)
//
// Only a class handle member is generated — enums have no ObjectType, no
// instances and no setters in this header. The enum entries themselves are
// only relevant during registration (see OMNI_PVD_ENUM_VALUE* below and
// CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_ENUM_BEGIN(enumID) \
\
struct Pvd##enumID \
{ \
OmniPvdClassHandle classHandle;
//
// See OMNI_PVD_ENUM_BEGIN
//
#define OMNI_PVD_ENUM_END(enumID) OMNI_PVD_CLASS_END(enumID)
//
// Define a simple PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// classID: name of the class to add the attribute to (see OMNI_PVD_CLASS_BEGIN)
// attributeID: name of the attribute (has to follow C++ naming conventions)
// valueType: attribute data type (int, float etc.)
// pvdDataType: PVD attribute data type (see OmniPvdDataType)
//
// Generates the attribute handle member plus a set_<attributeID>_ method that
// sends the raw bytes of the value. The PX_ASSERT is a debug-only sanity check
// that sizeof(valueType) matches the declared PVD data type size.
//
#define OMNI_PVD_ATTRIBUTE(classID, attributeID, valueType, pvdDataType) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
PX_ASSERT(sizeof(valueType) == getOmniPvdDataTypeSize<pvdDataType>()); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<pvdDataType>()); \
}
//
// Define a fixed size multi-value PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// The attribute is a fixed size array of values of the given pvd data type.
//
// entryCount: number of entries the array will hold.
//
// See OMNI_PVD_ATTRIBUTE for the other parameters. Note that valueType is
// expected to hold a type that matches the size of the whole array, i.e.,
// sizeof(valueType) == entryCount * getOmniPvdDataTypeSize<pvdDataType>()
//
// The generated setter writes the whole array in a single setAttribute call
// using sizeof(valueType); the size relation above is only checked in debug
// builds (PX_ASSERT).
//
#define OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE(classID, attributeID, valueType, pvdDataType, entryCount) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const uint32_t byteSize = static_cast<uint32_t>(sizeof(valueType)); \
PX_ASSERT(byteSize == (entryCount * getOmniPvdDataTypeSize<pvdDataType>())); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), byteSize); \
}
//
// Define a variable size multi-value PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// The attribute is a variable size array of values of the given pvd data type.
//
// See OMNI_PVD_ATTRIBUTE for a parameter description. Note that valueType is expected
// to define the type of a single array element, for example, int for an integer array.
//
// Unlike the fixed-size variant, the generated setter takes a pointer plus an
// element count and computes the byte size from the PVD data type size (there
// is no assert that sizeof(valueType) matches the PVD element size here).
//
#define OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE(classID, attributeID, valueType, pvdDataType) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType* values, uint32_t valueCount) const \
{ \
const uint32_t byteSize = valueCount * getOmniPvdDataTypeSize<pvdDataType>(); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(values), byteSize); \
}
//
// Define a string PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// See OMNI_PVD_ATTRIBUTE for a parameter description.
//
// NOTE(review): byteSize is taken to be valueCount verbatim, so the caller
// decides whether the terminating zero is part of the transmitted string —
// confirm against the OmniPvdWriter::setAttribute contract for eSTRING.
//
#define OMNI_PVD_ATTRIBUTE_STRING(classID, attributeID) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const char* values, uint32_t valueCount) const \
{ \
const uint32_t byteSize = valueCount; \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(values), byteSize); \
}
//
// Define a unique list PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// See OMNI_PVD_ATTRIBUTE for a parameter description. Note that valueType is expected
// to define the class the list will hold pointers to. If it shall hold pointers to
// instances of class MyClass, then the valueType is MyClass.
//
// Generates addTo_/removeFrom_ methods instead of a setter. The list entry is
// the object handle of the referenced instance, derived from its address —
// consistent with how OMNI_PVD_CLASS_BEGIN identifies typed objects.
//
#define OMNI_PVD_ATTRIBUTE_UNIQUE_LIST(classID, attributeID, valueType) \
\
OmniPvdAttributeHandle attributeID; \
\
void addTo_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const OmniPvdObjectHandle objHandle = reinterpret_cast<OmniPvdObjectHandle>(&value); \
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&objHandle); \
writer.addToUniqueListAttribute(contextHandle, getObjectHandle(objectRef), attributeID, ptr, sizeof(OmniPvdObjectHandle)); \
} \
\
void removeFrom_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const OmniPvdObjectHandle objHandle = reinterpret_cast<OmniPvdObjectHandle>(&value); \
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&objHandle); \
writer.removeFromUniqueListAttribute(contextHandle, getObjectHandle(objectRef), attributeID, ptr, sizeof(OmniPvdObjectHandle)); \
}
//
// Define a flag PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// enumType: the enum type this attribute refers to
// enumID: the name of the enum class that describes the enum (see OMNI_PVD_ENUM_BEGIN)
//
// See OMNI_PVD_ATTRIBUTE for the other parameters.
//
// The generated setter writes sizeof(enumType) raw bytes; the enumID parameter
// is unused here and only matters during registration, where it links the
// attribute to the enum's class handle (registerFlagsAttribute in
// CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_ATTRIBUTE_FLAG(classID, attributeID, enumType, enumID) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const enumType& value) const \
{ \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), sizeof(enumType)); \
}
//
// Define an enum entry.
//
// Note: needs to be placed between a OMNI_PVD_ENUM_BEGIN, OMNI_PVD_ENUM_END
// sequence
//
// enumID: name of the enum class to add an entry to (see OMNI_PVD_ENUM_BEGIN)
// enumEntryID: the name of the enum entry to add to the enum class (has to follow C++ naming conventions)
// value: the enum value
//
// Expands to nothing in this header: enum entries produce no struct members.
// They are only emitted during registration (see the registerEnumValue call in
// CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, value)
//
// Define an enum entry.
//
// Note: needs to be placed between a OMNI_PVD_ENUM_BEGIN, OMNI_PVD_ENUM_END
// sequence
//
// See OMNI_PVD_ENUM_VALUE_EXPLICIT for a description of the parameters. This shorter form expects the enum to
// have a C++ definition of the form:
//
// struct <enumID>
// {
// enum Enum
// {
// <enumEntryID> = ...
// }
// }
//
// such that the value can be derived using: <enumID>::<enumEntryID>
//
#define OMNI_PVD_ENUM_VALUE(enumID, enumEntryID) \
\
OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, enumID::enumEntryID)

View File

@@ -0,0 +1,102 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// The macro logic in this header will generate the PVD class/attribute registration
// code based on a class/attribute definition file. OMNI_PVD_WRITER_VAR needs to be
// defined before including this header. OMNI_PVD_WRITER_VAR has to represent the
// variable that holds a reference to a OmniPvdWriter instance. See
// CmOmniPvdAutoGenCreateRegistrationStruct.h for a more detailed overview of the
// whole approach. The various parameters are described there too.
//
//
// Register a class. The stringized classID is the name reported to PVD; the
// handle is stored in the pvd##classID member generated by
// CmOmniPvdAutoGenCreateRegistrationStruct.h.
//
#define OMNI_PVD_CLASS_BEGIN(classID) \
\
pvd##classID.classHandle = OMNI_PVD_WRITER_VAR.registerClass(#classID);
//
// Register a derived class: same as above but passes the already registered
// base class handle, so the base class must appear before the derived class
// in the definition file.
//
#define OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID) \
\
pvd##classID.classHandle = OMNI_PVD_WRITER_VAR.registerClass(#classID, pvd##baseClassID.classHandle);
//
// Untyped classes register identically to typed ones; the typed/untyped
// distinction only affects the generated struct (object handle derivation).
//
#define OMNI_PVD_CLASS_UNTYPED_BEGIN(classID) OMNI_PVD_CLASS_BEGIN(classID)
#define OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID)
#define OMNI_PVD_CLASS_END(classID)
#define OMNI_PVD_ENUM_BEGIN(enumID) OMNI_PVD_CLASS_BEGIN(enumID)
#define OMNI_PVD_ENUM_END(enumID) OMNI_PVD_CLASS_END(enumID)
//
// Attribute registration: entry count 1 = single value, explicit entryCount =
// fixed-size array, 0 = variable-size array.
//
#define OMNI_PVD_ATTRIBUTE(classID, attributeID, valueType, pvdDataType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, 1);
#define OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE(classID, attributeID, valueType, pvdDataType, entryCount) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, entryCount);
#define OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE(classID, attributeID, valueType, pvdDataType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, 0);
#define OMNI_PVD_ATTRIBUTE_STRING(classID, attributeID) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, OmniPvdDataType::eSTRING, 1);
#define OMNI_PVD_ATTRIBUTE_UNIQUE_LIST(classID, attributeID, valueType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerUniqueListAttribute(pvd##classID.classHandle, #attributeID, OmniPvdDataType::eOBJECT_HANDLE);
//
// Flag attributes are linked to the (already registered) enum's class handle.
//
#define OMNI_PVD_ATTRIBUTE_FLAG(classID, attributeID, enumType, enumID) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerFlagsAttribute(pvd##classID.classHandle, #attributeID, pvd##enumID.classHandle);
#define OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, value) \
\
OMNI_PVD_WRITER_VAR.registerEnumValue(pvd##enumID.classHandle, #enumEntryID, value);
#define OMNI_PVD_ENUM_VALUE(enumID, enumEntryID) \
\
OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, enumID::enumEntryID)

View File

@@ -0,0 +1,282 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// This header provides macros to register PVD object instances, to set PVD attribute
// values etc. This only works in combination with a registration structure that was
// defined using the logic in CmOmniPvdAutoGenCreateRegistrationStruct.h.
// OMNI_PVD_GET_WRITER and OMNI_PVD_GET_REGISTRATION_DATA have to be defined before
// including this header. These two macros need to fetch and assign the pointer to
// the OmniPvdWriter instance and the registration structure instance respectively.
// See CmOmniPvdAutoGenCreateRegistrationStruct.h for a more detailed overview of the
// whole approach.
//
#if PX_SUPPORT_OMNI_PVD
//
// It is recommended to use this macro when multiple PVD attributes get written
// in one go since the writer and registration structure is then fetched once only.
//
// Note: has to be paired with OMNI_PVD_WRITE_SCOPE_END
//
// writer: a pointer to the OmniPvdWriter instance will get assigned to a variable
// named "writer"
// regData: a pointer to the registration structure instance will get assigned to
// a variable named "regData"
//
// General usage would look like this:
//
// OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData)
// OMNI_PVD_SET_EXPLICIT(writer, regData, ...)
// OMNI_PVD_SET_EXPLICIT(writer, regData, ...)
// ...
// OMNI_PVD_WRITE_SCOPE_END
//
// BEGIN expands to an unclosed "if (writer != NULL) {" so everything up to
// OMNI_PVD_WRITE_SCOPE_END is skipped entirely when no PVD writer is attached;
// END supplies the matching closing brace.
//
#define OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData) \
\
OMNI_PVD_GET_WRITER(writer) \
if (writer != NULL) \
{ \
OMNI_PVD_GET_REGISTRATION_DATA(regData)
//
// See OMNI_PVD_WRITE_SCOPE_BEGIN for more info.
//
#define OMNI_PVD_WRITE_SCOPE_END \
\
}
//
// Create a PVD object instance using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
// NOTE(review): the *_EXPLICIT macros expand to multiple statements without a
// do-while(0) wrapper, so they must not be used as the single statement of an
// unbraced if/else; inside an OMNI_PVD_WRITE_SCOPE this is safe.
//
#define OMNI_PVD_CREATE_EXPLICIT(writer, regData, contextHandle, classID, objectRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.createInstance(*writer, contextHandle, objectRef);
//
// Create a PVD object instance.
//
// Note: if attribute values are to be set directly after the object instance registration,
// it is recommended to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_CREATE_EXPLICIT etc. instead
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_CREATE(contextHandle, classID, objectRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, objectRef); \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Destroy a PVD object instance using the provided pointer to the writer instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_DESTROY_EXPLICIT(writer, regData, contextHandle, classID, objectRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.destroyInstance(*writer, contextHandle, objectRef);
//
// Destroy a PVD object instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_DESTROY(contextHandle, classID, objectRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_DESTROY_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, objectRef); \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Set a PVD attribute value using the provided pointers to the writer and registration
// structure instance.
//
// writer: the variable named "writer" has to hold a pointer to the OmniPvdWriter instance
// regData: the variable named "regData" has to hold a pointer to the registration
// structure
//
// See OMNI_PVD_SET for a description of the other parameters.
//
#define OMNI_PVD_SET_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.set_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Set a PVD attribute value.
//
// Note: if multiple attribute values should get set in a row, it is recommended
// to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_SET_EXPLICIT etc. instead
//
// contextHandle: the handle of the context the object instance belongs to
// classID: the name of the class (as defined in OMNI_PVD_CLASS_BEGIN() etc.) the attribute
// belongs to
// attributeID: the name of the attribute (as defined in OMNI_PVD_ATTRIBUTE() etc.) to set the
// value for
// objectRef: reference to the class instance to set the attribute for (for untyped classes this shall be
// a reference to a OmniPvdObjectHandle. For typed classes, the pointer value will be used as the
// object handle value).
// valueRef: a reference to a variable that holds the value to set the attribute to
//
// This variant opens and closes its own write scope, so it is a no-op when no
// PVD writer is attached.
//
#define OMNI_PVD_SET(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Set PVD array attribute values (variable size array) using the provided pointers to the writer and registration
// structure instance.
//
// valuesPtr: pointer to the array data to set the attribute to
// valueCount: number of entries in valuePtr
//
// See OMNI_PVD_SET for a description of the other parameters.
//
// Pairs with attributes declared via OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE /
// OMNI_PVD_ATTRIBUTE_STRING, whose generated setters take (pointer, count).
//
#define OMNI_PVD_SET_ARRAY_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.set_##attributeID##_(*writer, contextHandle, objectRef, valuesPtr, valueCount);
//
// Set PVD array attribute values (variable size array).
//
// Note: if multiple attribute values should get set in a row, it is recommended
// to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_SET_EXPLICIT etc. instead
//
// See OMNI_PVD_SET_ARRAY_EXPLICIT for a description of the parameters.
//
#define OMNI_PVD_SET_ARRAY(contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Add an entry to a PVD unique list attribute using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET for a description of the parameters.
//
// Pairs with attributes declared via OMNI_PVD_ATTRIBUTE_UNIQUE_LIST, which
// generates the addTo_/removeFrom_ methods used here.
//
#define OMNI_PVD_ADD_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.addTo_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Add an entry to a PVD unique list attribute.
//
// See OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_ADD(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_ADD_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Remove an entry from a PVD unique list attribute using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_REMOVE_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.removeFrom_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Remove an entry from a PVD unique list attribute.
//
// See OMNI_PVD_SET for a description of the parameters.
//
#define OMNI_PVD_REMOVE(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_REMOVE_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
#else
#define OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData)
#define OMNI_PVD_WRITE_SCOPE_END
#define OMNI_PVD_CREATE_EXPLICIT(writer, regData, contextHandle, classID, objectRef)
#define OMNI_PVD_CREATE(contextHandle, classID, objectRef)
#define OMNI_PVD_DESTROY_EXPLICIT(writer, regData, contextHandle, classID, objectRef)
#define OMNI_PVD_DESTROY(contextHandle, classID, objectRef)
#define OMNI_PVD_SET_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_SET(contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_SET_ARRAY_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount)
#define OMNI_PVD_SET_ARRAY(contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount)
#define OMNI_PVD_ADD_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_ADD(contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_REMOVE_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_REMOVE(contextHandle, classID, attributeID, objectRef, valueRef)
#endif // PX_SUPPORT_OMNI_PVD

View File

@@ -0,0 +1,72 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_LOADLIBRARY_H
#define CM_WINDOWS_LOADLIBRARY_H
#include "foundation/PxPreprocessor.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "common/windows/PxWindowsDelayLoadHook.h"
namespace physx
{
namespace Cm
{
EXTERN_C IMAGE_DOS_HEADER __ImageBase;
PX_INLINE HMODULE WINAPI loadLibrary(const char* name)
{
return ::LoadLibraryA( name );
};
PX_INLINE FARPROC WINAPI physXCommonDliNotePreLoadLibrary(const char* libraryName, const physx::PxDelayLoadHook* delayLoadHook)
{
if(!delayLoadHook)
{
return (FARPROC)loadLibrary(libraryName);
}
else
{
if(strstr(libraryName, "PhysXFoundation"))
{
return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXFoundationDllName());
}
if(strstr(libraryName, "PhysXCommon"))
{
return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXCommonDllName());
}
}
return NULL;
}
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_LOADLIBRARY_H

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_MODULEUPDATELOADER_H
#define CM_WINDOWS_MODULEUPDATELOADER_H
#include "foundation/PxPreprocessor.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Cm
{
#if PX_X64
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader64.dll"
#else
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader.dll"
#endif
// Loads PhysX modules through an optional "update loader" DLL, which can
// substitute updated module binaries. Presumably the constructor loads the
// update loader DLL and resolves its lookup entry point into the two members
// below -- implementation not visible here; confirm in the .cpp.
class PX_PHYSX_COMMON_API CmModuleUpdateLoader
{
public:
	// updateLoaderDllName: name of the update loader DLL to try to load
	// (e.g. UPDATE_LOADER_DLL_NAME); failure is expected to be non-fatal.
	CmModuleUpdateLoader(const char* updateLoaderDllName);

	~CmModuleUpdateLoader();

	// Loads the given module through the update loader. Loads it from the path if
	// the update loader doesn't find the requested module. Returns NULL if no
	// module found.
	HMODULE LoadModule(const char* moduleName, const char* appGUID);

protected:
	HMODULE mUpdateLoaderDllHandle;	// handle to the update loader DLL, or NULL
	FARPROC mGetUpdatedModuleFunc;	// lookup entry point resolved from it, or NULL
};
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_MODULEUPDATELOADER_H

View File

@@ -0,0 +1,160 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_BLOCK_ARRAY_H
#define CM_BLOCK_ARRAY_H
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxMemory.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Cm
{
// Paged array: storage grows in fixed-size slabs so that element addresses
// stay stable as the array grows (no reallocation/copy of existing slabs).
//
// NOTE(review): the index math mixes 'idx % SlabSize' and 'idx & (SlabSize-1)';
// the latter is only correct when SlabSize is a power of two, so SlabSize must
// be a power of two (the default 4096 is).
//
// NOTE(review): reserve() default-constructs every element of a new slab and
// the destructor destroys every element of every slab, while remove()/resize()
// call an element's destructor and later overwrite the slot by assignment.
// This lifetime scheme is only well-defined for types whose destructor is
// trivial (or safely re-runnable) -- confirm before using with non-trivial T.
template <typename T, PxU32 SlabSize = 4096>
class BlockArray
{
	PxArray<T*> mBlocks;	// one pointer per allocated slab of SlabSize elements
	PxU32 mSize;			// number of live elements
	PxU32 mCapacity;		// total constructed elements (slab count * SlabSize)

public:
	BlockArray() : mSize(0), mCapacity(0)
	{
	}

	~BlockArray()
	{
		// Destroy every constructed element of each slab, then free the slab.
		for (PxU32 a = 0; a < mBlocks.size(); ++a)
		{
			for (PxU32 i = 0; i < SlabSize; ++i)
			{
				mBlocks[a][i].~T();
			}
			PX_FREE(mBlocks[a]);
		}
		mBlocks.resize(0);
	}

	// Ensures capacity for at least 'capacity' elements by allocating whole
	// slabs; every element of a new slab is default-constructed up front.
	PX_NOINLINE void reserve(PxU32 capacity)
	{
		if (capacity > mCapacity)
		{
			PxU32 nbSlabsRequired = (capacity + SlabSize - 1) / SlabSize;
			PxU32 nbSlabsToAllocate = nbSlabsRequired - mBlocks.size();
			mCapacity += nbSlabsToAllocate * SlabSize;
			for (PxU32 a = 0; a < nbSlabsToAllocate; ++a)
			{
				T* ts = reinterpret_cast<T*>(PX_ALLOC(sizeof(T) * SlabSize, "BlockArray"));
				for(PxU32 i = 0; i < SlabSize; ++i)
					PX_PLACEMENT_NEW(ts+i, T)();
				mBlocks.pushBack(ts);
			}
		}
	}

	// Sets the logical size; elements exposed by growth are reset to a
	// default-constructed value. Shrinking only lowers mSize.
	PX_NOINLINE void resize(PxU32 size)
	{
		if(size != mSize)
		{
			reserve(size);

			for (PxU32 a = mSize; a < size; ++a)
			{
				mBlocks[a / SlabSize][a&(SlabSize - 1)].~T();
				mBlocks[a / SlabSize][a&(SlabSize - 1)] = T();
			}
			mSize = size;
		}
	}

	// Caller guarantees 'size' does not exceed capacity; no construction or
	// destruction is performed.
	void forceSize_Unsafe(PxU32 size)
	{
		PX_ASSERT(size <= mCapacity);
		mSize = size;
	}

	// Removes the element at idx, shifting all following elements down by one
	// (order-preserving, O(n)).
	void remove(PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		// Fixed: loop must stop at the second-to-last element. It reads
		// element a+1, and reading index mSize would run one past the live
		// range (and past the last slab when mSize == mCapacity).
		for (PxU32 a = idx; a + 1 < mSize; ++a)
		{
			mBlocks[a / SlabSize][a&(SlabSize-1)] = mBlocks[(a + 1) / SlabSize][(a + 1) &(SlabSize-1)];
		}

		mSize--;
		mBlocks[mSize / SlabSize][mSize&(SlabSize - 1)].~T();
	}

	// Removes the element at idx by overwriting it with the last element
	// (O(1), does not preserve order).
	void replaceWithLast(PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		--mSize;
		mBlocks[idx / SlabSize][idx%SlabSize] = mBlocks[mSize / SlabSize][mSize%SlabSize];
	}

	T& operator [] (const PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		return mBlocks[idx / SlabSize][idx%SlabSize];
	}

	const T& operator [] (const PxU32 idx) const
	{
		PX_ASSERT(idx < mSize);
		return mBlocks[idx / SlabSize][idx%SlabSize];
	}

	// Appends a copy of 'item', growing by a slab if needed.
	void pushBack(const T& item)
	{
		reserve(mSize + 1);
		mBlocks[mSize / SlabSize][mSize%SlabSize] = item;
		mSize++;
	}

	PxU32 capacity() const { return mCapacity; }

	PxU32 size() const { return mSize; }
};
}
}
#endif

View File

@@ -0,0 +1,215 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmCollection.h"
using namespace physx;
using namespace Cm;
// Adds an object to the collection, optionally binding it to a serial id.
// Re-adding an object with its existing id is a silent no-op; re-adding it
// with a different id reports an error and leaves the collection unchanged.
void Collection::add(PxBase& object, PxSerialObjectId id)
{
	const PxSerialObjectId existingId = getId(object);
	if(existingId != PX_SERIAL_OBJECT_ID_INVALID)
	{
		if(existingId != id)
		{
			PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL,
				"PxCollection::add called for an object that has an associated id already present in the collection!");
		}
		return;
	}

	// A valid id must be unique within the collection.
	const bool wantsId = (id != PX_SERIAL_OBJECT_ID_INVALID);
	if(wantsId && !mIds.insert(id, &object))
	{
		PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL,
			"PxCollection::add called with an id which is already used in the collection");
		return;
	}

	mObjects[&object] = id;
}
// Removes an object from the collection, dropping both directions of the
// mapping: id -> object (if the object carried an id) and object -> id.
void Collection::remove(PxBase& object)
{
	PX_CHECK_AND_RETURN(contains(object), "PxCollection::remove called for an object not contained in the collection!");

	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	if(!entry)
		return;

	mIds.erase(entry->second);
	mObjects.erase(&object);
}
// Membership test: an object is contained iff it has an entry in the
// object-to-id map (objects without ids are stored with an invalid id).
bool Collection::contains(PxBase& object) const
{
	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	return entry != NULL;
}
// Binds an id to an object already in the collection, replacing any id the
// object previously carried. The new id must not be in use.
void Collection::addId(PxBase& object, PxSerialObjectId id)
{
	PX_CHECK_AND_RETURN(contains(object), "PxCollection::addId called for object that is not contained in the collection!");
	PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::addId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
	PX_CHECK_AND_RETURN(mIds.find(id) == NULL, "PxCollection::addId called with an id which is already used in the collection!");

	// If the object already carried a valid id, unbind the old one first.
	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	const bool hadId = entry && entry->second != PX_SERIAL_OBJECT_ID_INVALID;
	if(hadId)
		mIds.erase(entry->second);

	mIds.insert(id, &object);
	mObjects[&object] = id;
}
// Detaches an id from its object; the object itself stays in the collection
// (its entry is kept with an invalid id).
void Collection::removeId(PxSerialObjectId id)
{
	PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::removeId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
	PX_CHECK_AND_RETURN(mIds.find(id), "PxCollection::removeId called with PxSerialObjectId not contained in the collection!");

	const IdToObjectMap::Entry* entry = mIds.find(id);
	if(!entry)
		return;

	mObjects[entry->second] = PX_SERIAL_OBJECT_ID_INVALID;
	mIds.erase(id);
}
// Looks up an object by its serial id; returns NULL when the id is unknown.
PxBase* Collection::find(PxSerialObjectId id) const
{
	PX_CHECK_AND_RETURN_NULL(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::find called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");

	const IdToObjectMap::Entry* entry = mIds.find(id);
	if(!entry)
		return NULL;
	return static_cast<PxBase*>(entry->second);
}
// Merges another collection into this one. Objects keep their valid ids
// unless the id is already bound to a *different* object here; in that case a
// checked-build message is raised and the object is added without an id.
void Collection::add(PxCollection& _collection)
{
	Collection& collection = static_cast<Collection&>(_collection);
	PX_CHECK_AND_RETURN(this != &collection, "PxCollection::add(PxCollection&) called with itself!");

	mObjects.reserve(mObjects.size() + collection.mObjects.size());

	const ObjectToIdMap::Entry* e = collection.mObjects.getEntries();
	for (PxU32 i = 0; i < collection.mObjects.size(); ++i)
	{
		PxSerialObjectId id = e[i].second;
		if( id != PX_SERIAL_OBJECT_ID_INVALID)
		{
			// insert returns false when the id is already present here
			if(!mIds.insert(id, e[i].first))
			{
				// Same id bound to a different object: conflict. Add the
				// object id-less. NOTE(review): assumes mObjects.insert leaves
				// an existing entry unchanged -- confirm hash-map semantics.
				if(mIds[id] != e[i].first)
				{
					PX_CHECK_MSG( false, "PxCollection::add(PxCollection&) called with conflicting id!");
					mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
				}
			}
			else
				mObjects[ e[i].first ] = id;
		}
		else
			// Object has no id in the source collection: add it id-less.
			mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
	}
}
// Removes every object of the other collection that is also present in this
// one, together with its id mapping. Objects not present here are ignored.
void Collection::remove(PxCollection& _collection)
{
	Collection& other = static_cast<Collection&>(_collection);
	PX_CHECK_AND_RETURN(this != &other, "PxCollection::remove(PxCollection&) called with itself!");

	const ObjectToIdMap::Entry* entries = other.mObjects.getEntries();
	const PxU32 count = other.mObjects.size();
	for (PxU32 i = 0; i < count; ++i)
	{
		const ObjectToIdMap::Entry* mine = mObjects.find(entries[i].first);
		if(!mine)
			continue;
		mIds.erase(mine->second);
		mObjects.erase(mine->first);
	}
}
// Returns the number of objects in the collection (with or without an id).
PxU32 Collection::getNbObjects() const
{
	return mObjects.size();
}
// Returns the object at the given index into the hash map's entry array;
// index must be < getNbObjects().
PxBase& Collection::getObject(PxU32 index) const
{
	PX_ASSERT(index < mObjects.size());
	return *mObjects.getEntries()[index].first;
}
// Copies object pointers into userBuffer, starting at startIndex, until the
// buffer is full or the collection is exhausted; returns the number written.
PxU32 Collection::getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getObjects called with userBuffer NULL!");
	PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getObjects called with bufferSize 0!");

	const ObjectToIdMap::Entry* entries = mObjects.getEntries();
	const PxU32 count = mObjects.size();

	PxU32 written = 0;
	for (PxU32 src = startIndex; src < count; ++src)
	{
		if(written >= bufferSize)
			break;
		userBuffer[written++] = entries[src].first;
	}
	return written;
}
// Returns the number of serial-id bindings in the collection.
PxU32 Collection::getNbIds() const
{
	return mIds.size();
}
// Returns the id bound to the object, or PX_SERIAL_OBJECT_ID_INVALID when the
// object is not in the collection or carries no id.
PxSerialObjectId Collection::getId(const PxBase& object) const
{
	// const_cast: the map is keyed by non-const PxBase*; the object is not modified.
	const ObjectToIdMap::Entry* e = mObjects.find(const_cast<PxBase*>(&object));
	return e ? e->second : PX_SERIAL_OBJECT_ID_INVALID;
}
// Copies up to bufferSize valid ids into userBuffer, skipping the first
// startIndex valid ids; returns the number of ids written.
PxU32 Collection::getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getIds called with userBuffer NULL!");
	PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getIds called with bufferSize 0!");

	PxU32 dstIndex = 0;

	// const_cast: getIterator is non-const on the hash map; mIds is not modified here.
	IdToObjectMap::Iterator srcIt = (const_cast<IdToObjectMap&>(mIds)).getIterator();

	while (!srcIt.done() && dstIndex < bufferSize)
	{
		if(srcIt->first != PX_SERIAL_OBJECT_ID_INVALID)
		{
			// Consume the startIndex offset before writing any output.
			if(startIndex > 0)
				startIndex--;
			else
				userBuffer[dstIndex++] = srcIt->first;
		}
		srcIt++;
	}
	return dstIndex;
}
// Factory for the concrete PxCollection implementation; callers release the
// instance via PxCollection::release().
PxCollection* PxCreateCollection()
{
	return PX_NEW(Collection);
}

View File

@@ -0,0 +1,96 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_COLLECTION_H
#define CM_COLLECTION_H
#include "common/PxCollection.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAllocator.h"
namespace physx
{
namespace Cm
{
	// Hash map used by Collection: a PxCoalescedHashMap extended with an
	// insertUnique() that constructs the entry in place. Presumably the base
	// map's insertUnique skips the duplicate-key check (caller guarantees the
	// key is absent) -- confirm against the PxHashMap internals.
	template <class Key,
			  class Value,
			  class HashFn = PxHash<Key>,
			  class Allocator = PxAllocator >
	class CollectionHashMap : public PxCoalescedHashMap< Key, Value, HashFn, Allocator>
	{
		typedef physx::PxHashMapBase< Key, Value, HashFn, Allocator> MapBase;
		typedef PxPair<const Key,Value> EntryData;

	public:
		CollectionHashMap(PxU32 initialTableSize = 64, float loadFactor = 0.75f):
			PxCoalescedHashMap< Key, Value, HashFn, Allocator>(initialTableSize,loadFactor) {}

		// Placement-constructs the (key, value) pair into the slot returned by
		// the base map's insertUnique.
		void insertUnique(const Key& k, const Value& v)
		{
			PX_PLACEMENT_NEW(MapBase::mBase.insertUnique(k), EntryData)(k,v);
		}
	};
	// Concrete implementation of PxCollection: a set of PxBase objects, each
	// optionally tagged with a unique PxSerialObjectId for serialization.
	// Two hash maps are kept in sync: object -> id and id -> object.
	class Collection : public PxCollection, public PxUserAllocated
	{
	public:
		typedef CollectionHashMap<PxBase*, PxSerialObjectId> ObjectToIdMap;
		typedef CollectionHashMap<PxSerialObjectId, PxBase*> IdToObjectMap;

		// PxCollection interface (see CmCollection.cpp for semantics).
		virtual void add(PxBase& object, PxSerialObjectId ref);
		virtual void remove(PxBase& object);
		virtual bool contains(PxBase& object) const;
		virtual void addId(PxBase& object, PxSerialObjectId id);
		virtual void removeId(PxSerialObjectId id);
		virtual PxBase* find(PxSerialObjectId ref) const;
		virtual void add(PxCollection& collection);
		virtual void remove(PxCollection& collection);
		virtual PxU32 getNbObjects() const;
		virtual PxBase& getObject(PxU32 index) const;
		virtual PxU32 getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
		virtual PxU32 getNbIds() const;
		virtual PxSerialObjectId getId(const PxBase& object) const;
		virtual PxU32 getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;

		void release() { PX_DELETE_THIS; }

		// Only for internal use. Bypasses virtual calls, specialized behaviour.
		// internalAdd assumes the object is not yet present (insertUnique).
		PX_INLINE void internalAdd(PxBase* s, PxSerialObjectId id = PX_SERIAL_OBJECT_ID_INVALID) { mObjects.insertUnique(s, id); }
		PX_INLINE PxU32 internalGetNbObjects() const { return mObjects.size(); }
		PX_INLINE PxBase* internalGetObject(PxU32 i) const { PX_ASSERT(i<mObjects.size()); return mObjects.getEntries()[i].first; }
		PX_INLINE const ObjectToIdMap::Entry* internalGetObjects() const { return mObjects.getEntries(); }

		IdToObjectMap mIds;		// id -> object (valid ids only)
		ObjectToIdMap mObjects;	// object -> id (PX_SERIAL_OBJECT_ID_INVALID when id-less)
	};
}
}
#endif

View File

@@ -0,0 +1,188 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_CONE_LIMIT_HELPER_H
#define CM_CONE_LIMIT_HELPER_H
// This class contains methods for supporting the tan-quarter swing limit - that
// is the, ellipse defined by tanQ(theta)^2/tanQ(thetaMax)^2 + tanQ(phi)^2/tanQ(phiMax)^2 = 1
//
// Angles are passed as an PxVec3 swing vector with x = 0 and y and z the swing angles
// around the y and z axes
#include "foundation/PxMathUtils.h"
namespace physx
{
namespace Cm
{
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal tanAdd(PxReal tan1, PxReal tan2)
{
PX_ASSERT(PxAbs(1.0f-tan1*tan2)>1e-6f);
return (tan1+tan2)/(1.0f-tan1*tan2);
}
	// Computes the limit axis and error term for a point on the swing-limit cone.
	//
	// r:         tan-quarter rotation vector of the clamped swing (x component zero)
	// d:         direction along the limit ellipse at r (x component zero)
	// twistAxis: image of the x axis under the full swing rotation
	// axis:      [out] unit axis perpendicular to the cone line and cone normal
	// returns coneLine.cross(axis).dot(twistAxis) -- per the callers' contract,
	// the sine of the angular difference, positive inside the cone
	PX_CUDA_CALLABLE PX_FORCE_INLINE float computeAxisAndError(const PxVec3& r, const PxVec3& d, const PxVec3& twistAxis, PxVec3& axis)
	{
		// the point on the cone defined by the tanQ swing vector r
		// this code is equal to quatFromTanQVector(r).rotate(PxVec3(1.0f, 0.0f, 0.0f);
		const PxVec3 p(1.0f, 0.0f, 0.0f);
		const PxReal r2 = r.dot(r), a = 1.0f - r2, b = 1.0f/(1.0f+r2), b2 = b*b;
		const PxReal v1 = 2.0f * a * b2;
		const PxVec3 v2(a, 2.0f * r.z, -2.0f * r.y);	// a*p + 2*r.cross(p);

		const PxVec3 coneLine = v1 * v2 - p;	// already normalized

		// the derivative of coneLine in the direction d
		const PxReal rd = r.dot(d);
		const PxReal dv1 = -4.0f * rd * (3.0f - r2)*b2*b;
		const PxVec3 dv2(-2.0f * rd, 2.0f * d.z, -2.0f * d.y);

		const PxVec3 coneNormal = v1 * dv2 + dv1 * v2;

		// axis spans the plane tangent to the cone surface along coneLine
		axis = coneLine.cross(coneNormal)/coneNormal.magnitude();
		return coneLine.cross(axis).dot(twistAxis);
	}
// this is here because it's used in both LL and Extensions. However, it
// should STAY IN THE SDK CODE BASE because it's SDK-specific
	// Swing-cone limit helper in tan-quarter-angle coordinates: the limit
	// surface is the ellipse
	//   tanQ(theta)^2/tanQ(thetaMax)^2 + tanQ(phi)^2/tanQ(phiMax)^2 = 1
	class ConeLimitHelper
	{
	public:
		// tanQSwingY/tanQSwingZ: tan-quarter of the maximum swing angles around y and z
		// tanQPadding: tan-quarter of the padding angle applied inside the cone
		PX_CUDA_CALLABLE ConeLimitHelper(PxReal tanQSwingY, PxReal tanQSwingZ, PxReal tanQPadding)
			: mTanQYMax(tanQSwingY), mTanQZMax(tanQSwingZ), mTanQPadding(tanQPadding) {}

		// whether the point is inside the (inwardly) padded cone - if it is, there's no limit
		// constraint
		PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& tanQSwing) const
		{
			// pad each swing component inward via the tangent addition identity
			const PxReal tanQSwingYPadded = tanAdd(PxAbs(tanQSwing.y),mTanQPadding);
			const PxReal tanQSwingZPadded = tanAdd(PxAbs(tanQSwing.z),mTanQPadding);
			return PxSqr(tanQSwingYPadded/mTanQYMax)+PxSqr(tanQSwingZPadded/mTanQZMax) <= 1;
		}

		// Clamps a tanQ swing vector onto the limit ellipse; outputs the
		// ellipse normal at the clamped point.
		PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 clamp(const PxVec3& tanQSwing, PxVec3& normal) const
		{
			const PxVec3 p = PxEllipseClamp(tanQSwing, PxVec3(0.0f, mTanQYMax, mTanQZMax));
			normal = PxVec3(0.0f, p.y/PxSqr(mTanQYMax), p.z/PxSqr(mTanQZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
			PxReal err = PxAbs(PxSqr(p.y/mTanQYMax) + PxSqr(p.z/mTanQZMax) - 1);
			PX_ASSERT(err<1e-3);
#endif
			return p;
		}

		// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
		// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
		// output is an axis such that positive rotation increases the angle outward from the
		// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
		// positive if the twist axis is inside the cone
		// Returns false (and leaves the outputs untouched) when the swing is
		// inside the padded cone and no limit constraint is needed.
		PX_CUDA_CALLABLE bool getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
		{
			PX_ASSERT(swing.w>0.0f);
			const PxVec3 twistAxis = swing.getBasisVector0();
			const PxVec3 tanQSwing = PxVec3(0.0f, PxTanHalf(swing.z,swing.w), -PxTanHalf(swing.y,swing.w));
			if(contains(tanQSwing))
				return false;

			PxVec3 normal, clamped = clamp(tanQSwing, normal);

			// rotation vector and ellipse normal
			const PxVec3 r(0.0f, -clamped.z, clamped.y), d(0.0f, -normal.z, normal.y);
			error = computeAxisAndError(r, d, twistAxis, axis);

			PX_ASSERT(PxAbs(axis.magnitude()-1)<1e-5f);

#ifdef PX_PARANOIA_ELLIPSE_CHECK
			bool inside = PxSqr(tanQSwing.y/mTanQYMax) + PxSqr(tanQSwing.z/mTanQZMax) <= 1;
			PX_ASSERT(inside && error>-1e-4f || !inside && error<1e-4f);
#endif
			return true;
		}

	private:
		// all stored as tan-quarter values (see constructor)
		PxReal mTanQYMax, mTanQZMax, mTanQPadding;
	};
	// Swing-cone limit helper that clamps directly in swing-angle space
	// (no tan-quarter coordinates for the clamp itself, hence "TanLess"):
	// the limit surface is the ellipse y^2/mYMax^2 + z^2/mZMax^2 = 1.
	class ConeLimitHelperTanLess
	{
	public:
		// swingY/swingZ: maximum swing angles around the y and z axes (radians)
		PX_CUDA_CALLABLE ConeLimitHelperTanLess(PxReal swingY, PxReal swingZ)
			: mYMax(swingY), mZMax(swingZ) {}

		// Clamps a swing-angle vector onto the limit ellipse; outputs the
		// ellipse normal at the clamped point.
		PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 clamp(const PxVec3& swing, PxVec3& normal) const
		{
			// finds the closest point on the ellipse to a given point
			const PxVec3 p = PxEllipseClamp(swing, PxVec3(0.0f, mYMax, mZMax));
			// normal to the point on ellipse
			normal = PxVec3(0.0f, p.y/PxSqr(mYMax), p.z/PxSqr(mZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
			PxReal err = PxAbs(PxSqr(p.y/mYMax) + PxSqr(p.z/mZMax) - 1);
			PX_ASSERT(err<1e-3);
#endif
			return p;
		}

		// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
		// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
		// output is an axis such that positive rotation increases the angle outward from the
		// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
		// positive if the twist axis is inside the cone
		PX_CUDA_CALLABLE void getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
		{
			PX_ASSERT(swing.w>0.0f);
			const PxVec3 twistAxis = swing.getBasisVector0();

			// get the angles from the swing quaternion
			const PxVec3 swingAngle(0.0f, 4.0f * PxAtan2(swing.y, 1.0f + swing.w), 4.0f * PxAtan2(swing.z, 1.0f + swing.w));

			PxVec3 normal, clamped = clamp(swingAngle, normal);

			// rotation vector (converted back to tan-quarter form) and ellipse normal
			const PxVec3 r(0.0f, PxTan(clamped.y/4.0f), PxTan(clamped.z/4.0f)), d(0.0f, normal.y, normal.z);
			error = computeAxisAndError(r, d, twistAxis, axis);

			PX_ASSERT(PxAbs(axis.magnitude()-1.0f)<1e-5f);
		}

	private:
		// maximum swing angles around y and z (radians)
		PxReal mYMax, mZMax;
	};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_FLUSH_POOL_H
#define CM_FLUSH_POOL_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
/*
Pool used to allocate variable sized tasks. It's intended to be cleared after a short period (time step).
*/
namespace physx
{
namespace Cm
{
// Number of chunks kept alive across clear() calls to avoid churn on the allocator.
static const PxU32 sSpareChunkCount = 2;
// Bump allocator for variable-sized, short-lived allocations (typically tasks created
// during one simulation step). Memory is carved sequentially out of equally-sized chunks;
// individual allocations are never freed — the whole pool is cleared/reset in one go
// once the step is over.
class FlushPool
{
PX_NOCOPY(FlushPool)
public:
// chunkSize: byte size of each chunk; also the upper bound for a single allocation.
FlushPool(PxU32 chunkSize) : mChunks("FlushPoolChunk"), mChunkIndex(0), mOffset(0), mChunkSize(chunkSize)
{
// Always keep at least one chunk so allocateNotThreadSafe() never sees an empty array.
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
}
~FlushPool()
{
for (PxU32 i = 0; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
}
// Thread-safe allocation (takes the internal mutex). alignment must be a power of two.
void* allocate(PxU32 size, PxU32 alignment=16)
{
PxMutex::ScopedLock lock(mMutex);
return allocateNotThreadSafe(size, alignment);
}
// Lock-free variant for single-threaded phases (or when lock()/unlock() is held).
// alignment must be a power of two.
// NOTE(review): only `size <= mChunkSize` is asserted; with alignments larger than the
// chunk's natural alignment, size+pad could still exceed mChunkSize — TODO confirm
// callers never combine near-chunk-size allocations with large alignments.
void* allocateNotThreadSafe(PxU32 size, PxU32 alignment=16)
{
PX_ASSERT(PxIsPowerOfTwo(alignment));
PX_ASSERT(size <= mChunkSize && !mChunks.empty());
// padding for alignment
size_t unalignedStart = size_t(mChunks[mChunkIndex]+mOffset);
PxU32 pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
// Current chunk too full: advance to the next chunk, allocating one if needed.
if (mOffset + size + pad > mChunkSize)
{
mChunkIndex++;
mOffset = 0;
if (mChunkIndex >= mChunks.size())
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
// update padding to ensure new alloc is aligned
unalignedStart = size_t(mChunks[mChunkIndex]);
pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
}
void* ptr = mChunks[mChunkIndex] + mOffset + pad;
PX_ASSERT((size_t(ptr)&(size_t(alignment)-1)) == 0);
mOffset += size + pad;
return ptr;
}
// Thread-safe clear: invalidates all outstanding allocations, keeps a few spare chunks.
void clear(PxU32 spareChunkCount = sSpareChunkCount)
{
PxMutex::ScopedLock lock(mMutex);
clearNotThreadSafe(spareChunkCount);
}
// Rewind the pool; frees chunks beyond (last used + spareChunkCount).
void clearNotThreadSafe(PxU32 spareChunkCount = sSpareChunkCount)
{
//release memory not used previously
PxU32 targetSize = mChunkIndex+spareChunkCount;
while (mChunks.size() > targetSize)
{
PxU8* ptr = mChunks.popBack();
PX_FREE(ptr);
}
mChunkIndex = 0;
mOffset = 0;
}
// Aggressive rewind: frees everything except the first chunk.
void resetNotThreadSafe()
{
PxU8* firstChunk = mChunks[0];
for (PxU32 i = 1; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
mChunks.clear();
mChunks.pushBack(firstChunk);
mChunkIndex = 0;
mOffset = 0;
}
// Manual locking, for batching several allocateNotThreadSafe() calls under one lock.
void lock()
{
mMutex.lock();
}
void unlock()
{
mMutex.unlock();
}
private:
PxMutex mMutex;
PxArray<PxU8*> mChunks; // all chunks, each mChunkSize bytes
PxU32 mChunkIndex; // chunk currently being bumped
PxU32 mOffset; // byte offset into the current chunk
PxU32 mChunkSize;
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,203 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_ID_POOL_H
#define CM_ID_POOL_H
#include "foundation/PxArray.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Cm
{
// Hands out dense PxU32 IDs and recycles released ones. FreeBuffer is any
// array-like type with pushBack/popBack/clear/size (PxArray or InlineFixedArray).
template<class FreeBuffer>
class IDPoolBase : public PxUserAllocated
{
protected:
	PxU32		mCurrentID;	// next never-used ID; all IDs handed out so far are < mCurrentID
	FreeBuffer	mFreeIDs;	// released IDs waiting to be recycled

public:
	IDPoolBase() : mCurrentID(0)	{}

	// Return an ID to the pool so it can be handed out again.
	void freeID(PxU32 id)
	{
		if(id + 1 == mCurrentID)
			--mCurrentID;			// releasing the most recently created ID: just shrink the range
		else
			mFreeIDs.pushBack(id);	// otherwise remember it for recycling
	}

	// Forget all outstanding IDs and start numbering from zero again.
	void freeAll()
	{
		mCurrentID = 0;
		mFreeIDs.clear();
	}

	// Hand out an ID, preferring recycled ones over fresh ones.
	PxU32 getNewID()
	{
		if(mFreeIDs.size() != 0)
			return mFreeIDs.popBack();	// recycle the most recently released ID

		return mCurrentID++;			// no recycled IDs left: mint a new one
	}

	// Number of IDs currently handed out and not yet released.
	PxU32 getNumUsedID() const
	{
		return mCurrentID - mFreeIDs.size();
	}

	// Exclusive upper bound of all IDs ever handed out.
	PxU32 getMaxID() const
	{
		return mCurrentID;
	}
};
//Extends IDPoolBase for callers that cannot safely recycle an ID the moment it is
//released: frees are queued and only folded back into the free list when the caller
//invokes processDeferredIds() at a safe point.
template<class FreeBuffer>
class DeferredIDPoolBase : public IDPoolBase<FreeBuffer>
{
	FreeBuffer mDeferredFreeIDs;	// released IDs that must not be reused yet

public:
	//queue an index for deferred release
	void deferredFreeID(PxU32 id)
	{
		mDeferredFreeIDs.pushBack(id);
	}

	//fold all queued indices back into the regular free list
	void processDeferredIds()
	{
		const PxU32 pendingCount = mDeferredFreeIDs.size();
		for(PxU32 i = 0; i < pendingCount; ++i)
			IDPoolBase<FreeBuffer>::freeID(mDeferredFreeIDs[i]);
		mDeferredFreeIDs.clear();
	}

	//release all indices, including those still queued
	void freeAll()
	{
		mDeferredFreeIDs.clear();
		IDPoolBase<FreeBuffer>::freeAll();
	}

	//used IDs, not counting those pending deferred release
	PxU32 getNumUsedID() const
	{
		return IDPoolBase<FreeBuffer>::getNumUsedID() - mDeferredFreeIDs.size();
	}

	FreeBuffer& getDeferredFreeIDs()	{ return mDeferredFreeIDs; }
};
//Fixed-capacity array with a PxArray-like push/pop interface; storage is inline
//(no heap allocation), which historically made it SPU friendly.
template <typename T, uint32_t N>
class InlineFixedArray
{
	T		mArr[N];
	PxU32	mSize;

public:
	InlineFixedArray() : mSize(0)	{}
	~InlineFixedArray()				{}

	// Append a copy of value. Asserts (does not grow) when full.
	void pushBack(const T& value)
	{
		PX_ASSERT(mSize < N);
		mArr[mSize] = value;
		mSize++;
	}

	// Remove and return the last element. Asserts when empty.
	T popBack()
	{
		PX_ASSERT(mSize > 0);
		mSize--;
		return mArr[mSize];
	}

	void clear()	{ mSize = 0; }

	T&			operator [] (PxU32 i)		{ PX_ASSERT(i < N); return mArr[i]; }
	const T&	operator [] (PxU32 i) const	{ PX_ASSERT(i < N); return mArr[i]; }

	PxU32 size() const	{ return mSize; }
};
//Fixed-capacity ID pool (no heap allocation).
template<PxU32 Capacity>
class InlineIDPool : public IDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
// Number of IDs that can still be handed out before the fixed capacity is exhausted.
PxU32 getNumRemainingIDs()
{
return Capacity - this->getNumUsedID();
}
};
//Dynamically-sized ID pool backed by a growable PxArray free list.
class IDPool : public IDPoolBase<PxArray<PxU32> >
{
};
//This class is used to recycle indices. It supports deferred release, so that until processDeferredIds is called,
//released indices will not be reallocated. This class will fail if the calling code requests more IDs than the
//fixed Capacity provides; it is the calling code's responsibility to ensure that this does not happen.
template<PxU32 Capacity>
class InlineDeferredIDPool : public DeferredIDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
// Number of IDs that can still be handed out before the fixed capacity is exhausted.
PxU32 getNumRemainingIDs()
{
return Capacity - IDPoolBase< InlineFixedArray<PxU32, Capacity> >::getNumUsedID();
}
};
//Dynamically-sized ID pool with deferred release, backed by growable PxArray free lists.
class DeferredIDPool : public DeferredIDPoolBase<PxArray<PxU32> >
{
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_MATRIX34_H
#define CM_MATRIX34_H
#include "foundation/PxMat34.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Cm
{
#if !PX_CUDA_COMPILER
// PT: similar to PxMat33Padded
// PT: similar to PxMat33Padded — a PxMat34 whose rotation part is built straight
// from a quaternion with SIMD, avoiding the scalar quat-to-matrix conversion.
class Matrix34FromTransform : public PxMat34
{
public:
	//! Construct from a PxTransform
	explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34FromTransform(const PxTransform& other)
	{
		using namespace aos;

		// Load the quaternion and expand it into the three rotation columns.
		const QuatV rotV = V4LoadU(&other.q.x);
		Vec3V col0, col1, col2;
		QuatGetMat33V(rotV, col0, col1, col2);

		// From "buildFrom"
		// PT: TODO: investigate if these overlapping stores are a problem
		V4StoreU(Vec4V_From_Vec3V(col0), &m.column0.x);
		V4StoreU(Vec4V_From_Vec3V(col1), &m.column1.x);
		V4StoreU(Vec4V_From_Vec3V(col2), &m.column2.x);

		p = other.p;
	}
};
#endif
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,296 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_POOL_H
#define CM_POOL_H
#include "foundation/PxSort.h"
#include "foundation/PxMutex.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxBitMap.h"
namespace physx
{
namespace Cm
{
/*!
Allocator for pools of data structures
Also decodes indices (which can be computed from handles) into objects. To make this
faster, the EltsPerSlab must be a power of two
*/
template <class T>
class PoolList : public PxAllocatorTraits<T>::Type
{
typedef typename PxAllocatorTraits<T>::Type Alloc;
PX_NOCOPY(PoolList)
public:
// eltsPerSlab must be a power of two: findByIndex* rely on shift/mask decoding.
PX_INLINE PoolList(const Alloc& alloc, PxU32 eltsPerSlab)
: Alloc(alloc),
mEltsPerSlab(eltsPerSlab),
mSlabCount(0),
mFreeList(0),
mFreeCount(0),
mSlabs(NULL)
{
PX_ASSERT(mEltsPerSlab>0);
PX_ASSERT((mEltsPerSlab & (mEltsPerSlab-1)) == 0);
// Compute log2(mEltsPerSlab) for fast index decoding.
mLog2EltsPerSlab = 0;
for(mLog2EltsPerSlab=0; mEltsPerSlab!=PxU32(1<<mLog2EltsPerSlab); mLog2EltsPerSlab++)
;
}
PX_INLINE ~PoolList()
{
destroy();
}
// Destroys every element (used or free — all slab entries were constructed) and
// releases all memory. Safe to call more than once.
PX_INLINE void destroy()
{
// Run all destructors
for(PxU32 i=0;i<mSlabCount;i++)
{
PX_ASSERT(mSlabs);
T* slab = mSlabs[i];
for(PxU32 j=0;j<mEltsPerSlab;j++)
{
slab[j].~T();
}
}
//Deallocate
for(PxU32 i=0;i<mSlabCount;i++)
{
Alloc::deallocate(mSlabs[i]);
mSlabs[i] = NULL;
}
mSlabCount = 0;
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = NULL;
if(mSlabs)
{
Alloc::deallocate(mSlabs);
mSlabs = NULL;
}
}
// Pulls nbRequired elements out of the pool in one call, growing by whole slabs as
// needed. Writes the element pointers to `elements` and returns how many were
// delivered (may be less than nbRequired only if a slab allocation fails).
PxU32 preallocate(const PxU32 nbRequired, T** elements)
{
//(1) Allocate and pull out an array of X elements
PxU32 nbToAllocate = nbRequired > mFreeCount ? nbRequired - mFreeCount : 0;
PxU32 nbElements = nbRequired - nbToAllocate;
// Satisfy as much of the request as possible from the tail of the free list.
PxMemCopy(elements, mFreeList + (mFreeCount - nbElements), sizeof(T*) * nbElements);
//PxU32 originalFreeCount = mFreeCount;
mFreeCount -= nbElements;
if (nbToAllocate)
{
// The free list must be exhausted before we grow, so reallocating it below is safe.
PX_ASSERT(mFreeCount == 0);
PxU32 nbSlabs = (nbToAllocate + mEltsPerSlab - 1) / mEltsPerSlab; //The number of slabs we need to allocate...
//allocate our slabs...
PxU32 freeCount = mFreeCount;
for (PxU32 i = 0; i < nbSlabs; ++i)
{
//KS - would be great to allocate this using a single allocation but it will make releasing slabs fail later :(
T * mAddr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), PX_FL));
if (!mAddr)
return nbElements; //Allocation failed so only return the set of elements we could allocate from the free list
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if (mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
// Grow bookkeeping with 2x headroom. The free list is replaced without copying,
// which is safe because mFreeCount is 0 on this path (asserted above).
mUseBitmap.resize(2 * newSlabCount*mEltsPerSlab); //set last element as not used
if (mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * mEltsPerSlab * sizeof(T*), PX_FL));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount *sizeof(T*), PX_FL));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = mAddr;
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
//Now add all these to the mFreeList and elements...
// Slab entries beyond the request go to the free list (descending index order);
// the remainder is constructed and handed to the caller directly.
PxI32 idx = PxI32(mEltsPerSlab - 1);
for (; idx >= PxI32(nbToAllocate); --idx)
{
mFreeList[freeCount++] = PX_PLACEMENT_NEW(mAddr + idx, T(baseIndex + idx));
}
PxU32 origElements = nbElements;
T** writeIdx = elements + nbElements;
for (; idx >= 0; --idx)
{
writeIdx[idx] = PX_PLACEMENT_NEW(mAddr + idx, T(baseIndex + idx));
nbElements++;
}
nbToAllocate -= (nbElements - origElements);
}
mFreeCount = freeCount;
}
PX_ASSERT(nbElements == nbRequired);
// Mark every delivered element as in use.
for (PxU32 a = 0; a < nbElements; ++a)
{
mUseBitmap.set(elements[a]->getIndex());
}
return nbRequired;
}
// TODO: would be nice to add templated construct/destroy methods like ObjectPool
// Grab one element (already constructed); returns 0 if the pool cannot grow.
PX_INLINE T* get()
{
if(mFreeCount == 0 && !extend())
return 0;
T* element = mFreeList[--mFreeCount];
mUseBitmap.set(element->getIndex());
return element;
}
// Return an element to the free list. The destructor is NOT run here; it runs in destroy().
PX_INLINE void put(T* element)
{
PxU32 i = element->getIndex();
mUseBitmap.reset(i);
mFreeList[mFreeCount++] = element;
}
/*
WARNING: Unlike findByIndexFast below, this method is NOT safe to use if another thread
is concurrently updating the pool (e.g. through put/get/extend/getIterator), since the
safety boundedTest uses mSlabCount and mUseBitmap.
*/
PX_FORCE_INLINE T* findByIndex(PxU32 index) const
{
if(index>=mSlabCount*mEltsPerSlab || !(mUseBitmap.boundedTest(index)))
return 0;
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
/*
This call is safe to do while other threads update the pool.
*/
PX_FORCE_INLINE T* findByIndexFast(PxU32 index) const
{
// High bits select the slab, low bits the element within the slab.
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
// Allocate one more slab, construct its elements and push them on the free list.
// Returns false if the slab allocation fails.
bool extend()
{
T * mAddr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), PX_FL));
if(!mAddr)
return false;
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if(mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
// Only reached when the free list is empty (we just failed to pop from it),
// so replacing mFreeList without copying loses nothing.
mUseBitmap.resize(2* newSlabCount*mEltsPerSlab); //set last element as not used
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount * mEltsPerSlab * sizeof(T*), PX_FL));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * sizeof(T*), PX_FL));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = mAddr;
// Add to free list in descending order so that lowest indices get allocated first -
// the FW context code currently *relies* on this behavior to grab the zero-index volume
// which can't be allocated to the user. TODO: fix this
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
PxU32 freeCount = mFreeCount;
for(PxI32 i=PxI32(mEltsPerSlab-1);i>=0;i--)
mFreeList[freeCount++] = PX_PLACEMENT_NEW(mAddr+i, T(baseIndex+ i));
mFreeCount = freeCount;
return true;
}
// Highest index currently in use (PxBitMap::findLast semantics apply when empty).
PX_INLINE PxU32 getMaxUsedIndex() const
{
return mUseBitmap.findLast();
}
// Iterate over the indices of all elements currently in use.
PX_INLINE PxBitMap::Iterator getIterator() const
{
return PxBitMap::Iterator(mUseBitmap);
}
private:
const PxU32 mEltsPerSlab; // elements per slab (power of two)
PxU32 mSlabCount; // number of allocated slabs
PxU32 mLog2EltsPerSlab; // log2(mEltsPerSlab), for index decoding
T** mFreeList; // stack of free element pointers
PxU32 mFreeCount; // number of valid entries in mFreeList
T** mSlabs; // array of slab base pointers
PxBitMap mUseBitmap; // per-index in-use flags
};
}
}
#endif

View File

@@ -0,0 +1,414 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PREALLOCATING_POOL_H
#define CM_PREALLOCATING_POOL_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSort.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Cm
{
// One fixed-capacity slab of raw element storage with an intrusive free list:
// a freed element's first pointer-sized bytes link to the next free element.
class PreallocatingRegion
{
public:
	PX_FORCE_INLINE PreallocatingRegion() : mMemory(NULL), mFirstFree(NULL), mNbElements(0)	{}

	// Allocate backing storage for maxElements items of elementSize bytes each.
	void init(PxU32 maxElements, PxU32 elementSize, const char* typeName)
	{
		mFirstFree = NULL;
		mNbElements = 0;
		PX_ASSERT(typeName);
		PX_UNUSED(typeName);
		mMemory = reinterpret_cast<PxU8*>(PX_ALLOC(sizeof(PxU8)*elementSize*maxElements, typeName?typeName:"SceneSim Pool"));	// ### addActor alloc

		// Each free element must be able to hold one link pointer.
		PX_ASSERT(elementSize*maxElements>=sizeof(void*));
	}

	// Release the backing storage.
	void reset()
	{
		PX_FREE(mMemory);
	}

	// Hand out one element, or NULL when the region is full.
	PX_FORCE_INLINE PxU8* allocateMemory(PxU32 maxElements, PxU32 elementSize)
	{
		// Prefer recycling a previously freed element...
		if(mFirstFree)
		{
			PxU8* element = reinterpret_cast<PxU8*>(mFirstFree);
			mFirstFree = *reinterpret_cast<void**>(element);	// unlink head of free list
			return element;
		}

		// ...otherwise carve a fresh one off the end of the region.
		if(mNbElements==maxElements)
			return NULL;	// Out of memory

		const PxU32 newIndex = mNbElements++;
		return mMemory + newIndex * elementSize;
	}

	// Return an element to this region's free list.
	void deallocateMemory(PxU32 maxElements, PxU32 elementSize, PxU8* element)
	{
		PX_ASSERT(element);
		PX_ASSERT(element>=mMemory && element<mMemory + maxElements * elementSize);
		PX_UNUSED(elementSize);
		PX_UNUSED(maxElements);

		// Push onto the intrusive free list, reusing the element's own storage.
		*reinterpret_cast<void**>(element) = mFirstFree;
		mFirstFree = element;
	}

	// Regions are ordered by base address so the manager can binary-search them.
	PX_FORCE_INLINE bool operator < (const PreallocatingRegion& p) const
	{
		return mMemory < p.mMemory;
	}

	PX_FORCE_INLINE bool operator > (const PreallocatingRegion& p) const
	{
		return mMemory > p.mMemory;
	}

	PxU8*	mMemory;		// slab base address
	void*	mFirstFree;		// head of the intrusive free list (NULL when empty)
	PxU32	mNbElements;	// high-water mark of elements carved from the slab
};
// Manages a growing set of PreallocatingRegion slabs. Allocation tries the "active"
// region first; deallocation locates the owning region by binary search over the
// regions sorted by base address.
class PreallocatingRegionManager
{
public:
PreallocatingRegionManager(PxU32 maxElements, PxU32 elementSize, const char* typeName)
: mMaxElements (maxElements)
, mElementSize (elementSize)
, mActivePoolIndex (0)
, mPools ("MyPoolManagerPools")
, mNeedsSorting (true)
, mTypeName (typeName)
{
// Start with one region so allocateMemory() always has an active pool.
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
}
~PreallocatingRegionManager()
{
const PxU32 nbPools = mPools.size();
for(PxU32 i=0;i<nbPools;i++)
mPools[i].reset();
}
// Grow until at least n elements fit without further slab allocations.
void preAllocate(PxU32 n)
{
if(!n)
return;
const PxU32 nbPools = mPools.size();
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
PxU32 availableSpace = nbPools * maxElements;
while(n>availableSpace)
{
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
availableSpace += maxElements;
}
}
// Allocate one element; falls back to scanning/growing when the active region is full.
PX_FORCE_INLINE PxU8* allocateMemory()
{
PX_ASSERT(mActivePoolIndex<mPools.size());
PxU8* memory = mPools[mActivePoolIndex].allocateMemory(mMaxElements, mElementSize);
return memory ? memory : searchForMemory();
}
// Return an element to the region that owns it (binary search by address).
void deallocateMemory(PxU8* element)
{
if(!element)
return;
// Regions are kept sorted by base address; re-sort lazily after growth.
if(mNeedsSorting)
PxSort(mPools.begin(), mPools.size());
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
const PxU32 slabSize = maxElements * elementSize;
const PxU32 nbPools = mPools.size();
// O(log n) search
int first = 0;
int last = int(nbPools-1);
while(first<=last)
{
const int mid = (first+last)>>1;
PreallocatingRegion& candidate = mPools[PxU32(mid)];
if(contains(candidate.mMemory, slabSize, element))
{
candidate.deallocateMemory(maxElements, elementSize, element);
// when we sorted earlier we trashed the active index, but at least this region has a free element
if(mNeedsSorting)
mActivePoolIndex = PxU32(mid);
mNeedsSorting = false;
return;
}
if(candidate.mMemory<element)
first = mid+1;
else
last = mid-1;
}
// Element did not come from any of our regions.
PX_ASSERT(0);
}
private:
PreallocatingRegionManager& operator=(const PreallocatingRegionManager&);
// Slow path of allocateMemory(): try every other region, then grow by one region.
PxU8* searchForMemory()
{
const PxU32 nbPools = mPools.size();
const PxU32 activePoolIndex = mActivePoolIndex;
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
for(PxU32 i=0;i<nbPools;i++)
{
if(i==activePoolIndex)
continue;
PxU8* memory = mPools[i].allocateMemory(maxElements, elementSize);
if(memory)
{
mActivePoolIndex = i;
return memory;
}
}
// All regions full: append a fresh one. The sorted order is now stale.
mActivePoolIndex = nbPools;
mNeedsSorting = true;
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
PreallocatingRegion& newPool = mPools.pushBack(tmp); // ### addActor alloc (StaticSim, ShapeSim, SceneQueryShapeData)
return newPool.allocateMemory(maxElements, elementSize);
}
// True when element lies inside the slab starting at memory.
PX_FORCE_INLINE bool contains(PxU8* memory, const PxU32 slabSize, PxU8* element)
{
return element>=memory && element<memory+slabSize;
}
const PxU32 mMaxElements; // elements per region
const PxU32 mElementSize; // bytes per element
PxU32 mActivePoolIndex; // region tried first by allocateMemory()
PxArray<PreallocatingRegion> mPools;
bool mNeedsSorting; // region array no longer sorted by address
const char* mTypeName; // allocation tag forwarded to PX_ALLOC
};
// Typed object pool on top of PreallocatingRegionManager: raw allocation,
// placement-construction helpers (up to five constructor arguments), and
// destruction that returns memory to the pool.
template<class T>
class PreallocatingPool : public PxUserAllocated
{
	PreallocatingPool<T>& operator=(const PreallocatingPool<T>&);
public:
	PreallocatingPool(PxU32 maxElements, const char* typeName) : mPool(maxElements, sizeof(T), typeName)
	{
	}

	~PreallocatingPool()
	{
	}

	// Make sure n elements can be handed out without growing.
	PX_FORCE_INLINE void preAllocate(PxU32 n)
	{
		mPool.preAllocate(n);
	}

	// Raw memory for one T; no constructor runs.
	PX_INLINE T* allocate()
	{
		return reinterpret_cast<T*>(mPool.allocateMemory());
	}

	// Same as allocate(), but also prefetches the object's cache lines.
	PX_FORCE_INLINE T* allocateAndPrefetch()
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		PxPrefetch(mem, sizeof(T));
		return mem;
	}

	// allocate + placement-new; each overload returns NULL when the pool is out of memory.
	PX_INLINE T* construct()
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T());
	}

	template<class A1>
	PX_INLINE T* construct(A1& a)
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T(a));
	}

	template<class A1, class A2>
	PX_INLINE T* construct(A1& a, A2& b)
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T(a,b));
	}

	template<class A1, class A2, class A3>
	PX_INLINE T* construct(A1& a, A2& b, A3& c)
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T(a,b,c));
	}

	template<class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T(a,b,c,d));
	}

	template<class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		T* mem = reinterpret_cast<T*>(mPool.allocateMemory());
		if(!mem)
			return NULL;
		return PX_PLACEMENT_NEW(mem, T(a,b,c,d,e));
	}

	////
	// Placement-construct into memory previously obtained from allocate()/allocateAndPrefetch().
	PX_INLINE T* construct(T* t)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T());
	}

	template<class A1>
	PX_INLINE T* construct(T* t, A1& a)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a));
	}

	template<class A1, class A2>
	PX_INLINE T* construct(T* t, A1& a, A2& b)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b));
	}

	template<class A1, class A2, class A3>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c));
	}

	template<class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c,d));
	}

	template<class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c,d,e));
	}

	// Run the destructor and return the memory to the pool. NULL is a no-op.
	PX_INLINE void destroy(T* const p)
	{
		if(!p)
			return;
		p->~T();
		mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
	}

	// Return memory without running a destructor (object never constructed, or
	// already destroyed elsewhere). NULL is a no-op.
	PX_INLINE void releasePreallocated(T* const p)
	{
		if(p)
			mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
	}
protected:
	PreallocatingRegionManager mPool;
};
// PreallocatingPool variant that buffers deletions: destroy() runs the destructor
// immediately but the memory is only returned to the pool when
// processPendingDeletedElems() is called.
template<class T>
class BufferedPreallocatingPool : public PreallocatingPool<T>
{
	PxArray<T*> mDeletedElems;	// destroyed objects whose memory is still withheld
	PX_NOCOPY(BufferedPreallocatingPool<T>)
public:
	BufferedPreallocatingPool(PxU32 maxElements, const char* typeName) : PreallocatingPool<T>(maxElements, typeName)
	{
	}

	// Destruct now, recycle memory later.
	PX_INLINE void destroy(T* const p)
	{
		if(!p)
			return;
		p->~T();
		mDeletedElems.pushBack(p);
	}

	// Hand the memory of all buffered deletions back to the pool.
	void processPendingDeletedElems()
	{
		const PxU32 pendingCount = mDeletedElems.size();
		for(PxU32 i = 0; i < pendingCount; ++i)
			this->mPool.deallocateMemory(reinterpret_cast<PxU8*>(mDeletedElems[i]));
		mDeletedElems.clear();
	}
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,234 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PRIORITY_QUEUE_H
#define CM_PRIORITY_QUEUE_H
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
namespace physx
{
namespace Cm
{
template<class Element, class Comparator = PxLess<Element> >
class PriorityQueueBase : protected Comparator // inherit so that stateless comparators take no space
{
public:
PriorityQueueBase(const Comparator& less, Element* elements) : Comparator(less), mHeapSize(0), mDataPtr(elements)
{
}
~PriorityQueueBase()
{
}
//! Get the element with the highest priority
PX_FORCE_INLINE const Element top() const
{
return mDataPtr[0];
}
//! Get the element with the highest priority
PX_FORCE_INLINE Element top()
{
return mDataPtr[0];
}
//! Check to whether the priority queue is empty
PX_FORCE_INLINE bool empty() const
{
return (mHeapSize == 0);
}
//! Empty the priority queue
PX_FORCE_INLINE void clear()
{
mHeapSize = 0;
}
//! Insert a new element into the priority queue. Only valid when size() is less than Capacity
PX_FORCE_INLINE void push(const Element& value)
{
PxU32 newIndex;
PxU32 parentIndex = parent(mHeapSize);
for (newIndex = mHeapSize; newIndex > 0 && compare(value, mDataPtr[parentIndex]); newIndex = parentIndex, parentIndex = parent(newIndex))
{
mDataPtr[ newIndex ] = mDataPtr[parentIndex];
}
mDataPtr[newIndex] = value;
mHeapSize++;
PX_ASSERT(valid());
}
//! Delete the highest priority element. Only valid when non-empty.
PX_FORCE_INLINE Element pop()
{
PX_ASSERT(mHeapSize > 0);
PxU32 i, child;
//try to avoid LHS
PxU32 tempHs = mHeapSize-1;
mHeapSize = tempHs;
Element min = mDataPtr[0];
Element last = mDataPtr[tempHs];
for (i = 0; (child = left(i)) < tempHs; i = child)
{
/* Find highest priority child */
const PxU32 rightChild = child + 1;
child += ((rightChild < tempHs) & compare((mDataPtr[rightChild]), (mDataPtr[child]))) ? 1 : 0;
if(compare(last, mDataPtr[child]))
break;
mDataPtr[i] = mDataPtr[child];
}
mDataPtr[ i ] = last;
PX_ASSERT(valid());
return min;
}
//! Make sure the priority queue sort all elements correctly
bool valid() const
{
const Element& min = mDataPtr[0];
for(PxU32 i=1; i<mHeapSize; ++i)
{
if(compare(mDataPtr[i], min))
return false;
}
return true;
}
//! Return number of elements in the priority queue
PxU32 size() const
{
return mHeapSize;
}
protected:
PxU32 mHeapSize;
Element* mDataPtr;
PX_FORCE_INLINE bool compare(const Element& a, const Element& b) const
{
return Comparator::operator()(a,b);
}
static PX_FORCE_INLINE PxU32 left(PxU32 nodeIndex)
{
return (nodeIndex << 1) + 1;
}
static PX_FORCE_INLINE PxU32 parent(PxU32 nodeIndex)
{
return (nodeIndex - 1) >> 1;
}
private:
PriorityQueueBase<Element, Comparator>& operator = (const PriorityQueueBase<Element, Comparator>);
};
// Priority queue over a fixed-size inline buffer; pushing beyond Capacity asserts.
template <typename Element, PxU32 Capacity, typename Comparator>
class InlinePriorityQueue : public PriorityQueueBase<Element, Comparator>
{
Element mData[Capacity];
public:
InlinePriorityQueue(const Comparator& less = Comparator()) : PriorityQueueBase<Element, Comparator>(less, mData)
{
}
// Insert an element; capacity is fixed, so overflow is a programming error.
PX_FORCE_INLINE void push(Element& elem)
{
PX_ASSERT(this->mHeapSize < Capacity);
PriorityQueueBase<Element, Comparator>::push(elem);
}
private:
InlinePriorityQueue<Element, Capacity, Comparator>& operator = (const InlinePriorityQueue<Element, Capacity, Comparator>);
};
// Growable priority queue whose element buffer is obtained from the given
// allocator. The buffer doubles (roughly) on overflow and is never shrunk.
template <typename Element, typename Comparator, typename Alloc = typename physx::PxAllocatorTraits<Element>::Type>
class PriorityQueue : public PriorityQueueBase<Element, Comparator>, protected Alloc
{
	PxU32 mCapacity;	// number of elements the current buffer can hold
public:
	PriorityQueue(const Comparator& less = Comparator(), PxU32 initialCapacity = 0, Alloc alloc = Alloc())
	: PriorityQueueBase<Element, Comparator>(less, NULL), Alloc(alloc), mCapacity(initialCapacity)
	{
		if(initialCapacity > 0)
			this->mDataPtr = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*initialCapacity, PX_FL));
	}
	~PriorityQueue()
	{
		// Qualified call for consistency with reserve(); releases the buffer only
		// (element destructors are not run — elements are expected to be trivially
		// destructible, matching PxMemCopy usage in reserve()).
		if(this->mDataPtr)
			Alloc::deallocate(this->mDataPtr);
	}
	//! Insert a new element, growing the storage first when the buffer is full.
	PX_FORCE_INLINE void push(Element& elem)
	{
		if(this->mHeapSize == mCapacity)
		{
			reserve((this->mHeapSize+1)*2);
		}
		PriorityQueueBase<Element, Comparator>::push(elem);
	}
	//! Number of elements the queue can hold before the next reallocation.
	PX_FORCE_INLINE PxU32 capacity()
	{
		return mCapacity;
	}
	//! Grow the buffer to hold at least newCapacity elements; smaller requests are no-ops.
	PX_FORCE_INLINE void reserve(const PxU32 newCapacity)
	{
		if(newCapacity > mCapacity)
		{
			Element* newElems = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*newCapacity, PX_FL));
			if(this->mDataPtr)
			{
				physx::PxMemCopy(newElems, this->mDataPtr, sizeof(Element) * this->mHeapSize);
				Alloc::deallocate(this->mDataPtr);
			}
			this->mDataPtr = newElems;
			mCapacity = newCapacity;
		}
	}
private:
	// Non-assignable: declared private and never defined. Fixed the parameter to
	// be a reference — the original took the queue by value.
	PriorityQueue<Element, Comparator, Alloc>& operator = (const PriorityQueue<Element, Comparator, Alloc>&);
};
}
}
#endif

View File

@@ -0,0 +1,191 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
#include "CmPtrTable.h"
#include "CmUtils.h"
using namespace physx;
using namespace Cm;
// Default-construct an empty table: no storage, memory nominally "owned",
// inline single-pointer buffer unused.
PtrTable::PtrTable() :
mList (NULL),
mCount (0),
mOwnsMemory (true),
mBufferUsed (false)
{
}
// Storage is extrinsic (a PtrTableStorageManager), so the destructor cannot free
// anything itself: callers must have invoked clear() first. The asserts enforce that.
PtrTable::~PtrTable()
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mCount == 0);
PX_ASSERT(mList == NULL);
}
// Release the pointer array back to the storage manager and reset to the empty state.
// Nothing is deallocated for counts 0/1 (no array) or unowned memory (deserialized data).
void PtrTable::clear(PtrTableStorageManager& sm)
{
if(mOwnsMemory && mCount>1)
{
// Capacity is implicit: for owned memory it is the power of two >= mCount
// (see the header comment); recompute it so the manager can return the
// block to the right pool.
const PxU32 implicitCapacity = PxNextPowerOfTwo(PxU32(mCount)-1);
sm.deallocate(mList, implicitCapacity);
}
mList = NULL;
mOwnsMemory = true;
mCount = 0;
}
// Linear search for 'ptr' among the stored pointers.
// Returns its index, or 0xffffffff when the pointer is not present.
PxU32 PtrTable::find(const void* ptr) const
{
	void*const* PX_RESTRICT entries = getPtrs();
	const PxU32 count = mCount;
	PxU32 index = 0;
	while(index < count)
	{
		if(entries[index] == ptr)
			return index;
		++index;
	}
	return 0xffffffff;
}
// Serialize the out-of-line pointer array. Counts 0/1 store the pointer inline
// in the union, so only multi-entry tables write extra data.
void PtrTable::exportExtraData(PxSerializationContext& stream)
{
if(mCount>1)
{
stream.alignData(PX_SERIAL_ALIGN);
stream.writeData(mList, sizeof(void*)*mCount);
}
}
// Counterpart of exportExtraData(): point mList at the deserialized array.
// The resulting memory is not owned (see the PtrTable(PxEMPTY) constructor).
void PtrTable::importExtraData(PxDeserializationContext& context)
{
if(mCount>1)
mList = context.readExtraData<void*, PX_SERIAL_ALIGN>(mCount);
}
// Move the pointer array into a block of 'newCapacity'. 'oldCapacity' must be 0
// exactly when the current memory is not owned (deserialized), per the asserts.
// After this call the table always owns its memory.
void PtrTable::realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm)
{
PX_ASSERT((mOwnsMemory && oldCapacity) || (!mOwnsMemory && oldCapacity == 0));
PX_ASSERT(newCapacity);
// If the manager says the existing owned block can serve the new capacity, keep it.
if(mOwnsMemory && sm.canReuse(oldCapacity, newCapacity))
return;
void** newMem = sm.allocate(newCapacity);
PxMemCopy(newMem, mList, mCount * sizeof(void*));
if(mOwnsMemory)
sm.deallocate(mList, oldCapacity);
mList = newMem;
mOwnsMemory = true;
}
// Append a pointer. Counts 0 and 1 use the inline mSingle slot of the union;
// larger counts use an out-of-line array whose capacity is implicitly the power
// of two >= mCount (owned memory) or exactly mCount (unowned memory).
void PtrTable::add(void* ptr, PtrTableStorageManager& sm)
{
if(mCount == 0) // 0 -> 1, easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mList == NULL);
PX_ASSERT(!mBufferUsed);
mSingle = ptr;
mCount = 1;
mBufferUsed = true;
return;
}
if(mCount == 1) // 1 -> 2, easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mBufferUsed);
// mSingle and mList alias in the union: save the single pointer before
// overwriting the union member with the freshly allocated array.
void* single = mSingle;
mList = sm.allocate(2);
mList[0] = single;
mBufferUsed = false;
mOwnsMemory = true;
}
else
{
PX_ASSERT(!mBufferUsed);
if(!mOwnsMemory) // don't own the memory, must always alloc
realloc(0, PxNextPowerOfTwo(mCount), sm); // we're guaranteed nextPowerOfTwo(x) > x
else if(PxIsPowerOfTwo(mCount)) // count is at implicit capacity, so realloc
realloc(mCount, PxU32(mCount)*2, sm); // ... to next higher power of 2
PX_ASSERT(mOwnsMemory);
}
mList[mCount++] = ptr;
}
// Remove the entry at 'index' by overwriting it with the last entry (order is
// not preserved), then shrink/convert storage to match the new implicit capacity.
void PtrTable::replaceWithLast(PxU32 index, PtrTableStorageManager& sm)
{
PX_ASSERT(mCount!=0);
if(mCount == 1) // 1 -> 0 easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mBufferUsed);
mList = NULL;
mCount = 0;
mBufferUsed = false;
}
else if(mCount == 2) // 2 -> 1 easy case
{
PX_ASSERT(!mBufferUsed);
// Keep the survivor (the entry NOT being removed) and fold back to inline storage.
void* ptr = mList[1-index];
if(mOwnsMemory)
sm.deallocate(mList, 2);
mSingle = ptr;
mCount = 1;
mBufferUsed = true;
mOwnsMemory = true;
}
else
{
PX_ASSERT(!mBufferUsed);
mList[index] = mList[--mCount]; // remove before adjusting memory
if(!mOwnsMemory) // don't own the memory, must alloc
realloc(0, PxNextPowerOfTwo(PxU32(mCount)-1), sm); // if currently a power of 2, don't jump to the next one
else if(PxIsPowerOfTwo(mCount)) // own the memory, and implicit capacity requires that we downsize
realloc(PxU32(mCount)*2, PxU32(mCount), sm); // ... from the next power of 2, which was the old implicit capacity
PX_ASSERT(mOwnsMemory);
}
}

View File

@@ -0,0 +1,122 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PTR_TABLE_H
#define CM_PTR_TABLE_H
#include "foundation/PxConstructor.h"
#include "foundation/PxIO.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
class PxSerializationContext;
class PxDeserializationContext;
class PxOutputStream;
namespace Cm
{
// Abstract allocator interface backing PtrTable's out-of-line pointer arrays.
class PtrTableStorageManager
{
// This will typically be backed by a MultiPool implementation with fallback to the user
// allocator. For MultiPool, when deallocating we want to know what the previously requested size was
// so we can release into the right pool
public:
// Allocate storage for 'capacity' pointers.
virtual void** allocate(PxU32 capacity) = 0;
// Release storage previously obtained with allocate(originalCapacity).
virtual void deallocate(void** addr, PxU32 originalCapacity) = 0;
// whether memory allocated at one capacity can (and should) be safely reused at a different capacity
// allows realloc-style reuse by clients.
virtual bool canReuse(PxU32 originalCapacity, PxU32 newCapacity) = 0;
protected:
// Non-virtual lifetime management through this interface is not supported.
virtual ~PtrTableStorageManager() {}
};
// specialized class to hold an array of pointers with extrinsic storage management,
// serialization-compatible with 3.3.1 PtrTable
//
// note that extrinsic storage implies you *must* clear the table before the destructor runs
//
// capacity is implicit:
// if the memory is not owned (i.e. came from deserialization) then the capacity is exactly mCount
// else if mCount==0, capacity is 0
// else the capacity is the power of 2 >= mCount
//
// one implication of this is that if we want to add or remove a pointer from unowned memory, we always realloc
struct PX_PHYSX_COMMON_API PtrTable
{
PtrTable();
~PtrTable();
// Mutators; all storage comes from / returns to 'sm'.
void add(void* ptr, PtrTableStorageManager& sm);
void replaceWithLast(PxU32 index, PtrTableStorageManager& sm);
void clear(PtrTableStorageManager& sm);
// Index of 'ptr' or 0xffffffff if absent.
PxU32 find(const void* ptr) const;
PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
// For mCount==1 the pointer lives inline in the union, hence the &mSingle branch.
PX_FORCE_INLINE void*const* getPtrs() const { return mCount == 1 ? &mSingle : mList; }
PX_FORCE_INLINE void** getPtrs() { return mCount == 1 ? &mSingle : mList; }
// SERIALIZATION
// 3.3.1 compatibility fixup: this implementation ALWAYS sets 'ownsMemory' if the size is 0 or 1
// NOTE(review): mCount/mBufferUsed are presumably preserved from the serialized image
// (only the pointer members are patched up here) — confirm against the serialization framework.
PtrTable(const PxEMPTY)
{
mOwnsMemory = mCount<2;
if(mCount == 0)
mList = NULL;
}
void exportExtraData(PxSerializationContext& stream);
void importExtraData(PxDeserializationContext& context);
private:
void realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm);
// Inline single pointer (mCount==1) or out-of-line array (mCount>1).
union
{
void* mSingle;
void** mList;
};
PxU16 mCount;
bool mOwnsMemory;
bool mBufferUsed; // dark magic in serialization requires this, otherwise redundant because it's logically equivalent to mCount == 1.
public:
PxU32 mFreeSlot; // PT: padding bytes on x64
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,559 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxAssert.h"
#include "CmRadixSort.h"
// PT: code archeology: this initially came from ICE (IceRevisitedRadix.h/cpp). Consider putting it back the way it was initially.
using namespace physx;
using namespace Cm;
#if defined(__BIG_ENDIAN__) || defined(_XBOX)
#define H0_OFFSET 768
#define H1_OFFSET 512
#define H2_OFFSET 256
#define H3_OFFSET 0
#define BYTES_INC (3-j)
#else
#define H0_OFFSET 0
#define H1_OFFSET 256
#define H2_OFFSET 512
#define H3_OFFSET 768
#define BYTES_INC j
#endif
// Builds all four radix histograms (one per byte of the 32-bit keys) in a single
// pass over the input, while simultaneously testing for temporal coherence: if the
// input is already sorted (directly, or through the previous mRanks permutation),
// the macro makes the enclosing Sort() return early. 'type' selects signed/unsigned/
// float comparison for the coherence test; 'buffer' is the typed view of 'input'.
// NOTE: this macro expands inside Sort() and uses/early-returns from its scope.
#define CREATE_HISTOGRAMS(type, buffer) \
/* Clear counters/histograms */ \
PxMemZero(mHistogram1024, 256*4*sizeof(PxU32)); \
\
/* Prepare to count */ \
const PxU8* PX_RESTRICT p = reinterpret_cast<const PxU8*>(input); \
const PxU8* PX_RESTRICT pe = &p[nb*4]; \
PxU32* PX_RESTRICT h0= &mHistogram1024[H0_OFFSET]; /* Histogram for first pass (LSB)*/ \
PxU32* PX_RESTRICT h1= &mHistogram1024[H1_OFFSET]; /* Histogram for second pass */ \
PxU32* PX_RESTRICT h2= &mHistogram1024[H2_OFFSET]; /* Histogram for third pass */ \
PxU32* PX_RESTRICT h3= &mHistogram1024[H3_OFFSET]; /* Histogram for last pass (MSB)*/ \
\
bool AlreadySorted = true; /* Optimism... */ \
\
if(INVALID_RANKS) \
{ \
/* Prepare for temporal coherence */ \
const type* PX_RESTRICT Running = reinterpret_cast<const type*>(buffer); \
type PrevVal = *Running; \
\
while(p!=pe) \
{ \
/* Read input buffer in previous sorted order */ \
const type Val = *Running++; \
/* Check whether already sorted or not */ \
if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
/* Update for next iteration */ \
PrevVal = Val; \
\
/* Create histograms */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
} \
\
/* If all input values are already sorted, we just have to return and leave the */ \
/* previous list unchanged. That way the routine may take advantage of temporal */ \
/* coherence, for example when used to sort transparent faces. */ \
if(AlreadySorted) \
{ \
mNbHits++; \
for(PxU32 i=0;i<nb;i++) mRanks[i] = i; \
return *this; \
} \
} \
else \
{ \
/* Prepare for temporal coherence */ \
const PxU32* PX_RESTRICT Indices = mRanks; \
type PrevVal = type(buffer[*Indices]); \
\
while(p!=pe) \
{ \
/* Read input buffer in previous sorted order */ \
const type Val = type(buffer[*Indices++]); \
/* Check whether already sorted or not */ \
if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
/* Update for next iteration */ \
PrevVal = Val; \
\
/* Create histograms */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
} \
\
/* If all input values are already sorted, we just have to return and leave the */ \
/* previous list unchanged. That way the routine may take advantage of temporal */ \
/* coherence, for example when used to sort transparent faces. */ \
if(AlreadySorted) { mNbHits++; return *this; } \
} \
\
/* Else there has been an early out and we must finish computing the histograms */ \
while(p!=pe) \
{ \
/* Create histograms without the previous overhead */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
}
// Decide whether radix pass 'pass' (0=LSB..3=MSB) is worth performing.
// Returns the 256-entry counter block for the pass, or NULL when every key has the
// same byte in that position (the pass would be a no-op). 'UniqueVal' receives the
// byte of the first key at that position, used by the caller's negative-float fixup.
PX_INLINE const PxU32* CheckPassValidity(PxU32 pass, const PxU32* mHistogram1024, PxU32 nb, const void* input, PxU8& UniqueVal)
{
// Shortcut to current counters
const PxU32* CurCount = &mHistogram1024[pass<<8];
// Check pass validity
// If all values have the same byte, sorting is useless.
// It may happen when sorting bytes or words instead of dwords.
// This routine actually sorts words faster than dwords, and bytes
// faster than words. Standard running time (O(4*n))is reduced to O(2*n)
// for words and O(n) for bytes. Running time for floats depends on actual values...
// Get first byte
UniqueVal = *((reinterpret_cast<const PxU8*>(input))+pass);
// Check that byte's counter
if(CurCount[UniqueVal]==nb)
return NULL;
return CurCount;
}
// Construct with no buffers attached; callers must use SetBuffers() (or the
// RadixSortBuffered subclass) before sorting.
RadixSort::RadixSort() : mCurrentSize(0), mRanks(NULL), mRanks2(NULL), mHistogram1024(0), mLinks256(0), mTotalCalls(0), mNbHits(0), mDeleteRanks(true)
{
// Initialize indices
INVALIDATE_RANKS;
}
// Base class frees nothing: buffers are either caller-owned (SetBuffers) or
// managed by RadixSortBuffered.
RadixSort::~RadixSort()
{
}
/**
* Main sort routine.
* This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input [in] a list of integer values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
* \return Self-Reference
*/
RadixSort& RadixSort::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
PX_ASSERT(mHistogram1024);
PX_ASSERT(mLinks256);
PX_ASSERT(mRanks);
PX_ASSERT(mRanks2);
// Sanity checks: null/empty input or nb >= 2^31 are rejected silently
if(!input || !nb || nb&0x80000000)
return *this;
// Stats
mTotalCalls++;
// Create histograms (counters). Counters for all passes are created in one run.
// Pros: read input buffer once instead of four times
// Cons: mHistogram1024 is 4Kb instead of 1Kb
// We must take care of signed/unsigned values for temporal coherence.... I just
// have 2 code paths even if just a single opcode changes. Self-modifying code, someone?
// NOTE: CREATE_HISTOGRAMS may return early from this function on already-sorted input.
if(hint==RADIX_UNSIGNED) { CREATE_HISTOGRAMS(PxU32, input); }
else { CREATE_HISTOGRAMS(PxI32, input); }
// Compute #negative values involved if needed
PxU32 NbNegativeValues = 0;
if(hint==RADIX_SIGNED)
{
// An efficient way to compute the number of negatives values we'll have to deal with is simply to sum the 128
// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
PxU32* PX_RESTRICT h3= &mHistogram1024[768];
for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
}
// Radix sort, j is the pass number (0=LSB, 3=MSB)
for(PxU32 j=0;j<4;j++)
{
// CHECK_PASS_VALIDITY(j);
PxU8 UniqueVal;
const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
// Sometimes the fourth (negative) pass is skipped because all numbers are negative and the MSB is 0xFF (for example). This is
// not a problem, numbers are correctly sorted anyway.
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Should we care about negative values?
if(j!=3 || hint==RADIX_UNSIGNED)
{
// Here we deal with positive values only
// Create offsets
Links256[0] = mRanks2;
for(PxU32 i=1;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
}
else
{
// This is a special case to correctly handle negative integers. They're sorted in the right order but at the wrong place.
// Create biased offsets, in order for negative numbers to be sorted as well
Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
for(PxU32 i=1;i<128;i++)
Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
// Fixing the wrong place for negative values
Links256[128] = mRanks2;
for(PxU32 i=129;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
}
// Perform Radix Sort
const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
InputBytes += BYTES_INC;
if(INVALID_RANKS)
{
// First valid pass: ranks are implicit (identity permutation)
for(PxU32 i=0;i<nb;i++)
*Links256[InputBytes[i<<2]]++ = i;
VALIDATE_RANKS;
}
else
{
// Subsequent passes: scatter the previous pass' ranks
PxU32* PX_RESTRICT Indices = mRanks;
PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
while(Indices!=IndicesEnd)
{
const PxU32 id = *Indices++;
*Links256[InputBytes[id<<2]]++ = id;
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
return *this;
}
/**
* Main sort routine.
* This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input2 [in] a list of floating-point values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \return Self-Reference
* \warning only sorts IEEE floating-point values
*/
RadixSort& RadixSort::Sort(const float* input2, PxU32 nb)
{
PX_ASSERT(mHistogram1024);
PX_ASSERT(mLinks256);
PX_ASSERT(mRanks);
PX_ASSERT(mRanks2);
// Sanity checks: null/empty input or nb >= 2^31 are rejected silently
if(!input2 || !nb || nb&0x80000000)
return *this;
// Stats
mTotalCalls++;
// Reinterpret the float keys as raw 32-bit patterns for the byte-wise passes
const PxU32* PX_RESTRICT input = reinterpret_cast<const PxU32*>(input2);
// Allocate histograms & offsets on the stack
//PxU32 mHistogram1024[256*4];
//PxU32* mLinks256[256];
// Create histograms (counters). Counters for all passes are created in one run.
// Pros: read input buffer once instead of four times
// Cons: mHistogram1024 is 4Kb instead of 1Kb
// Floating-point values are always supposed to be signed values, so there's only one code path there.
// Please note the floating point comparison needed for temporal coherence! Although the resulting asm code
// is dreadful, this is surprisingly not such a performance hit - well, I suppose that's a big one on first
// generation Pentiums....We can't make comparison on integer representations because, as Chris said, it just
// wouldn't work with mixed positive/negative values....
{ CREATE_HISTOGRAMS(float, input2); }
// Compute #negative values involved if needed
PxU32 NbNegativeValues = 0;
// An efficient way to compute the number of negatives values we'll have to deal with is simply to sum the 128
// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
// ### is that ok on Apple ?!
PxU32* PX_RESTRICT h3= &mHistogram1024[768];
for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
// Radix sort, j is the pass number (0=LSB, 3=MSB)
for(PxU32 j=0;j<4;j++)
{
PxU8 UniqueVal;
const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
// Should we care about negative values?
if(j!=3)
{
// Here we deal with positive values only
// CHECK_PASS_VALIDITY(j);
// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Create offsets
Links256[0] = mRanks2;
for(PxU32 i=1;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
// Perform Radix Sort
const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
InputBytes += BYTES_INC;
if(INVALID_RANKS)
{
for(PxU32 i=0;i<nb;i++)
*Links256[InputBytes[i<<2]]++ = i;
VALIDATE_RANKS;
}
else
{
PxU32* PX_RESTRICT Indices = mRanks;
PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
while(Indices!=IndicesEnd)
{
const PxU32 id = *Indices++;
*Links256[InputBytes[id<<2]]++ = id;
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
else
{
// This is a special case to correctly handle negative values
// CHECK_PASS_VALIDITY(j);
// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Create biased offsets, in order for negative numbers to be sorted as well
Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
for(PxU32 i=1;i<128;i++)
Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
// We must reverse the sorting order for negative numbers!
// (IEEE negative floats compare in reverse when treated as raw bit patterns.)
Links256[255] = mRanks2;
for(PxU32 i=0;i<127;i++)
Links256[254-i] = Links256[255-i] + CurCount[255-i]; // Fixing the wrong order for negative values
for(PxU32 i=128;i<256;i++)
Links256[i] += CurCount[i]; // Fixing the wrong place for negative values
// Perform Radix Sort
if(INVALID_RANKS)
{
for(PxU32 i=0;i<nb;i++)
{
const PxU32 Radix = input[i]>>24; // Radix byte, same as above. AND is useless here (PxU32).
// ### cmp to be killed. Not good. Later.
if(Radix<128) *Links256[Radix]++ = i; // Number is positive, same as above
else *(--Links256[Radix]) = i; // Number is negative, flip the sorting order
}
VALIDATE_RANKS;
}
else
{
const PxU32* PX_RESTRICT Ranks = mRanks;
for(PxU32 i=0;i<nb;i++)
{
const PxU32 Radix = input[Ranks[i]]>>24; // Radix byte, same as above. AND is useless here (PxU32).
// ### cmp to be killed. Not good. Later.
if(Radix<128) *Links256[Radix]++ = Ranks[i]; // Number is positive, same as above
else *(--Links256[Radix]) = Ranks[i]; // Number is negative, flip the sorting order
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
else
{
// The pass is useless, yet we still have to reverse the order of current list if all values are negative.
if(UniqueVal>=128)
{
if(INVALID_RANKS)
{
// ###Possible?
for(PxU32 i=0;i<nb;i++) mRanks2[i] = nb-i-1;
VALIDATE_RANKS;
}
else
{
for(PxU32 i=0;i<nb;i++) mRanks2[i] = mRanks[nb-i-1];
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
}
}
return *this;
}
// Attach externally-owned working buffers; the sorter will never free them.
// Returns false (leaving state untouched) when any buffer pointer is null.
bool RadixSort::SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256)
{
	const bool allProvided = ranks0 && ranks1 && histogram1024 && links256;
	if(!allProvided)
		return false;
	mRanks			= ranks0;
	mRanks2			= ranks1;
	mHistogram1024	= histogram1024;
	mLinks256		= links256;
	mDeleteRanks	= false;	// caller owns the rank buffers
	INVALIDATE_RANKS;
	return true;
}
#include "foundation/PxAllocator.h"
using namespace physx;
using namespace Cm;
// Construct with self-managed rank buffers (allocated lazily by CheckResize()).
RadixSortBuffered::RadixSortBuffered()
: RadixSort()
{
}
// Frees the owned rank buffers via reset().
RadixSortBuffered::~RadixSortBuffered()
{
reset();
}
// Free the owned rank buffers (if any) and return to the freshly-constructed state.
void RadixSortBuffered::reset()
{
// Release everything
if(mDeleteRanks)
{
PX_FREE(mRanks2);
PX_FREE(mRanks);
}
mCurrentSize = 0;
INVALIDATE_RANKS;
}
/**
* Resizes the inner lists.
* \param nb [in] new size (number of dwords)
* \return true if success
* NOTE(review): allocation results are not checked and the function always returns
* true; a failed PX_ALLOCATE would leave mRanks/mRanks2 in whatever state the
* allocator reports — confirm against PX_ALLOCATE's failure behavior.
*/
bool RadixSortBuffered::Resize(PxU32 nb)
{
if(mDeleteRanks)
{
// Free previously used ram
PX_FREE(mRanks2);
PX_FREE(mRanks);
// Get some fresh one
mRanks = PX_ALLOCATE(PxU32, nb, "RadixSortBuffered:mRanks");
mRanks2 = PX_ALLOCATE(PxU32, nb, "RadixSortBuffered:mRanks2");
}
return true;
}
// Grow (never shrink) the rank buffers to hold 'nb' entries; any size change
// invalidates the cached ranks so the next sort starts from the identity permutation.
PX_INLINE void RadixSortBuffered::CheckResize(PxU32 nb)
{
PxU32 CurSize = CURRENT_SIZE;
if(nb!=CurSize)
{
if(nb>CurSize)
Resize(nb);
mCurrentSize = nb;
INVALIDATE_RANKS;
}
}
/**
* Main sort routine.
* This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input [in] a list of integer values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
* \return Self-Reference
*/
RadixSortBuffered& RadixSortBuffered::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
// Sanity checks: null/empty input or nb >= 2^31 are rejected silently
if(!input || !nb || nb&0x80000000)
return *this;
// Resize lists if needed
CheckResize(nb);
//Set histogram buffers.
// NOTE(review): mHistogram1024/mLinks256 point at these stack arrays and thus
// dangle after this call returns; presumably they are only ever used inside
// RadixSort::Sort() and re-pointed on every call — confirm before relying on them.
PxU32 histogram[1024];
PxU32* links[256];
mHistogram1024 = histogram;
mLinks256 = links;
RadixSort::Sort(input, nb, hint);
return *this;
}
/**
* Main sort routine.
* This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input2 [in] a list of floating-point values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \return Self-Reference
* \warning only sorts IEEE floating-point values
*/
RadixSortBuffered& RadixSortBuffered::Sort(const float* input2, PxU32 nb)
{
// Sanity checks: null/empty input or nb >= 2^31 are rejected silently
if(!input2 || !nb || nb&0x80000000)
return *this;
// Resize lists if needed
CheckResize(nb);
//Set histogram buffers.
// NOTE(review): mHistogram1024/mLinks256 point at these stack arrays and thus
// dangle after this call returns; presumably they are only ever used inside
// RadixSort::Sort() and re-pointed on every call — confirm before relying on them.
PxU32 histogram[1024];
PxU32* links[256];
mHistogram1024 = histogram;
mLinks256 = links;
RadixSort::Sort(input2, nb);
return *this;
}

View File

@@ -0,0 +1,117 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RADIX_SORT_H
#define CM_RADIX_SORT_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Cm
{
// Tells Sort() whether the 32-bit integer keys may contain negative values.
enum RadixHint
{
RADIX_SIGNED, //!< Input values are signed
RADIX_UNSIGNED, //!< Input values are unsigned
RADIX_FORCE_DWORD = 0x7fffffff
};
// The top bit of mCurrentSize doubles as a "ranks invalid" flag; these macros
// set/clear/query it and extract the actual size from the low 31 bits.
#define INVALIDATE_RANKS mCurrentSize|=0x80000000
#define VALIDATE_RANKS mCurrentSize&=0x7fffffff
#define CURRENT_SIZE (mCurrentSize&0x7fffffff)
#define INVALID_RANKS (mCurrentSize&0x80000000)
// LSB-first radix sorter for 32-bit integer and IEEE float keys, with temporal
// coherence: re-sorting an (almost) already-sorted buffer exits early.
// Working buffers are supplied through SetBuffers() or by RadixSortBuffered.
class PX_PHYSX_COMMON_API RadixSort
{
PX_NOCOPY(RadixSort)
public:
RadixSort();
virtual ~RadixSort();
// Sorting methods
RadixSort& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
RadixSort& Sort(const float* input, PxU32 nb);
//! Access to results. mRanks is a list of indices in sorted order, i.e. in the order you may further process your data
PX_FORCE_INLINE const PxU32* GetRanks() const { return mRanks; }
//! mRanks2 gets trashed on calling the sort routine, but otherwise you can recycle it the way you want.
PX_FORCE_INLINE PxU32* GetRecyclable() const { return mRanks2; }
//! Returns the total number of calls to the radix sorter.
PX_FORCE_INLINE PxU32 GetNbTotalCalls() const { return mTotalCalls; }
//! Returns the number of early exits due to temporal coherence.
PX_FORCE_INLINE PxU32 GetNbHits() const { return mNbHits; }
PX_FORCE_INLINE void invalidateRanks() { INVALIDATE_RANKS; }
bool SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256);
protected:
PxU32 mCurrentSize; //!< Current size of the indices list (top bit = "ranks invalid" flag)
PxU32* mRanks; //!< Two lists, swapped each pass
PxU32* mRanks2;
PxU32* mHistogram1024; //!< 4 x 256 counters, one histogram per byte of the key
PxU32** mLinks256; //!< 256 write cursors, one per radix value
// Stats
PxU32 mTotalCalls; //!< Total number of calls to the sort routine
PxU32 mNbHits; //!< Number of early exits due to coherence
// Stack-radix
bool mDeleteRanks; //!< True when mRanks/mRanks2 are owned and must be freed (false after SetBuffers)
};
// Declares a RadixSort named 'name' backed entirely by stack storage:
// the caller supplies the two rank buffers, the histogram and link buffers
// are local arrays. Avoids heap allocations for temporary sorts.
#define StackRadixSort(name, ranks0, ranks1) \
RadixSort name; \
PxU32 histogramBuffer[1024]; \
PxU32* linksBuffer[256]; \
name.SetBuffers(ranks0, ranks1, histogramBuffer, linksBuffer);
//! Radix sorter that owns and (re)allocates its internal buffers on demand,
//! unlike the base class which relies on externally-provided storage.
class PX_PHYSX_COMMON_API RadixSortBuffered : public RadixSort
{
	public:
	RadixSortBuffered();
	~RadixSortBuffered();
	//! Releases the internal buffers and resets the sorter state.
	void reset();
	RadixSortBuffered& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
	RadixSortBuffered& Sort(const float* input, PxU32 nb);
	private:
	RadixSortBuffered(const RadixSortBuffered& object);
	RadixSortBuffered& operator=(const RadixSortBuffered& object);
	// Internal methods
	void CheckResize(PxU32 nb); // grows buffers if nb exceeds the current capacity
	bool Resize(PxU32 nb);
};
}
}
#endif // CM_RADIX_SORT_H

View File

@@ -0,0 +1,224 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RANDOM_H
#define CM_RANDOM_H
#include "foundation/PxQuat.h"
#include "foundation/PxVec3.h"
#define TEST_MAX_RAND 0xffff
namespace physx
{
namespace Cm
{
//! Minimal linear congruential generator (LCG).
//! Deterministic for a given seed; intended for tests/tools, not cryptography.
class BasicRandom
{
	public:
	BasicRandom(PxU32 seed = 0) : mState(seed) {}
	~BasicRandom() {}

	PX_FORCE_INLINE void setSeed(PxU32 seed) { mState = seed; }
	PX_FORCE_INLINE PxU32 getCurrentValue() const { return mState; }

	//! Advances the LCG state and returns it.
	PxU32 randomize()
	{
		mState = mState * 2147001325 + 715136305;
		return mState;
	}

	//! 16-bit random value (low bits of the LCG state).
	PX_FORCE_INLINE PxU32 rand() { return randomize() & 0xffff; }
	//! 32-bit random value.
	PX_FORCE_INLINE PxU32 rand32() { return randomize() & 0xffffffff; }

	//! Uniform float in [a, b].
	PxF32 rand(PxF32 a, PxF32 b)
	{
		const PxF32 t = rand32() / (static_cast<PxF32>(0xffffffff));
		return a + t * (b - a);
	}

	//! Uniform integer in [a, b). Assumes a < b - TODO confirm callers guarantee this.
	PxI32 rand(PxI32 a, PxI32 b)
	{
		return a + static_cast<PxI32>(rand32() % (b - a));
	}

	//! Float in [-0.5, 0.5] with 16-bit resolution.
	PxF32 randomFloat()
	{
		return rand() / (static_cast<PxF32>(0xffff)) - 0.5f;
	}

	//! Float in [-0.5, 0.5] with 32-bit resolution.
	PxF32 randomFloat32()
	{
		return rand32() / (static_cast<PxF32>(0xffffffff)) - 0.5f;
	}

	//! Uniform float in [a, b] with 32-bit resolution.
	PxF32 randomFloat32(PxReal a, PxReal b) { return rand32() / PxF32(0xffffffff)*(b - a) + a; }

	void unitRandomPt(physx::PxVec3& v)
	{
		v = unitRandomPt();
	}

	void unitRandomQuat(physx::PxQuat& v)
	{
		v = unitRandomQuat();
	}

	//! Rejection-samples a random unit vector: retry until normalization succeeds.
	PxVec3 unitRandomPt()
	{
		PxVec3 dir;
		for(;;)
		{
			dir.x = randomFloat();
			dir.y = randomFloat();
			dir.z = randomFloat();
			if(dir.normalize() >= 1e-6f)
				return dir;
		}
	}

	//! Rejection-samples a random unit quaternion: retry until normalization succeeds.
	PxQuat unitRandomQuat()
	{
		PxQuat q;
		for(;;)
		{
			q.x = randomFloat();
			q.y = randomFloat();
			q.z = randomFloat();
			q.w = randomFloat();
			if(q.normalize() >= 1e-6f)
				return q;
		}
	}

	private:
	PxU32 mState;	// current LCG state
};
//--------------------------------------
// Fast, very good random numbers
//
// Period = 2^249
//
// Kirkpatrick, S., and E. Stoll, 1981; A Very Fast Shift-Register
// Sequence Random Number Generator, Journal of Computational Physics,
// V. 40.
//
// Maier, W.L., 1991; A Fast Pseudo Random Number Generator,
// Dr. Dobb's Journal, May, pp. 152 - 157
//! R250 shift-register random generator (period 2^249), seeded from a BasicRandom LCG.
class RandomR250
{
	public:
	RandomR250(PxI32 s)
	{
		setSeed(s);
	}
	//! Re-initializes the 250-entry shift register from seed 's'.
	//! NOTE: the loops below each consume their own sequence of lcg.randomize()
	//! calls; keeping them separate (and in this order) is required to reproduce
	//! the exact stream.
	void setSeed(PxI32 s)
	{
		BasicRandom lcg(s);
		mIndex = 0;
		PxI32 j;
		for (j = 0; j < 250; j++) // fill r250 buffer with bit values
			mBuffer[j] = lcg.randomize();
		for (j = 0; j < 250; j++) // set some MSBs to 1
			if (lcg.randomize() > 0x40000000L)
				mBuffer[j] |= 0x80000000L;
		// Force linear independence of the 250 initial words by planting a
		// "diagonal" of bits across 32 selected entries.
		PxU32 msb = 0x80000000; // turn on diagonal bit
		PxU32 mask = 0xffffffff; // turn off the leftmost bits
		for (j = 0; j < 32; j++)
		{
			const PxI32 k = 7 * j + 3; // select a word to operate on
			mBuffer[k] &= mask; // turn off bits left of the diagonal
			mBuffer[k] |= msb; // turn on the diagonal bit
			mask >>= 1;
			msb >>= 1;
		}
	}
	//! Next random value: XOR of the entries 147 apart in the circular buffer.
	PxU32 randI()
	{
		PxI32 j;
		// wrap pointer around
		if (mIndex >= 147) j = mIndex - 147;
		else j = mIndex + 103;
		const PxU32 new_rand = mBuffer[mIndex] ^ mBuffer[j];
		mBuffer[mIndex] = new_rand;
		// increment pointer for next time
		if (mIndex >= 249) mIndex = 0;
		else mIndex++;
		return new_rand >> 1;
	}
	//! Uniform float in [0, 1] built from the top 23 random bits.
	PxReal randUnit()
	{
		PxU32 mask = (1 << 23) - 1;
		return PxF32(randI()&(mask)) / PxF32(mask);
	}
	//! Uniform float in [lower, upper].
	PxReal rand(PxReal lower, PxReal upper)
	{
		return lower + randUnit() * (upper - lower);
	}
	private:
	PxU32 mBuffer[250];	// circular shift-register state
	PxI32 mIndex;		// current read/write position in mBuffer
};
// Shared generator backing the free Rand()/SetSeed() helpers below.
// NOTE(review): 'static' in a header gives each translation unit its own
// independent generator instance - presumably intentional for test code,
// but worth confirming.
static RandomR250 gRandomR250(0x95d6739b);

//! Random integer in [0, TEST_MAX_RAND].
PX_FORCE_INLINE PxU32 Rand()
{
	return gRandomR250.randI() & TEST_MAX_RAND;
}
//! Uniform float in [a, b].
PX_FORCE_INLINE PxF32 Rand(PxF32 a, PxF32 b)
{
	const PxF32 r = static_cast<PxF32>(Rand()) / (static_cast<PxF32>(TEST_MAX_RAND));
	return r * (b - a) + a;
}
//! Legacy variant: divides by 0x8000 instead of TEST_MAX_RAND, so the result is in [a, b).
PX_FORCE_INLINE PxF32 RandLegacy(PxF32 a, PxF32 b)
{
	const PxF32 r = static_cast<PxF32>(Rand()) / (static_cast<PxF32>(0x7fff) + 1.0f);
	return r * (b - a) + a;
}
//returns numbers from [a, b-1]
PX_FORCE_INLINE PxI32 Rand(PxI32 a, PxI32 b)
{
	return a + static_cast<PxI32>(Rand() % (b - a));
}
//! Reseeds the shared generator (affects this translation unit's instance only).
PX_FORCE_INLINE void SetSeed(PxU32 seed)
{
	gRandomR250.setSeed(seed);
}
}
}
#endif

View File

@@ -0,0 +1,136 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_REFCOUNTABLE_H
#define CM_REFCOUNTABLE_H
#include "foundation/PxAssert.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxAllocator.h"
#include "common/PxBase.h"
namespace physx
{
namespace Cm
{
// PT: this is used to re-implement RefCountable using the ref-counter in PxBase, i.e. to dissociate
// the RefCountable data from the RefCountable code. The goal is to be able to store the ref counter
// in the padding bytes of PxBase, and also to avoid two v-table pointers in the class.
//! Re-implements RefCountable on top of the counter stored inside PxBase
//! (mBuiltInRefCount), so no extra data member or second v-table is needed.
class RefCountableExt : public PxRefCounted
{
	public:
	RefCountableExt() : PxRefCounted(0, PxBaseFlags(0)) {}

	//! Resets the counter to 1 before export/serialization.
	void preExportDataReset()
	{
		mBuiltInRefCount = 1;
	}

	//! Atomically increments the reference count.
	void incRefCount()
	{
		volatile PxI32* val = reinterpret_cast<volatile PxI32*>(&mBuiltInRefCount);
		PxAtomicIncrement(val);
		// value better be greater than 1, or we've created a ref to an undefined object
		PX_ASSERT(mBuiltInRefCount>1);
	}

	//! Atomically decrements the reference count; triggers onRefCountZero() on the last release.
	void decRefCount()
	{
		PX_ASSERT(mBuiltInRefCount>0);
		volatile PxI32* val = reinterpret_cast<volatile PxI32*>(&mBuiltInRefCount);
		if(physx::PxAtomicDecrement(val) == 0)
			onRefCountZero();
	}

	//! Unsynchronized read of the current reference count.
	PX_FORCE_INLINE PxU32 getRefCount() const
	{
		return mBuiltInRefCount;
	}
};
// Free-function facade over RefCountableExt: lets callers manipulate the
// built-in PxRefCounted counter without the object deriving from RefCountable.
// The static_casts only reinterpret the reference - RefCountableExt adds no data members.
PX_FORCE_INLINE void RefCountable_preExportDataReset(PxRefCounted& base) { static_cast<RefCountableExt&>(base).preExportDataReset(); }
PX_FORCE_INLINE void RefCountable_incRefCount(PxRefCounted& base) { static_cast<RefCountableExt&>(base).incRefCount(); }
PX_FORCE_INLINE void RefCountable_decRefCount(PxRefCounted& base) { static_cast<RefCountableExt&>(base).decRefCount(); }
PX_FORCE_INLINE PxU32 RefCountable_getRefCount(const PxRefCounted& base) { return static_cast<const RefCountableExt&>(base).getRefCount(); }
// simple thread-safe reference count
// when the ref count is zero, the object is in an undefined state (pending delete)
// simple thread-safe reference count
// when the ref count is zero, the object is in an undefined state (pending delete)
class RefCountable
{
	public:
	// PX_SERIALIZATION
	// Deserialization constructor: deliberately leaves mRefCount untouched, the
	// value comes from the deserialized memory image (expected to be 1).
	RefCountable(const PxEMPTY) { PX_ASSERT(mRefCount == 1); }
	//! Resets the counter to 1 before export/serialization.
	void preExportDataReset() { mRefCount = 1; }
	//~PX_SERIALIZATION
	explicit RefCountable(PxU32 initialCount = 1)
	: mRefCount(PxI32(initialCount))
	{
		PX_ASSERT(mRefCount!=0);
	}
	virtual ~RefCountable() {}
	/**
	Calls 'delete this;'. It needs to be overloaded for classes also deriving from
	PxBase and call 'Cm::deletePxBase(this);' instead.
	*/
	virtual void onRefCountZero()
	{
		PX_DELETE_THIS;
	}
	//! Atomically increments the reference count.
	void incRefCount()
	{
		physx::PxAtomicIncrement(&mRefCount);
		// value better be greater than 1, or we've created a ref to an undefined object
		PX_ASSERT(mRefCount>1);
	}
	//! Atomically decrements the reference count; triggers onRefCountZero() on the last release.
	void decRefCount()
	{
		PX_ASSERT(mRefCount>0);
		if(physx::PxAtomicDecrement(&mRefCount) == 0)
			onRefCountZero();
	}
	//! Unsynchronized read of the current reference count.
	PX_FORCE_INLINE PxU32 getRefCount() const
	{
		return PxU32(mRefCount);
	}
	private:
	volatile PxI32 mRefCount;
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,126 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RENDER_BUFFER_H
#define CM_RENDER_BUFFER_H
#include "common/PxRenderBuffer.h"
#include "CmUtils.h"
#include "foundation/PxArray.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Cm
{
/**
Implementation of PxRenderBuffer.
*/
class RenderBuffer : public PxRenderBuffer, public PxUserAllocated
{
template <typename T>
void append(PxArray<T>& dst, const T* src, PxU32 count)
{
dst.reserve(dst.size() + count);
for(const T* end=src+count; src<end; ++src)
dst.pushBack(*src);
}
public:
RenderBuffer() :
mPoints("renderBufferPoints"),
mLines("renderBufferLines"),
mTriangles("renderBufferTriangles")
{}
virtual PxU32 getNbPoints() const { return mPoints.size(); }
virtual const PxDebugPoint* getPoints() const { return mPoints.begin(); }
virtual void addPoint(const PxDebugPoint& point) { mPoints.pushBack(point); }
virtual PxU32 getNbLines() const { return mLines.size(); }
virtual const PxDebugLine* getLines() const { return mLines.begin(); }
virtual void addLine(const PxDebugLine& line) { mLines.pushBack(line); }
virtual PxDebugLine* reserveLines(const PxU32 nbLines) {return reserveContainerMemory(mLines, nbLines);}
virtual PxDebugPoint* reservePoints(const PxU32 nbPoints) { return reserveContainerMemory(mPoints, nbPoints); }
virtual PxU32 getNbTriangles() const { return mTriangles.size(); }
virtual const PxDebugTriangle* getTriangles() const { return mTriangles.begin(); }
virtual void addTriangle(const PxDebugTriangle& triangle) { mTriangles.pushBack(triangle); }
virtual void append(const PxRenderBuffer& other)
{
append(mPoints, other.getPoints(), other.getNbPoints());
append(mLines, other.getLines(), other.getNbLines());
append(mTriangles, other.getTriangles(), other.getNbTriangles());
}
virtual void clear()
{
mPoints.clear();
mLines.clear();
mTriangles.clear();
}
virtual bool empty() const
{
return mPoints.empty() && mLines.empty() && mTriangles.empty();
}
virtual void shift(const PxVec3& delta)
{
for(PxU32 i=0; i < mPoints.size(); i++)
mPoints[i].pos += delta;
for(PxU32 i=0; i < mLines.size(); i++)
{
mLines[i].pos0 += delta;
mLines[i].pos1 += delta;
}
for(PxU32 i=0; i < mTriangles.size(); i++)
{
mTriangles[i].pos0 += delta;
mTriangles[i].pos1 += delta;
mTriangles[i].pos2 += delta;
}
}
PxArray<PxDebugPoint> mPoints;
PxArray<PxDebugLine> mLines;
PxArray<PxDebugTriangle> mTriangles;
};
} // Cm
}
#endif

View File

@@ -0,0 +1,241 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SCALING_H
#define CM_SCALING_H
#include "foundation/PxBounds3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxMat34.h"
#include "foundation/PxSIMDHelpers.h"
#include "geometry/PxMeshScale.h"
#include "CmUtils.h"
namespace physx
{
namespace Cm
{
// PT: same as PxMeshScale::toMat33() but faster
// PT: same as PxMeshScale::toMat33() but faster.
// Builds R^T * diag(scale) * R without materializing the diagonal matrix:
// scaling the columns of R^T is equivalent to post-multiplying by diag(scale).
PX_FORCE_INLINE PxMat33 toMat33(const PxMeshScale& meshScale)
{
	const PxMat33Padded rot(meshScale.rotation);
	PxMat33 scaled = rot.getTranspose();
	scaled.column0 *= meshScale.scale.x;
	scaled.column1 *= meshScale.scale.y;
	scaled.column2 *= meshScale.scale.z;
	return scaled * rot;
}
// class that can perform scaling fast. Relatively large size, generated from PxMeshScale on demand.
// CS: I've removed most usages of this class, because most of the time only one-way transform is needed.
// If you only need a temporary FastVertex2ShapeScaling, setup your transform as PxMat34Legacy and use
// normal matrix multiplication or a transform() overload to convert points and bounds between spaces.
// class that can perform scaling fast. Relatively large size, generated from PxMeshScale on demand.
// CS: I've removed most usages of this class, because most of the time only one-way transform is needed.
// If you only need a temporary FastVertex2ShapeScaling, setup your transform as PxMat34Legacy and use
// normal matrix multiplication or a transform() overload to convert points and bounds between spaces.
class FastVertex2ShapeScaling
{
	public:
	//! Identity scaling by default.
	PX_INLINE FastVertex2ShapeScaling()
	{
		//no scaling by default:
		vertex2ShapeSkew = PxMat33(PxIdentity);
		shape2VertexSkew = PxMat33(PxIdentity);
		mFlipNormal = false;
	}

	PX_INLINE explicit FastVertex2ShapeScaling(const PxMeshScale& scale)
	{
		init(scale);
	}

	PX_INLINE FastVertex2ShapeScaling(const PxVec3& scale, const PxQuat& rotation)
	{
		init(scale, rotation);
	}

	PX_INLINE void init(const PxMeshScale& scale)
	{
		init(scale.scale, scale.rotation);
	}

	PX_INLINE void setIdentity()
	{
		vertex2ShapeSkew = PxMat33(PxIdentity);
		shape2VertexSkew = PxMat33(PxIdentity);
		mFlipNormal = false;
	}

	//! Precomputes vertex->shape skew (R^T * diag(scale) * R), its inverse, and
	//! whether the scale has negative determinant (which flips triangle winding).
	PX_INLINE void init(const PxVec3& scale, const PxQuat& rotation)
	{
		// TODO: may want to optimize this for cases where we have uniform or axis aligned scaling!
		// That would introduce branches and it's unclear to me whether that's faster than just doing the math.
		// Lazy computation would be another option, at the cost of introducing even more branches.
		const PxMat33Padded R(rotation);
		vertex2ShapeSkew = R.getTranspose();
		const PxMat33 diagonal = PxMat33::createDiagonal(scale);
		vertex2ShapeSkew = vertex2ShapeSkew * diagonal;
		vertex2ShapeSkew = vertex2ShapeSkew * R;
		/*
		The inverse, is, explicitly:
		shape2VertexSkew.setTransposed(R);
		shape2VertexSkew.multiplyDiagonal(PxVec3(1.0f/scale.x, 1.0f/scale.y, 1.0f/scale.z));
		shape2VertexSkew *= R;
		It may be competitive to compute the inverse -- though this has a branch in it:
		*/
		shape2VertexSkew = vertex2ShapeSkew.getInverse();
		mFlipNormal = ((scale.x * scale.y * scale.z) < 0.0f);
	}

	//! Swaps v1/v2 when the scale mirrors space, preserving outward-facing winding.
	PX_FORCE_INLINE void flipNormal(PxVec3& v1, PxVec3& v2) const
	{
		if (mFlipNormal)
		{
			PxVec3 tmp = v1; v1 = v2; v2 = tmp;
		}
	}

	//! Vertex space -> shape space.
	PX_FORCE_INLINE PxVec3 operator* (const PxVec3& src) const
	{
		return vertex2ShapeSkew * src;
	}

	//! Shape space -> vertex space.
	PX_FORCE_INLINE PxVec3 operator% (const PxVec3& src) const
	{
		return shape2VertexSkew * src;
	}

	PX_FORCE_INLINE const PxMat33& getVertex2ShapeSkew() const
	{
		return vertex2ShapeSkew;
	}

	PX_FORCE_INLINE const PxMat33& getShape2VertexSkew() const
	{
		return shape2VertexSkew;
	}

	PX_INLINE PxMat34 getVertex2WorldSkew(const PxMat34& shape2world) const
	{
		const PxMat34 vertex2worldSkew = shape2world * getVertex2ShapeSkew();
		//vertex2worldSkew = shape2world * [vertex2shapeSkew, 0]
		//[aR at] * [bR bt] = [aR * bR aR * bt + at] NOTE: order of operations important so it works when this ?= left ?= right.
		return vertex2worldSkew;
	}

	PX_INLINE PxMat34 getWorld2VertexSkew(const PxMat34& shape2world) const
	{
		//world2vertexSkew = shape2vertex * invPQ(shape2world)
		//[aR 0] * [bR' -bR'*bt] = [aR * bR' -aR * bR' * bt + 0]
		const PxMat33 rotate( shape2world[0], shape2world[1], shape2world[2] );
		const PxMat33 M = getShape2VertexSkew() * rotate.getTranspose();
		return PxMat34(M[0], M[1], M[2], -M * shape2world[3]);
	}

	//! Transforms a shape space OBB to a vertex space OBB. All 3 params are in and out.
	void transformQueryBounds(PxVec3& center, PxVec3& extents, PxMat33& basis) const
	{
		basis.column0 = shape2VertexSkew * (basis.column0 * extents.x);
		basis.column1 = shape2VertexSkew * (basis.column1 * extents.y);
		basis.column2 = shape2VertexSkew * (basis.column2 * extents.z);
		center = shape2VertexSkew * center;
		extents = PxOptimizeBoundingBox(basis);
	}

	//! Maps a plane (n, d) into shape space, renormalizing the plane equation.
	void transformPlaneToShapeSpace(const PxVec3& nIn, const PxReal dIn, PxVec3& nOut, PxReal& dOut) const
	{
		const PxVec3 tmp = shape2VertexSkew.transformTranspose(nIn);
		const PxReal denom = 1.0f / tmp.magnitude();
		nOut = tmp * denom;
		dOut = dIn * denom;
	}

	PX_FORCE_INLINE bool flipsNormal() const { return mFlipNormal; }

	private:
	PxMat33 vertex2ShapeSkew;	// vertex -> shape space transform
	PxMat33 shape2VertexSkew;	// shape -> vertex space transform (inverse of the above)
	bool mFlipNormal;			// true when the scale determinant is negative (winding flips)
};
//! Copies a triangle (v0, v1, v2) into 'v', applying mesh scaling when needed.
//! When the scale mirrors space, v1 and v2 swap slots to keep the winding consistent.
PX_FORCE_INLINE void getScaledVertices(PxVec3* v, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2, bool idtMeshScale, const Cm::FastVertex2ShapeScaling& scaling)
{
	if(idtMeshScale)
	{
		// Identity scale: plain copy.
		v[0] = v0;
		v[1] = v1;
		v[2] = v2;
		return;
	}

	const PxI32 flip = scaling.flipsNormal() ? 1 : 0;
	v[0] = scaling * v0;
	v[1 + flip] = scaling * v1;
	v[2 - flip] = scaling * v2;
}
} // namespace Cm
//! shape->world transform combined with vertex->shape scaling: world = T * S.
PX_INLINE PxMat34 operator*(const PxTransform& transform, const PxMeshScale& scale)
{
	const PxMat33Padded tmp(transform.q);
	return PxMat34(tmp * Cm::toMat33(scale), transform.p);
}
//! Scaling applied after the transform: result = S * T (note: also scales the translation).
PX_INLINE PxMat34 operator*(const PxMeshScale& scale, const PxTransform& transform)
{
	const PxMat33 scaleMat = Cm::toMat33(scale);
	const PxMat33Padded t(transform.q);
	const PxMat33 r = scaleMat * t;
	const PxVec3 p = scaleMat * transform.p;
	return PxMat34(r, p);
}
//! PxMat34 variant of T * S.
PX_INLINE PxMat34 operator*(const PxMat34& transform, const PxMeshScale& scale)
{
	return PxMat34(transform.m * Cm::toMat33(scale), transform.p);
}
//! PxMat34 variant of S * T (also scales the translation).
PX_INLINE PxMat34 operator*(const PxMeshScale& scale, const PxMat34& transform)
{
	const PxMat33 scaleMat = Cm::toMat33(scale);
	return PxMat34(scaleMat * transform.m, scaleMat * transform.p);
}
}
#endif

View File

@@ -0,0 +1,416 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUtilities.h"
#include "CmSerialize.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAlloca.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Cm;
// Reads a 4-byte chunk identifier from the stream, one byte per output.
// The four reads stay sequential, preserving the on-stream byte order.
void physx::readChunk(PxI8& a, PxI8& b, PxI8& c, PxI8& d, PxInputStream& stream)
{
	PxI8* const dst[4] = { &a, &b, &c, &d };
	for(PxU32 i=0; i<4; i++)
		stream.read(dst[i], sizeof(PxI8));
}
///////////////////////////////////////////////////////////////////////////////
//! Reads a 16-bit word, byte-swapping when the stream endianness mismatches the platform.
PxU16 physx::readWord(bool mismatch, PxInputStream& stream)
{
	PxU16 d;
	stream.read(&d, sizeof(PxU16));
	if(mismatch)
		flip(d);
	return d;
}
//! Reads a 32-bit dword, byte-swapping on endianness mismatch.
PxU32 physx::readDword(bool mismatch, PxInputStream& stream)
{
	PxU32 d;
	stream.read(&d, sizeof(PxU32));
	if(mismatch)
		flip(d);
	return d;
}
//! Reads a 32-bit float; the swap is done on the integer aliasing of the bits.
PxF32 physx::readFloat(bool mismatch, PxInputStream& stream)
{
	union
	{
		PxU32 d;
		PxF32 f;
	} u;
	stream.read(&u.d, sizeof(PxU32));
	if(mismatch)
		flip(u.d);
	return u.f;
}
///////////////////////////////////////////////////////////////////////////////
//! Writes a 16-bit word, byte-swapping first when exporting for the other endianness.
void physx::writeWord(PxU16 value, bool mismatch, PxOutputStream& stream)
{
	if(mismatch)
		flip(value);
	stream.write(&value, sizeof(PxU16));
}
//! Writes a 32-bit dword, byte-swapping on endianness mismatch.
void physx::writeDword(PxU32 value, bool mismatch, PxOutputStream& stream)
{
	if(mismatch)
		flip(value);
	stream.write(&value, sizeof(PxU32));
}
//! Writes a 32-bit float, byte-swapping on endianness mismatch.
void physx::writeFloat(PxF32 value, bool mismatch, PxOutputStream& stream)
{
	if(mismatch)
		flip(value);
	stream.write(&value, sizeof(PxF32));
}
///////////////////////////////////////////////////////////////////////////////
//! Reads 'nbFloats' floats into 'dest', byte-swapping each on endianness mismatch.
//! Returns false when the stream delivered fewer bytes than requested.
bool physx::readFloatBuffer(PxF32* dest, PxU32 nbFloats, bool mismatch, PxInputStream& stream)
{
	const PxU32 nbBytes = sizeof(PxF32)*nbFloats;
	// Previously the result of stream.read() was ignored and the function
	// unconditionally returned true; propagate short reads so callers can
	// detect truncated/corrupt streams.
	const bool ok = stream.read(dest, nbBytes) == nbBytes;
	if(mismatch)
	{
		for(PxU32 i=0;i<nbFloats;i++)
			flip(dest[i]);
	}
	return ok;
}
void physx::writeFloatBuffer(const PxF32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
if(mismatch)
{
while(nb--)
{
PxF32 f = *src++;
flip(f);
stream.write(&f, sizeof(PxF32));
}
}
else
stream.write(src, sizeof(PxF32) * nb);
}
//! Writes 'nb' 16-bit words; swapped one-by-one on mismatch, bulk-written otherwise.
void physx::writeWordBuffer(const PxU16* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
	if(mismatch)
	{
		while(nb--)
		{
			PxU16 w = *src++;
			flip(w);
			stream.write(&w, sizeof(PxU16));
		}
	}
	else
		stream.write(src, sizeof(PxU16) * nb);
}
//! Reads 'nb' 16-bit words, byte-swapping each on endianness mismatch.
void physx::readWordBuffer(PxU16* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
	stream.read(dest, sizeof(PxU16)*nb);
	if(mismatch)
	{
		for(PxU32 i=0;i<nb;i++)
		{
			flip(dest[i]);
		}
	}
}
//! Signed variant of writeWordBuffer.
void physx::writeWordBuffer(const PxI16* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
	if (mismatch)
	{
		while (nb--)
		{
			PxI16 w = *src++;
			flip(w);
			stream.write(&w, sizeof(PxI16));
		}
	}
	else
		stream.write(src, sizeof(PxI16) * nb);
}
//! Raw byte read - no endianness handling needed for single bytes.
void physx::readByteBuffer(PxU8* dest, PxU32 nb, PxInputStream& stream)
{
	stream.read(dest, sizeof(PxU8) * nb);
}
//! Raw byte write - no endianness handling needed for single bytes.
void physx::writeByteBuffer(const PxU8* src, PxU32 nb, PxOutputStream& stream)
{
	stream.write(src, sizeof(PxU8) * nb);
}
//! Signed variant of readWordBuffer.
void physx::readWordBuffer(PxI16* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
	stream.read(dest, sizeof(PxI16)*nb);
	if (mismatch)
	{
		for (PxU32 i = 0; i < nb; i++)
		{
			flip(dest[i]);
		}
	}
}
///////////////////////////////////////////////////////////////////////////////
//! Writes an 'NXS' stream header: magic + endianness flag, chunk id, version.
//! The low bit of the flags byte records the file's endianness (possibly
//! inverted by 'mismatch' when exporting for the other platform).
bool physx::writeHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32 version, bool mismatch, PxOutputStream& stream)
{
	// Store endianness
	PxI8 streamFlags = PxLittleEndian();
	if(mismatch)
		streamFlags^=1;
	// Export header
	writeChunk('N', 'X', 'S', streamFlags, stream); // "Novodex stream" identifier
	writeChunk(a, b, c, d, stream); // Chunk identifier
	writeDword(version, mismatch, stream);
	return true;
}
//! Legacy 'ICE' header variant of writeHeader.
bool Cm::WriteHeader(PxU8 a, PxU8 b, PxU8 c, PxU8 d, PxU32 version, bool mismatch, PxOutputStream& stream)
{
	// Store endianness
	PxU8 streamFlags = PxU8(PxLittleEndian());
	if(mismatch)
		streamFlags^=1;
	// Export header
	writeChunk('I', 'C', 'E', PxI8(streamFlags), stream); // ICE identifier
	writeChunk(PxI8(a), PxI8(b), PxI8(c), PxI8(d), stream); // Chunk identifier
	writeDword(version, mismatch, stream);
	return true;
}
//! Parses an 'NXS' header, validating the magic and expected chunk id.
//! Outputs the stored version and whether the file's endianness differs
//! from the current platform's. Returns false on any mismatch.
bool physx::readHeader(PxI8 a_, PxI8 b_, PxI8 c_, PxI8 d_, PxU32& version, bool& mismatch, PxInputStream& stream)
{
	// Import header
	PxI8 a, b, c, d;
	readChunk(a, b, c, d, stream);
	if(a!='N' || b!='X' || c!='S')
		return false;
	const PxI8 fileLittleEndian = d&1;
	mismatch = fileLittleEndian!=PxLittleEndian();
	readChunk(a, b, c, d, stream);
	if(a!=a_ || b!=b_ || c!=c_ || d!=d_)
		return false;
	version = readDword(mismatch, stream);
	return true;
}
//! Legacy 'ICE' header variant of readHeader.
bool Cm::ReadHeader(PxU8 a_, PxU8 b_, PxU8 c_, PxU8 d_, PxU32& version, bool& mismatch, PxInputStream& stream)
{
	// Import header
	PxI8 a, b, c, d;
	readChunk(a, b, c, d, stream);
	if(a!='I' || b!='C' || c!='E')
		return false;
	const PxU8 FileLittleEndian = PxU8(d&1);
	mismatch = FileLittleEndian!=PxLittleEndian();
	readChunk(a, b, c, d, stream);
	if(a!=a_ || b!=b_ || c!=c_ || d!=d_)
		return false;
	version = readDword(mismatch, stream);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
//! Returns the largest value in an index buffer (0 for an empty buffer).
PxU32 physx::computeMaxIndex(const PxU32* indices, PxU32 nbIndices)
{
	PxU32 best = 0;
	for(PxU32 i=0; i<nbIndices; i++)
	{
		if(indices[i] > best)
			best = indices[i];
	}
	return best;
}
//! 16-bit variant: returns the largest value in an index buffer (0 when empty).
PxU16 physx::computeMaxIndex(const PxU16* indices, PxU32 nbIndices)
{
	PxU16 best = 0;
	for(PxU32 i=0; i<nbIndices; i++)
	{
		if(indices[i] > best)
			best = indices[i];
	}
	return best;
}
//! Writes indices using the smallest encoding that fits 'maxIndex':
//! bytes (<=0xff), words (<=0xffff) or dwords. readIndices must be called
//! with the same maxIndex to decode.
void physx::storeIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
		{
			PxU8 data = PxU8(indices[i]);
			stream.write(&data, sizeof(PxU8));
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
			writeWord(PxTo16(indices[i]), platformMismatch, stream);
	}
	else
	{
		writeIntBuffer(indices, nbIndices, platformMismatch, stream);
	}
}
//! Reads indices encoded by storeIndices; 'maxIndex' selects the same
//! byte/word/dword decoding that was used when writing.
void physx::readIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		PxU8 data;
		for(PxU32 i=0;i<nbIndices;i++)
		{
			stream.read(&data, sizeof(PxU8));
			indices[i] = data;
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
			indices[i] = readWord(platformMismatch, stream);
	}
	else
	{
		readIntBuffer(indices, nbIndices, platformMismatch, stream);
	}
}
///////////////////////////////////////////////////////////////////////////////
//! Legacy Cm variant of physx::storeIndices. Same byte/word/dword size
//! selection; the dword path writes values one at a time instead of going
//! through writeIntBuffer, but the produced bytes are meant to be equivalent.
void Cm::StoreIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
		{
			PxU8 data = PxU8(indices[i]);
			stream.write(&data, sizeof(PxU8));
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
			writeWord(PxTo16(indices[i]), platformMismatch, stream);
	}
	else
	{
		// WriteDwordBuffer(indices, nbIndices, platformMismatch, stream);
		for(PxU32 i=0;i<nbIndices;i++)
			writeDword(indices[i], platformMismatch, stream);
	}
}
//! Legacy Cm variant of physx::readIndices. Small encodings are read into a
//! stack-allocated scratch buffer (PxAlloca) and widened to 32 bits.
void Cm::ReadIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		PxU8* tmp = reinterpret_cast<PxU8*>(PxAlloca(nbIndices*sizeof(PxU8)));
		stream.read(tmp, nbIndices*sizeof(PxU8));
		for(PxU32 i=0;i<nbIndices;i++)
			indices[i] = tmp[i];
		// for(PxU32 i=0;i<nbIndices;i++)
		// indices[i] = stream.ReadByte();
	}
	else if(maxIndex<=0xffff)
	{
		PxU16* tmp = reinterpret_cast<PxU16*>(PxAlloca(nbIndices*sizeof(PxU16)));
		readWordBuffer(tmp, nbIndices, platformMismatch, stream);
		for(PxU32 i=0;i<nbIndices;i++)
			indices[i] = tmp[i];
		// for(PxU32 i=0;i<nbIndices;i++)
		// indices[i] = ReadWord(platformMismatch, stream);
	}
	else
	{
		ReadDwordBuffer(indices, nbIndices, platformMismatch, stream);
	}
}
//! 16-bit source variant: writes bytes when maxIndex fits, words otherwise.
void Cm::StoreIndices(PxU16 maxIndex, PxU32 nbIndices, const PxU16* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0;i<nbIndices;i++)
		{
			PxU8 data = PxU8(indices[i]);
			stream.write(&data, sizeof(PxU8));
		}
	}
	else
	{
		for(PxU32 i=0;i<nbIndices;i++)
			writeWord(indices[i], platformMismatch, stream);
	}
}
// 16-bit variant of ReadIndices: decodes the format written by
// Cm::StoreIndices(PxU16, ...), selected by the same maxIndex value.
void Cm::ReadIndices(PxU16 maxIndex, PxU32 nbIndices, PxU16* indices, PxInputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		// Bulk-read raw bytes on the stack, then widen to 16 bits.
		PxU8* buffer8 = reinterpret_cast<PxU8*>(PxAlloca(nbIndices*sizeof(PxU8)));
		stream.read(buffer8, nbIndices*sizeof(PxU8));
		for(PxU32 j=0; j<nbIndices; j++)
			indices[j] = buffer8[j];
	}
	else
	{
		// Words can be read (and endian-corrected) straight into the destination.
		readWordBuffer(indices, nbIndices, platformMismatch, stream);
	}
}

View File

@@ -0,0 +1,197 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SERIALIZE_H
#define CM_SERIALIZE_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxIO.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxUtilities.h"
namespace physx
{
// Swaps the two bytes of an unsigned 16-bit value in place (endianness conversion).
PX_INLINE void flip(PxU16& v)
{
	PxU8* bytes = reinterpret_cast<PxU8*>(&v);
	const PxU8 first = bytes[0];
	bytes[0] = bytes[1];
	bytes[1] = first;
}
// Swaps the two bytes of a signed 16-bit value in place (endianness conversion).
PX_INLINE void flip(PxI16& v)
{
	PxI8* bytes = reinterpret_cast<PxI8*>(&v);
	const PxI8 first = bytes[0];
	bytes[0] = bytes[1];
	bytes[1] = first;
}
// Reverses the four bytes of a 32-bit value in place (endianness conversion):
// swap the outer pair (0,3), then the inner pair (1,2).
PX_INLINE void flip(PxU32& v)
{
	PxU8* bytes = reinterpret_cast<PxU8*>(&v);
	PxU8 t = bytes[0];
	bytes[0] = bytes[3];
	bytes[3] = t;
	t = bytes[1];
	bytes[1] = bytes[2];
	bytes[2] = t;
}
// MS: It is important to modify the value directly and not use a temporary variable or a return
// value. The reason for this is that a flipped float might have a bit pattern which indicates
// an invalid float. If such a float is assigned to another float, the bit pattern
// can change again (maybe to map invalid floats to a common invalid pattern?).
// When reading the float and flipping again, the changed bit pattern will result in a different
// float than the original one.
// Reverses the four bytes of a float IN PLACE. The in-place byte manipulation is
// deliberate (see the note above): assigning a flipped float through a float
// lvalue could silently alter an invalid-float bit pattern.
PX_INLINE void flip(PxF32& v)
{
	PxU8* bytes = reinterpret_cast<PxU8*>(&v);
	PxU8 t = bytes[0];
	bytes[0] = bytes[3];
	bytes[3] = t;
	t = bytes[1];
	bytes[1] = bytes[2];
	bytes[2] = t;
}
// Writes a four-character chunk tag (e.g. a file-format magic) to the stream,
// one byte at a time so the on-disk layout is endian-independent.
PX_INLINE void writeChunk(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxOutputStream& stream)
{
	const PxI8 chunk[4] = { a, b, c, d };
	for(PxU32 i=0; i<4; i++)
		stream.write(&chunk[i], sizeof(PxI8));
}
void readChunk(PxI8& a, PxI8& b, PxI8& c, PxI8& d, PxInputStream& stream);
PxU16 readWord(bool mismatch, PxInputStream& stream);
PxU32 readDword(bool mismatch, PxInputStream& stream);
PxF32 readFloat(bool mismatch, PxInputStream& stream);
void writeWord(PxU16 value, bool mismatch, PxOutputStream& stream);
void writeDword(PxU32 value, bool mismatch, PxOutputStream& stream);
void writeFloat(PxF32 value, bool mismatch, PxOutputStream& stream);
bool readFloatBuffer(PxF32* dest, PxU32 nbFloats, bool mismatch, PxInputStream& stream);
void writeFloatBuffer(const PxF32* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void writeWordBuffer(const PxU16* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void readWordBuffer(PxU16* dest, PxU32 nb, bool mismatch, PxInputStream& stream);
void writeWordBuffer(const PxI16* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void readWordBuffer(PxI16* dest, PxU32 nb, bool mismatch, PxInputStream& stream);
void writeByteBuffer(const PxU8* src, PxU32 nb, PxOutputStream& stream);
void readByteBuffer(PxU8* dest, PxU32 nb, PxInputStream& stream);
bool writeHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32 version, bool mismatch, PxOutputStream& stream);
bool readHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32& version, bool& mismatch, PxInputStream& stream);
// Reads nbInts 32-bit integers by delegating to readFloatBuffer (same width, same
// endian handling). NOTE(review): this type-puns ints through float pointers;
// confirm readFloatBuffer flips raw bytes in place (see the flip(PxF32&) note)
// rather than assigning through float lvalues, or int bit patterns could change.
PX_INLINE bool readIntBuffer(PxU32* dest, PxU32 nbInts, bool mismatch, PxInputStream& stream)
{
	return readFloatBuffer(reinterpret_cast<PxF32*>(dest), nbInts, mismatch, stream);
}
// Writes nb 32-bit integers by delegating to writeFloatBuffer (same width, same
// endian handling); the ints are reinterpreted, not converted.
PX_INLINE void writeIntBuffer(const PxU32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
	writeFloatBuffer(reinterpret_cast<const PxF32*>(src), nb, mismatch, stream);
}
// Legacy-named alias of readIntBuffer: reads nb dwords via readFloatBuffer.
// Kept alongside readIntBuffer for the IceSerialize-derived call sites below.
PX_INLINE bool ReadDwordBuffer(PxU32* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
	return readFloatBuffer(reinterpret_cast<float*>(dest), nb, mismatch, stream);
}
// Legacy-named alias of writeIntBuffer: writes nb dwords via writeFloatBuffer.
PX_INLINE void WriteDwordBuffer(const PxU32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
	writeFloatBuffer(reinterpret_cast<const float*>(src), nb, mismatch, stream);
}
PxU32 computeMaxIndex(const PxU32* indices, PxU32 nbIndices);
PxU16 computeMaxIndex(const PxU16* indices, PxU32 nbIndices);
void storeIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch);
void readIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch);
// PT: see PX-1163
// Reads a file-version dword, coping with legacy version-1 files that were always
// written big-endian (including the version number itself). On return, fileVersion
// holds the decoded version and mismatch the effective endian-mismatch flag to use
// for the rest of the file. Returns false for versions above 3 (unknown/corrupt).
PX_FORCE_INLINE bool readBigEndianVersionNumber(PxInputStream& stream, bool mismatch_, PxU32& fileVersion, bool& mismatch)
{
	// PT: allright this is going to be subtle:
	// - in version 1 the data was always saved in big-endian format
	// - *including the version number*!
	// - so we cannot just read the version "as usual" using the passed mismatch param
	// PT: mismatch value for version 1
	mismatch = (PxLittleEndian() == 1);
	// Read the raw dword without any flipping; we decide how to interpret it below.
	const PxU32 rawFileVersion = readDword(false, stream);
	if(rawFileVersion==1)
	{
		// PT: this is a version-1 file with no flip
		fileVersion = 1;
		PX_ASSERT(!mismatch);
	}
	else
	{
		PxU32 fileVersionFlipped = rawFileVersion;
		flip(fileVersionFlipped);
		if(fileVersionFlipped==1)
		{
			// PT: this is a version-1 file with flip
			fileVersion = 1;
			PX_ASSERT(mismatch);
		}
		else
		{
			// PT: this is at least version 2 so we can process it "as usual"
			mismatch = mismatch_;
			fileVersion = mismatch_ ? fileVersionFlipped : rawFileVersion;
		}
	}
	// Reject versions this code does not know how to parse.
	PX_ASSERT(fileVersion<=3);
	if(fileVersion>3)
		return false;
	return true;
}
// PT: TODO: copied from IceSerialize.h, still needs to be refactored/cleaned up.
namespace Cm
{
bool WriteHeader(PxU8 a, PxU8 b, PxU8 c, PxU8 d, PxU32 version, bool mismatch, PxOutputStream& stream);
bool ReadHeader(PxU8 a_, PxU8 b_, PxU8 c_, PxU8 d_, PxU32& version, bool& mismatch, PxInputStream& stream);
void StoreIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch);
void ReadIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch);
void StoreIndices(PxU16 maxIndex, PxU32 nbIndices, const PxU16* indices, PxOutputStream& stream, bool platformMismatch);
void ReadIndices(PxU16 maxIndex, PxU32 nbIndices, PxU16* indices, PxInputStream& stream, bool platformMismatch);
}
}
#endif

View File

@@ -0,0 +1,532 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SPATIAL_VECTOR_H
#define CM_SPATIAL_VECTOR_H
#include "foundation/PxVec3.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxTransform.h"
/*!
Combination of two R3 vectors.
*/
namespace physx
{
namespace Cm
{
PX_ALIGN_PREFIX(16)
// A pair of R3 vectors (linear + angular part), padded to 32 bytes with 16-byte
// alignment so each half maps cleanly onto a SIMD register.
class SpatialVector
{
public:
	//! Default constructor: members deliberately left uninitialized (hot-path type).
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector()	{}

	//! Construct from a linear and an angular part; padding lanes are zeroed.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector(const PxVec3& lin, const PxVec3& ang) : linear(lin), pad0(0.0f), angular(ang), pad1(0.0f)	{}

	PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVector()	{}

	// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
	// each time we copy a SpatialVector (see for example PIX on "solveSimpleGroupA" without this operator).
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVector& v)
	{
		linear = v.linear;
		pad0 = 0.0f;
		angular = v.angular;
		pad1 = 0.0f;
	}

	//! All-zero spatial vector.
	static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector zero()	{ return SpatialVector(PxVec3(0), PxVec3(0)); }

	// Componentwise arithmetic on both halves.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator+(const SpatialVector& v)	const	{ return SpatialVector(linear+v.linear, angular+v.angular);	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-(const SpatialVector& v)	const	{ return SpatialVector(linear-v.linear, angular-v.angular);	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-()							const	{ return SpatialVector(-linear, -angular);					}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator *(PxReal s)					const	{ return SpatialVector(linear*s, angular*s);				}

	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator+=(const SpatialVector& v)
	{
		linear += v.linear;
		angular += v.angular;
	}

	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator-=(const SpatialVector& v)
	{
		linear -= v.linear;
		angular -= v.angular;
	}

	//! Sum of the two halves' Euclidean norms (not the 6D norm).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude()	const
	{
		return angular.magnitude() + linear.magnitude();
	}

	//! Plain 6D dot product (linear with linear, angular with angular).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
	{
		return linear.dot(v.linear) + angular.dot(v.angular);
	}

	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
	{
		return linear.isFinite() && angular.isFinite();
	}

	//! Scales the linear part by l and the angular part by a.
	PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVector scale(PxReal l, PxReal a) const
	{
		return Cm::SpatialVector(linear*l, angular*a);
	}

	PxVec3	linear;
	PxReal	pad0;
	PxVec3	angular;
	PxReal	pad1;
}
PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
// Spatial vector in (top, bottom) form, padded to 32 bytes with 16-byte alignment.
// Which half holds the linear vs angular part depends on the caller's convention
// (see innerProduct vs dot below, which pair the halves differently).
struct SpatialVectorF
{
public:
	//! Default constructor: members deliberately left uninitialized (hot-path type).
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF()
	{}
	//! Construct from a flat array of six floats: v[0..2] -> top, v[3..5] -> bottom.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxReal* v)
	: pad0(0.0f), pad1(0.0f)
	{
		top.x = v[0]; top.y = v[1]; top.z = v[2];
		bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
	}
	//! Construct from two PxcVectors; padding lanes are zeroed.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxVec3& top_, const PxVec3& bottom_)
	: top(top_), pad0(0.0f), bottom(bottom_), pad1(0.0f)
	{
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVectorF()
	{}
	// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
	// each time we copy a SpatialVector (see for example PIX on "solveSimpleGroupA" without this operator).
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
	{
		top = v.top;
		pad0 = 0.0f;
		bottom = v.bottom;
		pad1 = 0.0f;
	}
	//! All-zero spatial vector.
	static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF Zero() { return SpatialVectorF(PxVec3(0), PxVec3(0)); }
	// Componentwise arithmetic on both halves.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator+(const SpatialVectorF& v) const
	{
		return SpatialVectorF(top + v.top, bottom + v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-(const SpatialVectorF& v) const
	{
		return SpatialVectorF(top - v.top, bottom - v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-() const
	{
		return SpatialVectorF(-top, -bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator *(PxReal s) const
	{
		return SpatialVectorF(top*s, bottom*s);
	}
	//! Componentwise (Hadamard) product.
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF multiply(const SpatialVectorF& v) const
	{
		return SpatialVectorF(top.multiply(v.top), bottom.multiply(v.bottom));
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
	{
		top *= s;
		bottom *= s;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
	{
		top += v.top;
		bottom += v.bottom;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
	{
		top -= v.top;
		bottom -= v.bottom;
	}
	//! Sum of the two halves' Euclidean norms (not the 6D norm).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
	{
		return top.magnitude() + bottom.magnitude();
	}
	// NOTE(review): unlike the surrounding accessors this one is not
	// PX_CUDA_CALLABLE — confirm whether that is intentional.
	PX_FORCE_INLINE PxReal magnitudeSquared() const
	{
		return top.magnitudeSquared() + bottom.magnitudeSquared();
	}
	//! Spatial inner product: pairs this->bottom with v.top and this->top with v.bottom.
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
	{
		return bottom.dot(v.top) + top.dot(v.bottom);
		/*PxVec3 p0 = bottom.multiply(v.top);
		PxVec3 p1 = top.multiply(v.bottom);
		PxReal result = (((p1.y + p1.z) + (p0.z + p1.x)) + (p0.x + p0.y));
		return result;*/
	}
	//! Plain 6D dot product (top with top, bottom with bottom).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
	{
		return top.dot(v.top) + bottom.dot(v.bottom);
	}
	//! Mixed-type product pairing bottom with v.angular and top with v.linear.
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
	{
		return bottom.dot(v.angular) + top.dot(v.linear);
	}
	//! Spatial cross product: (top x v.top, top x v.bottom + bottom x v.top).
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF cross(const SpatialVectorF& v) const
	{
		SpatialVectorF a;
		a.top = top.cross(v.top);
		a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
		return a;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF abs() const
	{
		return SpatialVectorF(top.abs(), bottom.abs());
	}
	//! Rotates both halves by the rotation part of rot (translation is ignored).
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotate(const PxTransform& rot) const
	{
		return SpatialVectorF(rot.rotate(top), rot.rotate(bottom));
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotateInv(const PxTransform& rot) const
	{
		return SpatialVectorF(rot.rotateInv(top), rot.rotateInv(bottom));
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
	{
		return top.isFinite() && bottom.isFinite();
	}
	//! True when every component's magnitude is at most maxV.
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
	{
		const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
		const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
		return tValid && bValid;
	}
	//! Scales top by l and bottom by a.
	PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF scale(PxReal l, PxReal a) const
	{
		return Cm::SpatialVectorF(top*l, bottom*a);
	}
	//! Writes the six components back to a flat float array (inverse of the PxReal* ctor).
	PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
	{
		val[0] = top.x; val[1] = top.y; val[2] = top.z;
		val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
	}
	//! Indexes the six components: 0-2 -> top, 3-5 -> bottom (skips the padding).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
	{
		PX_ASSERT(index < 6);
		if(index < 3)
			return top[index];
		return bottom[index-3];
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
	{
		PX_ASSERT(index < 6);
		if (index < 3)
			return top[index];
		return bottom[index-3];
	}
	PxVec3 top;
	PxReal pad0;
	PxVec3 bottom;
	PxReal pad1;
} PX_ALIGN_SUFFIX(16);
// Packed 24-byte spatial vector: same (top, bottom) semantics as SpatialVectorF
// but without padding or alignment requirements, for tightly packed arrays.
struct UnAlignedSpatialVector
{
public:
	//! Default constructor: members deliberately left uninitialized (hot-path type).
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector()
	{}
	//! Construct from a flat array of six floats: v[0..2] -> top, v[3..5] -> bottom.
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxReal* v)
	{
		top.x = v[0]; top.y = v[1]; top.z = v[2];
		bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
	}
	//! Construct from two PxcVectors
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxVec3& top_, const PxVec3& bottom_)
	: top(top_), bottom(bottom_)
	{
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE ~UnAlignedSpatialVector()
	{}
	//! Assignment from the aligned variant (padding is dropped).
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
	{
		top = v.top;
		bottom = v.bottom;
	}
	//! All-zero spatial vector.
	static PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector Zero() { return UnAlignedSpatialVector(PxVec3(0), PxVec3(0)); }
	// Componentwise arithmetic on both halves.
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator+(const UnAlignedSpatialVector& v) const
	{
		return UnAlignedSpatialVector(top + v.top, bottom + v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-(const UnAlignedSpatialVector& v) const
	{
		return UnAlignedSpatialVector(top - v.top, bottom - v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-() const
	{
		return UnAlignedSpatialVector(-top, -bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator *(PxReal s) const
	{
		return UnAlignedSpatialVector(top*s, bottom*s);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
	{
		top *= s;
		bottom *= s;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const UnAlignedSpatialVector& v)
	{
		top += v.top;
		bottom += v.bottom;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
	{
		top += v.top;
		bottom += v.bottom;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const UnAlignedSpatialVector& v)
	{
		top -= v.top;
		bottom -= v.bottom;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
	{
		top -= v.top;
		bottom -= v.bottom;
	}
	//! Sum of the two halves' Euclidean norms (not the 6D norm).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
	{
		return top.magnitude() + bottom.magnitude();
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitudeSquared() const
	{
		return top.magnitudeSquared() + bottom.magnitudeSquared();
	}
	//! Spatial inner product: pairs this->bottom with v.top and this->top with v.bottom.
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const UnAlignedSpatialVector& v) const
	{
		return bottom.dot(v.top) + top.dot(v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
	{
		return bottom.dot(v.top) + top.dot(v.bottom);
	}
	//! Plain 6D dot product (top with top, bottom with bottom).
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const UnAlignedSpatialVector& v) const
	{
		return top.dot(v.top) + bottom.dot(v.bottom);
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
	{
		return top.dot(v.top) + bottom.dot(v.bottom);
	}
	//! Spatial cross product: (top x v.top, top x v.bottom + bottom x v.top).
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector cross(const UnAlignedSpatialVector& v) const
	{
		UnAlignedSpatialVector a;
		a.top = top.cross(v.top);
		a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
		return a;
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector abs() const
	{
		return UnAlignedSpatialVector(top.abs(), bottom.abs());
	}
	//! Rotates both halves by the rotation part of rot (translation is ignored).
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotate(const PxTransform& rot) const
	{
		return UnAlignedSpatialVector(rot.rotate(top), rot.rotate(bottom));
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotateInv(const PxTransform& rot) const
	{
		return UnAlignedSpatialVector(rot.rotateInv(top), rot.rotateInv(bottom));
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
	{
		return top.isFinite() && bottom.isFinite();
	}
	//! True when every component's magnitude is at most maxV.
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
	{
		// Fix: compare absolute values, matching SpatialVectorF::isValid. The
		// previous code compared raw signed components against maxV, so any
		// arbitrarily large *negative* component was reported as valid.
		const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
		const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
		return tValid && bValid;
	}
	//! Scales top by l and bottom by a.
	PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector scale(PxReal l, PxReal a) const
	{
		return Cm::UnAlignedSpatialVector(top*l, bottom*a);
	}
	//! Writes the six components back to a flat float array (inverse of the PxReal* ctor).
	PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
	{
		val[0] = top.x; val[1] = top.y; val[2] = top.z;
		val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
	}
	//! Indexes the six components 0..5 directly; valid because top and bottom are
	//! contiguous with no padding in this (unaligned) layout.
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
	{
		PX_ASSERT(index < 6);
		return (&top.x)[index];
	}
	PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
	{
		PX_ASSERT(index < 6);
		return (&top.x)[index];
	}
	PxVec3 top;			//12	12
	PxVec3 bottom;		//12	24
};
PX_ALIGN_PREFIX(16)
// SIMD (aos) version of the spatial vector: linear and angular parts held in
// vector registers, 16-byte aligned.
struct SpatialVectorV
{
	aos::Vec3V linear;
	aos::Vec3V angular;

	//! Default constructor: members deliberately left uninitialized (hot-path type).
	PX_FORCE_INLINE SpatialVectorV()	{}
	//! Zero-initializing constructor (tag-dispatched on PxZERO).
	PX_FORCE_INLINE SpatialVectorV(PxZERO) : linear(aos::V3Zero()), angular(aos::V3Zero())	{}
	//! Loads from the scalar variant; relies on SpatialVector's 16-byte alignment (V3LoadA).
	PX_FORCE_INLINE SpatialVectorV(const Cm::SpatialVector& v) : linear(aos::V3LoadA(&v.linear.x)), angular(aos::V3LoadA(&v.angular.x))	{}
	PX_FORCE_INLINE SpatialVectorV(const aos::Vec3VArg l, const aos::Vec3VArg a) : linear(l), angular(a)	{}
	PX_FORCE_INLINE SpatialVectorV(const SpatialVectorV& other) : linear(other.linear), angular(other.angular)	{}

	PX_FORCE_INLINE SpatialVectorV& operator=(const SpatialVectorV& other)
	{
		linear = other.linear;
		angular = other.angular;
		return *this;
	}

	// Componentwise arithmetic on both halves.
	PX_FORCE_INLINE SpatialVectorV operator+(const SpatialVectorV& other) const
	{
		return SpatialVectorV(aos::V3Add(linear, other.linear), aos::V3Add(angular, other.angular));
	}

	PX_FORCE_INLINE SpatialVectorV& operator+=(const SpatialVectorV& other)
	{
		linear = aos::V3Add(linear, other.linear);
		angular = aos::V3Add(angular, other.angular);
		return *this;
	}

	PX_FORCE_INLINE SpatialVectorV operator-(const SpatialVectorV& other) const
	{
		return SpatialVectorV(aos::V3Sub(linear, other.linear), aos::V3Sub(angular, other.angular));
	}

	PX_FORCE_INLINE SpatialVectorV operator-() const
	{
		return SpatialVectorV(aos::V3Neg(linear), aos::V3Neg(angular));
	}

	PX_FORCE_INLINE SpatialVectorV operator*(const aos::FloatVArg r) const
	{
		return SpatialVectorV(aos::V3Scale(linear, r), aos::V3Scale(angular, r));
	}

	PX_FORCE_INLINE SpatialVectorV& operator-=(const SpatialVectorV& other)
	{
		linear = aos::V3Sub(linear, other.linear);
		angular = aos::V3Sub(angular, other.angular);
		return *this;
	}

	//! Plain 6D dot product (linear with linear, angular with angular).
	PX_FORCE_INLINE aos::FloatV dot(const SpatialVectorV& other) const
	{
		return aos::V3SumElems(aos::V3Add(aos::V3Mul(linear, other.linear), aos::V3Mul(angular, other.angular)));
	}

	//! Componentwise (Hadamard) product.
	PX_FORCE_INLINE SpatialVectorV multiply(const SpatialVectorV& other) const
	{
		return SpatialVectorV(aos::V3Mul(linear, other.linear), aos::V3Mul(angular, other.angular));
	}

	//! Returns this * m + a, componentwise on both halves.
	PX_FORCE_INLINE SpatialVectorV multiplyAdd(const SpatialVectorV& m, const SpatialVectorV& a) const
	{
		return SpatialVectorV(aos::V3MulAdd(linear, m.linear, a.linear), aos::V3MulAdd(angular, m.angular, a.angular));
	}

	//! Scales the linear part by a and the angular part by b.
	PX_FORCE_INLINE SpatialVectorV scale(const aos::FloatV& a, const aos::FloatV& b) const
	{
		return SpatialVectorV(aos::V3Scale(linear, a), aos::V3Scale(angular, b));
	}
}PX_ALIGN_SUFFIX(16);
} // namespace Cm
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVector) == 32);
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVectorV) == 32);
}
#endif

View File

@@ -0,0 +1,299 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_TASK_H
#define CM_TASK_H
#include "task/PxTask.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxMutex.h"
#include "foundation/PxInlineArray.h"
#include "foundation/PxFPU.h"
// PT: this shouldn't be in Cm. The whole task manager is in the PhysX DLL so we cannot use any of these inside the Common DLL
namespace physx
{
namespace Cm
{
// wrapper around the public PxLightCpuTask
// internal SDK tasks should be inherited from
// this and override the runInternal() method
// to ensure that the correct floating point
// state is set / reset during execution
class Task : public physx::PxLightCpuTask
{
public:
	// \param contextId	profiler/debug context id stored in the base task's mContextID.
	Task(PxU64 contextId)
	{
		mContextID = contextId;
	}

	// Establishes the correct floating-point state for the duration of the work,
	// then dispatches to runInternal().
	virtual void run()
	{
#if PX_SWITCH // special case because default rounding mode is not nearest
		PX_FPU_GUARD;
#else
		PX_SIMD_GUARD;
#endif
		runInternal();
	}

	// Override with the task's actual work; called with the FP state already set up.
	virtual void runInternal()=0;
};
// same as Cm::Task but inheriting from physx::PxBaseTask
// instead of PxLightCpuTask
class BaseTask : public physx::PxBaseTask
{
public:
	// Establishes the correct floating-point state for the duration of the work,
	// then dispatches to runInternal() — mirrors Cm::Task::run().
	virtual void run()
	{
#if PX_SWITCH // special case because default rounding mode is not nearest
		PX_FPU_GUARD;
#else
		PX_SIMD_GUARD;
#endif
		runInternal();
	}

	// Override with the task's actual work; called with the FP state already set up.
	virtual void runInternal()=0;
};
// Task that forwards execution to a member function Fn of an object of type T.
// \tparam T	type of the delegate object.
// \tparam Fn	member function invoked with the task's continuation.
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateTask : public Cm::Task, public PxUserAllocated
{
public:

	DelegateTask(PxU64 contextID, T* obj, const char* name) : Cm::Task(contextID), mObj(obj), mName(name) {}

	// NOTE(review): run() re-implements the FP guard and invokes Fn directly instead
	// of going through Cm::Task::run() -> runInternal(); keep both paths in sync.
	virtual void run()
	{
#if PX_SWITCH // special case because default rounding mode is not nearest
		PX_FPU_GUARD;
#else
		PX_SIMD_GUARD;
#endif
		(mObj->*Fn)(mCont);
	}

	// Invokes the delegate without setting up FP state (callers are expected to
	// have done so already, e.g. via startTask()'s synchronous path).
	virtual void runInternal()
	{
		(mObj->*Fn)(mCont);
	}

	virtual const char* getName() const
	{
		return mName;
	}

	// Retargets the delegate to another object instance.
	void setObject(T* obj) { mObj = obj; }

private:
	T* mObj;			// delegate object; not owned
	const char* mName;	// task name for profiling/debugging; not owned
};
/**
\brief A task that maintains a list of dependent tasks.
This task maintains a list of dependent tasks that have their reference counts
reduced on completion of the task.
The refcount is incremented every time a dependent task is added.
*/
class FanoutTask : public Cm::BaseTask
{
	PX_NOCOPY(FanoutTask)
public:
	FanoutTask(PxU64 contextID, const char* name) : Cm::BaseTask(), mRefCount(0), mName(name), mNotifySubmission(false) { mContextID = contextID; }

	// The fanout task itself does no work; its purpose is releasing dependents.
	virtual void runInternal() {}

	virtual const char* getName() const { return mName; }

	/**
	Swap mDependents with mReferencesToRemove when refcount goes to 0.
	*/
	virtual void removeReference()
	{
		PxMutex::ScopedLock lock(mMutex);
		if (!physx::PxAtomicDecrement(&mRefCount))
		{
			// prevents access to mReferencesToRemove until release
			physx::PxAtomicIncrement(&mRefCount);
			mNotifySubmission = false;
			PX_ASSERT(mReferencesToRemove.empty());
			// Move the dependents aside; release() will decrement them after this
			// task has been run by the dispatcher.
			for (PxU32 i = 0; i < mDependents.size(); i++)
				mReferencesToRemove.pushBack(mDependents[i]);
			mDependents.clear();
			mTm->getCpuDispatcher()->submitTask(*this);
		}
	}

	/**
	\brief Increases reference count
	*/
	virtual void addReference()
	{
		PxMutex::ScopedLock lock(mMutex);
		physx::PxAtomicIncrement(&mRefCount);
		mNotifySubmission = true;
	}

	/**
	\brief Return the ref-count for this task
	*/
	PX_INLINE PxI32 getReference() const
	{
		return mRefCount;
	}

	/**
	Sets the task manager. Doesn't increase the reference count.
	*/
	PX_INLINE void setTaskManager(physx::PxTaskManager& tm)
	{
		mTm = &tm;
	}

	/**
	Adds a dependent task. It also sets the task manager querying it from the dependent task.
	The refcount is incremented every time a dependent task is added.
	*/
	PX_INLINE void addDependent(physx::PxBaseTask& dependent)
	{
		PxMutex::ScopedLock lock(mMutex);
		physx::PxAtomicIncrement(&mRefCount);
		mTm = dependent.getTaskManager();
		mDependents.pushBack(&dependent);
		dependent.addReference();
		mNotifySubmission = true;
	}

	/**
	Reduces reference counts of the continuation task and the dependent tasks, also
	clearing the copy of continuation and dependents task list.
	*/
	virtual void release()
	{
		PxInlineArray<physx::PxBaseTask*, 10> referencesToRemove;

		{
			PxMutex::ScopedLock lock(mMutex);

			// Copy the pending references to a local list so they can be released
			// after the mutex is dropped (see the lifetime note below).
			const PxU32 contCount = mReferencesToRemove.size();
			referencesToRemove.reserve(contCount);
			for (PxU32 i=0; i < contCount; ++i)
				referencesToRemove.pushBack(mReferencesToRemove[i]);

			mReferencesToRemove.clear();
			// allow access to mReferencesToRemove again
			if (mNotifySubmission)
			{
				// New dependents were added while we ran; go around again.
				removeReference();
			}
			else
			{
				physx::PxAtomicDecrement(&mRefCount);
			}

			// the scoped lock needs to get freed before the continuation tasks get (potentially) submitted because
			// those continuation tasks might trigger events that delete this task and corrupt the memory of the
			// mutex (for example, assume this task is a member of the scene then the submitted tasks cause the simulation
			// to finish and then the scene gets released which in turn will delete this task. When this task then finally
			// continues the heap memory will be corrupted.
		}

		for (PxU32 i=0; i < referencesToRemove.size(); ++i)
			referencesToRemove[i]->removeReference();
	}

protected:
	volatile PxI32 mRefCount;								// outstanding references; task submits itself when it reaches 0
	const char* mName;										// task name for profiling/debugging; not owned
	PxInlineArray<physx::PxBaseTask*, 4> mDependents;		// tasks released when this task completes
	PxInlineArray<physx::PxBaseTask*, 4> mReferencesToRemove;	// dependents staged for release by release()
	bool mNotifySubmission;
	PxMutex mMutex; // guarding mDependents and mNotifySubmission
};
/**
\brief Specialization of FanoutTask class in order to provide the delegation mechanism.
*/
// FanoutTask variant that delegates its work to a member function Fn of an
// object of type T, passing the first staged dependent as the continuation.
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateFanoutTask : public FanoutTask, public PxUserAllocated
{
public:

	DelegateFanoutTask(PxU64 contextID, T* obj, const char* name) :
	FanoutTask(contextID, name), mObj(obj) { }

	virtual void runInternal()
	{
		// Uses the first staged dependent (if any) as the continuation handed to Fn.
		physx::PxBaseTask* continuation = mReferencesToRemove.empty() ? NULL : mReferencesToRemove[0];
		(mObj->*Fn)(continuation);
	}

	// Retargets the delegate to another object instance.
	void setObject(T* obj) { mObj = obj; }

private:
	T* mObj;	// delegate object; not owned
};
// Kicks off a task: schedules it behind the given continuation, or — when no
// continuation is supplied — executes it synchronously on the calling thread.
PX_FORCE_INLINE void startTask(Cm::Task* task, PxBaseTask* continuation)
{
	if(!continuation)
	{
		// Synchronous path: bypass the dispatcher entirely.
		task->runInternal();
		return;
	}

	// PT: TODO: just make this a PxBaseTask function?
	task->setContinuation(continuation);
	task->removeReference();
}
// Appends 'task' to an intrusive singly-linked list (via T::mNext), updating both
// the running tail pointer 'previousTask' and, for the first node, 'head'.
template<class T>
PX_FORCE_INLINE void updateTaskLinkedList(T*& previousTask, T* task, T*& head)
{
	if(!previousTask)
		head = task;				// first node becomes the list head
	else
		previousTask->mNext = task;	// otherwise chain after the current tail
	previousTask = task;
}
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,139 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_TRANSFORM_UTILS_H
#define CM_TRANSFORM_UTILS_H
#include "foundation/PxVecMath.h"
namespace
{
using namespace physx::aos;
// V3PrepareCross would help here, but it's not on all platforms yet...
// Composes two rigid transforms A*B, each given as a quaternion split into
// scalar part (w) and vector part (v), plus a position (p).
// Outputs the product quaternion (wo, vo) and position po = pa + rotate(qa, pb).
PX_FORCE_INLINE void transformFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
FloatV& wo, Vec3V& vo, Vec3V& po)
{
// Quaternion product: wo = wa*wb - va.vb, vo = wb*va + wa*vb + va x vb
wo = FSub(FMul(wa, wb), V3Dot(va, vb));
vo = V3ScaleAdd(va, wb, V3ScaleAdd(vb, wa, V3Cross(va, vb)));
// Rotation of pb by qa using: rotate(q, p) = 2*((w*w - 0.5)*p + w*(v x p) + (v.p)*v)
const Vec3V t1 = V3Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f)));
const Vec3V t2 = V3ScaleAdd(V3Cross(va, pb), wa, t1);
const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pb), t2);
po = V3ScaleAdd(t3, FLoad(2.f), pa);
}
// Composes inverse(A)*B for two rigid transforms given as split quaternions
// (w, v) plus positions (p). Outputs the quaternion conj(qa)*qb and the
// position po = rotateInv(qa, pb - pa).
PX_FORCE_INLINE void transformInvFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
FloatV& wo, Vec3V& vo, Vec3V& po)
{
// Conjugate product: wo = wa*wb + va.vb, vo = wa*vb - wb*va + vb x va
wo = FScaleAdd(wa, wb, V3Dot(va, vb));
vo = V3NegScaleSub(va, wb, V3ScaleAdd(vb, wa, V3Cross(vb, va)));
const Vec3V pt = V3Sub(pb, pa);
// Inverse rotation of pt by qa: 2*((wa*wa - 0.5)*pt + wa*(pt x va) + (va.pt)*va)
const Vec3V t1 = V3Scale(pt, FScaleAdd(wa, wa, FLoad(-0.5f)));
const Vec3V t2 = V3ScaleAdd(V3Cross(pt, va), wa, t1);
const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pt), t2);
po = V3Add(t3,t3);
}
}
namespace physx
{
namespace Cm
{
// PT: actor2World * shape2Actor
// Computes actor2World * shape2Actor into outTransform using SIMD.
// All three transforms must be 16-byte aligned (checked by the asserts),
// since aligned vector loads/stores (V3LoadA/QuatVLoadA/V4StoreA) are used.
PX_FORCE_INLINE void getStaticGlobalPoseAligned(const PxTransform& actor2World, const PxTransform& shape2Actor, PxTransform& outTransform)
{
using namespace aos;
PX_ASSERT((size_t(&actor2World)&15) == 0);
PX_ASSERT((size_t(&shape2Actor)&15) == 0);
PX_ASSERT((size_t(&outTransform)&15) == 0);
const Vec3V actor2WorldPos = V3LoadA(actor2World.p);
const QuatV actor2WorldRot = QuatVLoadA(&actor2World.q.x);
const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);
// Compose the two transforms; w/v are the scalar/vector parts of the result quaternion.
Vec3V v,p;
FloatV w;
transformFast(V4GetW(actor2WorldRot), Vec3V_From_Vec4V(actor2WorldRot), actor2WorldPos,
V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
w, v, p);
V3StoreA(p, outTransform.p);
// Store the quaternion as (x, y, z, w): vector part in xyz, scalar in w.
V4StoreA(V4SetW(v,w), &outTransform.q.x);
}
// PT: body2World * body2Actor.getInverse() * shape2Actor
// Computes body2World * inverse(body2Actor) * shape2Actor into outTransform
// using SIMD. All four transforms must be 16-byte aligned (checked by the
// asserts), since aligned vector loads/stores are used.
PX_FORCE_INLINE void getDynamicGlobalPoseAligned(const PxTransform& body2World, const PxTransform& shape2Actor, const PxTransform& body2Actor, PxTransform& outTransform)
{
PX_ASSERT((size_t(&body2World)&15) == 0);
PX_ASSERT((size_t(&shape2Actor)&15) == 0);
PX_ASSERT((size_t(&body2Actor)&15) == 0);
PX_ASSERT((size_t(&outTransform)&15) == 0);
using namespace aos;
const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);
const Vec3V body2ActorPos = V3LoadA(body2Actor.p);
const QuatV body2ActorRot = QuatVLoadA(&body2Actor.q.x);
const Vec3V body2WorldPos = V3LoadA(body2World.p);
const QuatV body2WorldRot = QuatVLoadA(&body2World.q.x);
Vec3V v1, p1, v2, p2;
FloatV w1, w2;
// Step 1: (w1, v1, p1) = inverse(body2Actor) * shape2Actor
transformInvFast(V4GetW(body2ActorRot), Vec3V_From_Vec4V(body2ActorRot), body2ActorPos,
V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
w1, v1, p1);
// Step 2: (w2, v2, p2) = body2World * (step 1 result)
transformFast(V4GetW(body2WorldRot), Vec3V_From_Vec4V(body2WorldRot), body2WorldPos,
w1, v1, p1,
w2, v2, p2);
V3StoreA(p2, outTransform.p);
// Store the quaternion as (x, y, z, w): vector part in xyz, scalar in w.
V4StoreA(V4SetW(v2, w2), &outTransform.q.x);
}
}
}
#endif

View File

@@ -0,0 +1,301 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_UTILS_H
#define CM_UTILS_H
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxBounds3.h"
#include "common/PxBase.h"
#include "foundation/PxInlineArray.h"
#include "foundation/PxArray.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
namespace physx
{
namespace Cm
{
// Copies a window of pointers out of 'src' into 'userBuffer', statically
// downcasting each element to DstType. The number of pointers written is
// clamped both by the destination capacity and by the number of elements
// remaining after 'startIndex' (zero when startIndex is past the end).
// Returns the number of pointers actually written.
template<class DstType, class SrcType>
PX_FORCE_INLINE PxU32 getArrayOfPointers(DstType** PX_RESTRICT userBuffer, PxU32 bufferSize, PxU32 startIndex, SrcType*const* PX_RESTRICT src, PxU32 size)
{
	// Signed math so a startIndex beyond the end yields 0 rather than wrapping.
	const PxU32 available = PxU32(PxMax<PxI32>(PxI32(size - startIndex), 0));
	const PxU32 nbToWrite = PxMin(available, bufferSize);
	SrcType* const* PX_RESTRICT cursor = src + startIndex;
	for(PxU32 i=0; i<nbToWrite; i++)
		userBuffer[i] = static_cast<DstType*>(cursor[i]);
	return nbToWrite;
}
// Computes the similarity transform mIInv = M * diag(invD) * M^T, i.e. rotates
// a diagonal (inverse) inertia tensor invD into another frame via rotation
// matrix M. Only the upper triangle is computed; the result is mirrored since
// the output is symmetric.
PX_CUDA_CALLABLE PX_INLINE void transformInertiaTensor(const PxVec3& invD, const PxMat33& M, PxMat33& mIInv)
{
// Rows of diag(invD) * M^T, premultiplied by the diagonal entries.
const float axx = invD.x*M(0,0), axy = invD.x*M(1,0), axz = invD.x*M(2,0);
const float byx = invD.y*M(0,1), byy = invD.y*M(1,1), byz = invD.y*M(2,1);
const float czx = invD.z*M(0,2), czy = invD.z*M(1,2), czz = invD.z*M(2,2);
mIInv(0,0) = axx*M(0,0) + byx*M(0,1) + czx*M(0,2);
mIInv(1,1) = axy*M(1,0) + byy*M(1,1) + czy*M(1,2);
mIInv(2,2) = axz*M(2,0) + byz*M(2,1) + czz*M(2,2);
// Off-diagonal terms mirrored across the diagonal (symmetric tensor).
mIInv(0,1) = mIInv(1,0) = axx*M(1,0) + byx*M(1,1) + czx*M(1,2);
mIInv(0,2) = mIInv(2,0) = axx*M(2,0) + byx*M(2,1) + czx*M(2,2);
mIInv(1,2) = mIInv(2,1) = axy*M(2,0) + byy*M(2,1) + czy*M(2,2);
}
// PT: TODO: refactor this with PxBounds3 header
// Computes the world-axis-aligned half-extents of an oriented box: the reach
// along each world axis is the sum of the absolute per-axis contributions of
// the three scaled basis vectors.
PX_FORCE_INLINE PxVec3 basisExtent(const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
	// Basis vectors scaled by the box half-extents.
	const PxVec3 v0 = basis0 * extent.x;
	const PxVec3 v1 = basis1 * extent.y;
	const PxVec3 v2 = basis2 * extent.z;
	// Max distance per component is the sum of absolute contributions.
	const float wx = PxAbs(v0.x) + PxAbs(v1.x) + PxAbs(v2.x);
	const float wy = PxAbs(v0.y) + PxAbs(v1.y) + PxAbs(v2.y);
	const float wz = PxAbs(v0.z) + PxAbs(v1.z) + PxAbs(v2.z);
	return PxVec3(wx, wy, wz);
}
// Builds the axis-aligned bounds of an oriented box from its center, its three
// basis vectors and its half-extents.
PX_FORCE_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
	const PxVec3 halfSize = basisExtent(basis0, basis1, basis2, extent);
	return PxBounds3(center - halfSize, center + halfSize);
}
// Validates a center/extents pair: both must be finite, and the extents must
// either be all non-negative (a regular box) or all equal to
// -PX_MAX_BOUNDS_EXTENTS (the sentinel encoding empty bounds).
PX_FORCE_INLINE bool isValid(const PxVec3& c, const PxVec3& e)
{
	if(!c.isFinite() || !e.isFinite())
		return false;
	if((e.x >= 0.0f) && (e.y >= 0.0f) && (e.z >= 0.0f))
		return true;
	return	(e.x == -PX_MAX_BOUNDS_EXTENTS) &&
			(e.y == -PX_MAX_BOUNDS_EXTENTS) &&
			(e.z == -PX_MAX_BOUNDS_EXTENTS);
}
// Reports whether a (valid) center/extents pair encodes the empty bounds.
// Given isValid(), a negative x extent can only be the empty-bounds sentinel.
PX_FORCE_INLINE bool isEmpty(const PxVec3& c, const PxVec3& e)
{
	PX_ASSERT(isValid(c, e));
	PX_UNUSED(c);
	return e.x<0.0f;
}
// Array with externally managed storage.
// Allocation and resize policy are managed by the owner,
// Very minimal functionality right now, just POD types
// Array with externally managed storage: all allocation and growth policy is
// delegated to the Owner through the 'realloc' member-function template
// parameter. Only supports POD element types (enforced by FailIfNonPod below).
// \tparam T			POD element type
// \tparam Owner		type owning the storage and implementing 'realloc'
// \tparam IndexType	integer type used for sizes/capacities
// \tparam realloc		owner callback that (re)allocates 'currentMem' to hold at
//						least 'requiredMinCapacity' elements, preserving the first
//						'size' elements and updating 'currentCapacity'
template <typename T,
typename Owner,
typename IndexType,
void (Owner::*realloc)(T*& currentMem, IndexType& currentCapacity, IndexType size, IndexType requiredMinCapacity)>
class OwnedArray
{
public:
OwnedArray()
: mData(0)
, mCapacity(0)
, mSize(0)
{}
~OwnedArray()		// owner must call releaseMem before destruction
{
PX_ASSERT(mCapacity==0);
}
// Appends a copy of 'element', asking the owner to grow the storage if full.
void pushBack(T& element, Owner& owner)
{
// there's a failure case here if we push an existing element which causes a resize -
// a rare case not worth coding around; if you need it, copy the element then push it.
PX_ASSERT(&element<mData || &element>=mData+mSize);
if(mSize==mCapacity)
(owner.*realloc)(mData, mCapacity, mSize, IndexType(mSize+1));
PX_ASSERT(mData && mSize<mCapacity);
mData[mSize++] = element;
}
IndexType size() const
{
return mSize;
}
// O(1) unordered removal: overwrites 'index' with the last element.
void replaceWithLast(IndexType index)
{
PX_ASSERT(index<mSize);
mData[index] = mData[--mSize];
}
T* begin() const
{
return mData;
}
T* end() const
{
return mData+mSize;
}
T& operator [](IndexType index)
{
PX_ASSERT(index<mSize);
return mData[index];
}
const T& operator [](IndexType index) const
{
PX_ASSERT(index<mSize);
return mData[index];
}
// Ensures room for at least 'capacity' elements via the owner callback.
// NOTE(review): the comparison is '>=', so capacity==mCapacity also triggers
// a realloc call — presumably intentional to guarantee strict headroom; confirm.
void reserve(IndexType capacity, Owner &owner)
{
if(capacity>=mCapacity)
(owner.*realloc)(mData, mCapacity, mSize, capacity);
}
// Returns all storage to the owner; must be called before destruction.
void releaseMem(Owner &owner)
{
mSize = 0;
(owner.*realloc)(mData, mCapacity, 0, 0);
}
private:
T* mData;				// owner-allocated element storage
IndexType mCapacity;	// number of elements the storage can hold
IndexType mSize;		// number of elements in use
// just in case someone tries to use a non-POD in here
union FailIfNonPod
{
T t;
int x;
};
};
/**
Any object deriving from PxBase needs to call this function instead of 'delete object;'.
We don't want to implement 'operator delete' in PxBase because that would impose how
memory of derived classes is allocated. Even though most or all of the time derived classes will
be user allocated, we don't want to put UserAllocatable into the API and derive from that.
*/
template<typename T>
PX_INLINE void deletePxBase(T* object)
{
	// Objects whose memory is managed externally (e.g. living inside a
	// deserialized buffer) must not be freed here: only run the destructor.
	if(!(object->getBaseFlags() & PxBaseFlag::eOWNS_MEMORY))
		object->~T();
	else
	{
		PX_DELETE(object);
	}
}
#define PX_PADDING_8 0xcd
#define PX_PADDING_16 0xcdcd
#define PX_PADDING_32 0xcdcdcdcd
/**
Macro to instantiate a type for serialization testing.
Note: Only use PX_NEW_SERIALIZED once in a scope.
*/
#if PX_CHECKED
#define PX_NEW_SERIALIZED(v,T) \
void* _buf = physx::PxReflectionAllocator<T>().allocate(sizeof(T), PX_FL); \
PxMarkSerializedMemory(_buf, sizeof(T)); \
v = PX_PLACEMENT_NEW(_buf, T)
#else
#define PX_NEW_SERIALIZED(v,T) v = PX_NEW(T)
#endif
// Helper granting serialization code access to PxArray's protected internals
// (mData). Used only via static_cast from a PxArray reference (see
// exportArray/importArray below), so it must not add any data members.
template<typename T, class Alloc>
struct ArrayAccess: public PxArray<T, Alloc>
{
// Writes the array's heap block (full capacity, not just the used size) to the stream.
void store(PxSerializationContext& context) const
{
if(this->mData && (this->mSize || this->capacity()))
context.writeData(this->mData, this->capacity()*sizeof(T));
}
// Re-targets mData at the block previously written by store(), recovered from
// the deserialization stream's extra data.
void load(PxDeserializationContext& context)
{
if(this->mData && (this->mSize || this->capacity()))
this->mData = context.readExtraData<T>(this->capacity());
}
};
template<typename T, typename Alloc>
void exportArray(const PxArray<T, Alloc>& a, PxSerializationContext& context)
{
static_cast<const ArrayAccess<T, Alloc>&>(a).store(context);
}
// Deserializes a PxArray's heap storage previously written by exportArray.
template<typename T, typename Alloc>
void importArray(PxArray<T, Alloc>& a, PxDeserializationContext& context)
{
	ArrayAccess<T, Alloc>& accessor = static_cast<ArrayAccess<T, Alloc>&>(a);
	accessor.load(context);
}
// Serializes a PxInlineArray. Inlined storage is part of the owning object and
// is serialized with it; only heap-spilled contents need a separate write.
template<typename T, PxU32 N, typename Alloc>
void exportInlineArray(const PxInlineArray<T, N, Alloc>& a, PxSerializationContext& context)
{
	if(a.isInlined())
		return;
	Cm::exportArray(a, context);
}
// Deserializes a PxInlineArray previously written by exportInlineArray; only
// heap-spilled contents have separate stream data.
template<typename T, PxU32 N, typename Alloc>
void importInlineArray(PxInlineArray<T, N, Alloc>& a, PxDeserializationContext& context)
{
	if(a.isInlined())
		return;
	Cm::importArray(a, context);
}
// Grows 'container' by 'nb' entries and returns a pointer to the first new
// entry (entries are not initialized beyond what forceSize_Unsafe implies).
// Capacity at least doubles on reallocation, keeping repeated calls amortized.
template<class T>
static PX_INLINE T* reserveContainerMemory(PxArray<T>& container, PxU32 nb)
{
	const PxU32 oldCapacity = container.capacity();
	const PxU32 newSize = container.size() + nb;
	if(newSize>oldCapacity)
	{
		// Grow geometrically, but never below what this request needs.
		const PxU32 doubled = oldCapacity ? oldCapacity*2 : 2;
		container.reserve(PxMax(newSize, doubled));
	}
	T* firstNewEntry = container.end();
	container.forceSize_Unsafe(newSize);
	return firstNewEntry;
}
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmVisualization.h"
using namespace physx;
using namespace Cm;
static const PxU32 gLimitColor = PxU32(PxDebugColor::eARGB_YELLOW);
// Renders the parent and child joint frames as coordinate bases. The parent
// frame is drawn 1.5x larger and in dark colors to tell the two apart.
void Cm::visualizeJointFrames(PxRenderOutput& out, PxReal scale, const PxTransform& parent, const PxTransform& child)
{
	if(scale==0.0f)
		return;
	const PxVec3 axisLengths(scale, scale, scale);
	out << parent;
	out << PxDebugBasis(axisLengths * 1.5f,
		PxU32(PxDebugColor::eARGB_DARKRED), PxU32(PxDebugColor::eARGB_DARKGREEN), PxU32(PxDebugColor::eARGB_DARKBLUE));
	out << child;
	out << PxDebugBasis(axisLengths);
}
// Draws a linear limit along the x-axis of frame t0: an arrow from t0's origin
// to the limit position at distance 'value', capped by a circle marking the
// limit plane. t1 is currently unused.
void Cm::visualizeLinearLimit(PxRenderOutput& out, PxReal scale, const PxTransform& t0, const PxTransform& /*t1*/, PxReal value)
{
if(scale==0.0f)
return;
// debug circle is around z-axis, and we want it around x-axis
PxTransform r(t0.p+value*t0.q.getBasisVector0(), t0.q*PxQuat(PxPi/2,PxVec3(0,1.f,0)));
out << gLimitColor;
// Arrow is emitted in world space (identity transform).
out << PxTransform(PxIdentity);
out << PxDebugArrow(t0.p,r.p-t0.p);
// 20-segment circle at the limit position, in the rotated frame r.
out << r << PxDebugCircle(20, scale*0.3f);
}
// Draws an angular limit in local frame 't': two radial spokes at the lower
// and upper limit angles (in the y-z plane, i.e. around the x-axis), joined
// by a 20-segment arc.
void Cm::visualizeAngularLimit(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal lower, PxReal upper)
{
if(scale==0.0f)
return;
out << t << gLimitColor;
// Boundary spokes from the origin to the lower/upper limit directions.
out << PxRenderOutput::LINES
<< PxVec3(0) << PxVec3(0, PxCos(lower), PxSin(lower)) * scale
<< PxVec3(0) << PxVec3(0, PxCos(upper), PxSin(upper)) * scale;
// Arc sweeping from lower to upper.
out << PxRenderOutput::LINESTRIP;
PxReal angle = lower, step = (upper-lower)/20;
for(PxU32 i=0; i<=20; i++, angle += step)
out << PxVec3(0, PxCos(angle), PxSin(angle)) * scale;
}
// Draws an elliptic swing-limit cone around the x-axis of frame 't'.
// tanQSwingY/tanQSwingZ are the limits expressed as tangent-of-quarter-angle
// for swing around the y and z axes (the parametrization used by the callers
// in this file — see ConstraintImmediateVisualizer).
void Cm::visualizeLimitCone(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ)
{
if(scale==0.0f)
return;
out << t << gLimitColor;
out << PxRenderOutput::LINES;
PxVec3 prev(0,0,0);
const PxU32 LINES = 32;
for(PxU32 i=0;i<=LINES;i++)
{
// Sample the elliptic rim of the cone at this angle.
PxReal angle = 2*PxPi/LINES*i;
PxReal c = PxCos(angle), s = PxSin(angle);
PxVec3 rv(0,-tanQSwingZ*s, tanQSwingY*c);
PxReal rv2 = rv.magnitudeSquared();
// Tan-quarter-angle rotation vector to quaternion: q = (2*rv, 1-|rv|^2)/(1+|rv|^2).
PxQuat q = PxQuat(0,2*rv.y,2*rv.z,1-rv2) * (1/(1+rv2));
// Rotate the cone axis (+x) by the rim rotation to get the rim point.
PxVec3 a = q.rotate(PxVec3(1.0f,0,0)) * scale;
// One segment along the rim (prev->a) and one from the apex to the rim (0->a).
out << prev << a << PxVec3(0) << a;
prev = a;
}
}
// Draws a double-cone (hourglass) limit around the x-axis of frame 't': two
// circular rims at x = +/-tan(angle) (scaled), each connected by spokes to the
// shared apex at the origin.
// Note: the original code kept re-using 'angle' as a loop accumulator that was
// written but never read (all geometry uses 'step * i'); that dead code is removed.
void Cm::visualizeDoubleCone(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal angle)
{
	if(scale==0.0f)
		return;
	out << t << gLimitColor;
	const PxReal height = PxTan(angle);
	const PxU32 LINES = 32;
	const PxReal step = PxPi*2/LINES;
	// Rim of the +x cone.
	out << PxRenderOutput::LINESTRIP;
	for(PxU32 i=0; i<=LINES; i++)
		out << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;
	// Rim of the -x cone.
	out << PxRenderOutput::LINESTRIP;
	for(PxU32 i=0; i<=LINES; i++)
		out << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;
	// Spokes from the shared apex to both rims.
	out << PxRenderOutput::LINES;
	for(PxU32 i=0;i<LINES;i++)
	{
		out << PxVec3(0) << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;
		out << PxVec3(0) << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;
	}
}
// Non-inlined wrappers around PxRenderOutput's force-inlined stream operators:
// calling these keeps the large inlined rendering code out of client call sites
// (see the note in CmVisualization.h).
void Cm::renderOutputDebugBox(PxRenderOutput& out, const PxBounds3& box)
{
out << PxDebugBox(box, true);
}
void Cm::renderOutputDebugCircle(PxRenderOutput& out, PxU32 s, PxReal r)
{
out << PxDebugCircle(s, r);
}
void Cm::renderOutputDebugBasis(PxRenderOutput& out, const PxDebugBasis& basis)
{
out << basis;
}
void Cm::renderOutputDebugArrow(PxRenderOutput& out, const PxDebugArrow& arrow)
{
out << arrow;
}

View File

@@ -0,0 +1,125 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_VISUALIZATION_H
#define CM_VISUALIZATION_H
#include "foundation/PxTransform.h"
#include "common/PxRenderOutput.h"
#include "PxConstraintDesc.h"
namespace physx
{
namespace Cm
{
// PT: the force-inlined functions in PxRenderOutput generate a lot of code. Use these non-inlined functions instead.
PX_PHYSX_COMMON_API void renderOutputDebugBox(PxRenderOutput& out, const PxBounds3& box);
PX_PHYSX_COMMON_API void renderOutputDebugCircle(PxRenderOutput& out, PxU32 s, PxReal r);
PX_PHYSX_COMMON_API void renderOutputDebugBasis(PxRenderOutput& out, const PxDebugBasis& basis);
PX_PHYSX_COMMON_API void renderOutputDebugArrow(PxRenderOutput& out, const PxDebugArrow& arrow);
PX_PHYSX_COMMON_API void visualizeJointFrames(PxRenderOutput& out,
PxReal scale,
const PxTransform& parent,
const PxTransform& child);
PX_PHYSX_COMMON_API void visualizeLinearLimit(PxRenderOutput& out,
PxReal scale,
const PxTransform& t0,
const PxTransform& t1,
PxReal value);
PX_PHYSX_COMMON_API void visualizeAngularLimit(PxRenderOutput& out,
PxReal scale,
const PxTransform& t0,
PxReal lower,
PxReal upper);
PX_PHYSX_COMMON_API void visualizeLimitCone(PxRenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal ySwing,
PxReal zSwing);
PX_PHYSX_COMMON_API void visualizeDoubleCone(PxRenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal angle);
// Adapter that implements the PxConstraintVisualizer interface by forwarding
// each call to the corresponding Cm:: free-function helper, rendering into a
// PxRenderOutput stream.
struct ConstraintImmediateVisualizer : public PxConstraintVisualizer
{
PxF32 mFrameScale;	// scale applied to joint-frame visualization
PxF32 mLimitScale;	// scale applied to limit visualization
PxRenderOutput& mCmOutput;	// render stream receiving the debug geometry
//Not possible to implement (the reference member forbids assignment)
ConstraintImmediateVisualizer& operator=( const ConstraintImmediateVisualizer& );
ConstraintImmediateVisualizer(PxF32 frameScale, PxF32 limitScale, PxRenderOutput& output) :
mFrameScale (frameScale),
mLimitScale (limitScale),
mCmOutput (output)
{
}
virtual void visualizeJointFrames(const PxTransform& parent, const PxTransform& child) PX_OVERRIDE
{
Cm::visualizeJointFrames(mCmOutput, mFrameScale, parent, child);
}
virtual void visualizeLinearLimit(const PxTransform& t0, const PxTransform& t1, PxReal value) PX_OVERRIDE
{
Cm::visualizeLinearLimit(mCmOutput, mLimitScale, t0, t1, value);
}
virtual void visualizeAngularLimit(const PxTransform& t0, PxReal lower, PxReal upper) PX_OVERRIDE
{
Cm::visualizeAngularLimit(mCmOutput, mLimitScale, t0, lower, upper);
}
virtual void visualizeLimitCone(const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ) PX_OVERRIDE
{
Cm::visualizeLimitCone(mCmOutput, mLimitScale, t, tanQSwingY, tanQSwingZ);
}
virtual void visualizeDoubleCone(const PxTransform& t, PxReal angle) PX_OVERRIDE
{
Cm::visualizeDoubleCone(mCmOutput, mLimitScale, t, angle);
}
// Draws a single colored segment directly into the stream.
virtual void visualizeLine( const PxVec3& p0, const PxVec3& p1, PxU32 color) PX_OVERRIDE
{
mCmOutput << color;
mCmOutput.outputSegment(p0, p1);
}
};
}
}
#endif

View File

@@ -0,0 +1,87 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/windows/PxWindowsDelayLoadHook.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "windows/CmWindowsLoadLibrary.h"
static const physx::PxDelayLoadHook* gCommonDelayLoadHook = NULL;
// Installs the user-provided delay-load hook consulted when PhysXCommon's
// delay-loaded DLL dependencies are resolved (see commonDelayHook below).
// Passing NULL restores default resolution.
void physx::PxSetPhysXCommonDelayLoadHook(const physx::PxDelayLoadHook* hook)
{
gCommonDelayLoadHook = hook;
}
// delay loading is enabled only for non static configuration
#if !defined PX_PHYSX_STATIC_LIB
// Prior to Visual Studio 2015 Update 3, these hooks were non-const.
#define DELAYIMP_INSECURE_WRITABLE_HOOKS
#include <delayimp.h>
using namespace physx;
#pragma comment(lib, "delayimp")
// Delay-load notification hook installed via __pfnDliNotifyHook2. Only the
// pre-load-library notification is handled: it lets the user-installed
// PxDelayLoadHook redirect which DLL gets loaded. Returning NULL for every
// other notification tells the delay-load helper to proceed with its default
// behavior. (The original switch returned NULL from every non-preload case
// and contained an unreachable 'break' after a 'return'; this is the
// equivalent simplified form.)
FARPROC WINAPI commonDelayHook(unsigned dliNotify, PDelayLoadInfo pdli)
{
	if(dliNotify == dliNotePreLoadLibrary)
		return Cm::physXCommonDliNotePreLoadLibrary(pdli->szDll, gCommonDelayLoadHook);
	return NULL;
}
PfnDliHook __pfnDliNotifyHook2 = commonDelayHook;
#endif

View File

@@ -0,0 +1,134 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
#define NX_USE_SDK_DLLS
#include "PhysXUpdateLoader.h"
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
#include "windows/CmWindowsModuleUpdateLoader.h"
#include "windows/CmWindowsLoadLibrary.h"
#include "stdio.h"
namespace physx { namespace Cm {
#if PX_VC
#pragma warning(disable: 4191) //'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
#endif
typedef HMODULE (*GetUpdatedModule_FUNC)(const char*, const char*);
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
typedef void (*setLogging_FUNC)(PXUL_ErrorCode, pt2LogFunc);
// Callback handed to the update-loader DLL: routes its log messages into the
// PhysX foundation error stream at the matching severity.
static void LogMessage(PXUL_ErrorCode messageType, char* message)
{
switch(messageType)
{
case PXUL_ERROR_MESSAGES:
getFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL,
"PhysX Update Loader Error: %s.", message);
break;
// NOTE(review): PX_WARN/PX_INFO presumably bundle the error code with
// file/line like (PxErrorCode::..., PX_FL) — declared outside this view; confirm.
case PXUL_WARNING_MESSAGES:
getFoundation().error(PX_WARN, "PhysX Update Loader Warning: %s.", message);
break;
case PXUL_INFO_MESSAGES:
getFoundation().error(PX_INFO, "PhysX Update Loader Information: %s.", message);
break;
default:
getFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL,
"Unknown message type from update loader.");
break;
}
}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
// Attempts to load the update-loader DLL and resolve its "GetUpdatedModule"
// entry point. On any failure mGetUpdatedModuleFunc stays NULL and
// LoadModule() falls back to loading modules directly.
CmModuleUpdateLoader::CmModuleUpdateLoader(const char* updateLoaderDllName)
: mGetUpdatedModuleFunc(NULL)
{
mUpdateLoaderDllHandle = loadLibrary(updateLoaderDllName);
if (mUpdateLoaderDllHandle != NULL)
{
mGetUpdatedModuleFunc = GetProcAddress(mUpdateLoaderDllHandle, "GetUpdatedModule");
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
// Optionally wire our LogMessage callback into the update loader (x86 only).
setLogging_FUNC setLoggingFunc;
setLoggingFunc = (setLogging_FUNC)GetProcAddress(mUpdateLoaderDllHandle, "setLoggingFunction");
if(setLoggingFunc != NULL)
{
setLoggingFunc(PXUL_ERROR_MESSAGES, LogMessage);
}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
}
}
CmModuleUpdateLoader::~CmModuleUpdateLoader()
{
	// Release the update-loader DLL if the constructor managed to load it.
	if (mUpdateLoaderDllHandle == NULL)
		return;
	FreeLibrary(mUpdateLoaderDllHandle);
	mUpdateLoaderDllHandle = NULL;
}
// Loads 'moduleName' either through the update loader (which may substitute an
// updated binary, keyed by 'appGUID'), or directly via loadLibrary when no
// update loader is available. Returns NULL on failure.
HMODULE CmModuleUpdateLoader::LoadModule(const char* moduleName, const char* appGUID)
{
HMODULE result = NULL;
if (mGetUpdatedModuleFunc != NULL)
{
// Try to get the module through PhysXUpdateLoader
GetUpdatedModule_FUNC getUpdatedModuleFunc = (GetUpdatedModule_FUNC)mGetUpdatedModuleFunc;
result = getUpdatedModuleFunc(moduleName, appGUID);
}
else
{
// If no PhysXUpdateLoader, just load the DLL directly
result = loadLibrary(moduleName);
if (result == NULL)
{
// Diagnostic only: report the Win32 error code to stdout.
const DWORD err = GetLastError();
printf("%s:%i: loadLibrary error when loading %s: %lu\n", PX_FL, moduleName, err);
}
}
return result;
}
}; // end of namespace
}; // end of namespace

View File

@@ -0,0 +1,120 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
cmake_minimum_required(VERSION 3.16)
project(PhysX C CXX)
CMAKE_POLICY(SET CMP0057 NEW) # Enable IN_LIST
# Check if this is a public release build
IF(NOT DEFINED PUBLIC_RELEASE)
SET(PUBLIC_RELEASE 0) # Default to private release
ENDIF()
IF(PUBLIC_RELEASE)
MESSAGE("Building for PUBLIC RELEASE")
ENDIF()
# User-configurable build switches.
OPTION(PX_SCALAR_MATH "Disable SIMD math" OFF)
OPTION(PX_GENERATE_STATIC_LIBRARIES "Generate static libraries" OFF)
OPTION(PX_EXPORT_LOWLEVEL_PDB "Export low level pdb's" OFF)
IF(NOT DEFINED PHYSX_ROOT_DIR)
STRING(REPLACE "\\" "/" BRD_TEMP $ENV{PHYSX_ROOT_DIR})
# This env variable is set by GenerateProjects.bat, and is no longer available when CMake rebuilds, so this stores it in the cache
SET(PHYSX_ROOT_DIR ${BRD_TEMP} CACHE INTERNAL "Root of the PhysX source tree")
ENDIF()
MESSAGE("PHYSX ROOT ${PHYSX_ROOT_DIR}")
IF(NOT EXISTS ${PHYSX_ROOT_DIR})
MESSAGE(FATAL_ERROR "PHYSX_ROOT_DIR environment variable wasn't set or was invalid.")
ENDIF()
MESSAGE("PhysX Build Platform: " ${TARGET_BUILD_PLATFORM})
MESSAGE("Using CXX Compiler: " ${CMAKE_CXX_COMPILER})
INCLUDE(NvidiaBuildOptions)
# Replace the default Debug/Release configuration set with PhysX's
# debug/checked/profile/release configurations (multi-config generators only).
IF(CMAKE_CONFIGURATION_TYPES)
SET(CMAKE_CONFIGURATION_TYPES debug checked profile release)
SET(CMAKE_CONFIGURATION_TYPES "${CMAKE_CONFIGURATION_TYPES}" CACHE STRING
"Reset config to what we need"
FORCE)
SET(CMAKE_SHARED_LINKER_FLAGS_CHECKED "")
SET(CMAKE_SHARED_LINKER_FLAGS_PROFILE "")
# Build PDBs for all configurations
SET(CMAKE_SHARED_LINKER_FLAGS "/DEBUG")
ENDIF()
# Prevent failure due to command line limitations
IF(USE_RESPONSE_FILES)
SET(CMAKE_C_USE_RESPONSE_FILE_FOR_OBJECTS 1)
SET(CMAKE_C_USE_RESPONSE_FILE_FOR_INCLUDES 1)
SET(CMAKE_C_USE_RESPONSE_FILE_FOR_LIBRARIES 1)
SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_OBJECTS 1)
SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_INCLUDES 1)
SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_LIBRARIES 1)
ENDIF()
# Propagate the PHYSX_AUTOBUILD environment variable into a CMake define.
IF($ENV{PHYSX_AUTOBUILD})
IF($ENV{PHYSX_AUTOBUILD} STREQUAL "1")
SET(PHYSX_AUTOBUILD "PHYSX_AUTOBUILD")
ENDIF()
ENDIF()
# Locate and include the platform-specific CMakeLists for TARGET_BUILD_PLATFORM.
SET(PROJECT_CMAKE_FILES_DIR source/compiler/cmake)
SET(PLATFORM_CMAKELISTS ${PHYSX_ROOT_DIR}/${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/CMakeLists.txt)
IF(NOT EXISTS ${PLATFORM_CMAKELISTS})
MESSAGE(FATAL_ERROR "Unable to find platform CMakeLists.txt for ${TARGET_BUILD_PLATFORM} at ${PLATFORM_CMAKELISTS}")
ENDIF()
SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
SET(SOURCE_DISTRO_FILE_LIST "")
SET(HEADER_GUARD_NAME "CONFIG")
SET(HEADER_CONTENT "")
FILE(READ ${PHYSX_ROOT_DIR}/buildtools/templates/boilerplate_bsd.txt BOILERPLATE_CONTENT)
# Include the platform specific CMakeLists
INCLUDE(${PHYSX_ROOT_DIR}/${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/CMakeLists.txt)
# generate PxConfig.h header that will contain PhysX configuration defines like PX_PHYSX_STATIC_LIB
CONFIGURE_FILE(${PHYSX_ROOT_DIR}/buildtools/templates/PxIncludeTemplate.h ${PHYSX_ROOT_DIR}/include/PxConfig.h)
# Optionally emit the list of files belonging to a source distribution.
IF(PX_GENERATE_SOURCE_DISTRO)
FOREACH(FILE_NAME ${SOURCE_DISTRO_FILE_LIST})
FILE(APPEND "${CMAKE_CURRENT_BINARY_DIR}/source_distro_list.txt" "${FILE_NAME}\n")
ENDFOREACH()
ENDIF()

View File

@@ -0,0 +1,252 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
# Lower-case the build type once so single-config logic can compare against it.
STRING(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LOWERCASE)
#TODO: Fix warnings
# Clang: enable -Weverything with -Werror, then individually opt out of the
# warnings known to fire on this codebase. Note this is ONE quoted multi-line
# string (line continuations), so no comments may be placed inside it.
SET(CLANG_WARNINGS "-ferror-limit=0 -Wall -Wextra -Werror -Weverything\
-Wno-unused-but-set-variable \
-Wno-switch-default \
-Wno-cast-qual \
-Wno-invalid-offsetof \
-Wno-unsafe-buffer-usage \
-Wno-alloca \
-Wno-atomic-implicit-seq-cst \
-Wno-c++98-compat-pedantic \
-Wno-c++98-compat \
-Wno-cast-align \
-Wno-conversion \
-Wno-covered-switch-default \
-Wno-deprecated \
-Wno-documentation-deprecated-sync \
-Wno-documentation-unknown-command \
-Wno-exit-time-destructors \
-Wno-extra-semi-stmt \
-Wno-float-equal \
-Wno-format-nonliteral \
-Wno-global-constructors \
-Wno-implicit-fallthrough \
-Wno-inconsistent-missing-destructor-override \
-Wno-inconsistent-missing-override \
-Wno-missing-noreturn \
-Wno-missing-prototypes \
-Wno-missing-variable-declarations \
-Wno-newline-eof \
-Wno-non-virtual-dtor \
-Wno-old-style-cast \
-Wno-padded \
-Wno-reserved-id-macro \
-Wno-suggest-destructor-override \
-Wno-suggest-override \
-Wno-switch-enum \
-Wno-undef \
-Wno-undefined-reinterpret-cast \
-Wno-unknown-warning-option \
-Wno-unreachable-code \
-Wno-unused-function \
-Wno-unused-macros \
-Wno-unused-member-function \
-Wno-unused-private-field \
-Wno-used-but-marked-unused \
-Wno-weak-template-vtables \
-Wno-weak-vtables \
-Wno-zero-as-null-pointer-constant \
-Wno-reserved-identifier \
-Wno-undefined-func-template \
")
# GCC: -Wall with -Werror, again with codebase-specific warnings disabled.
# Also a single quoted multi-line string; must remain contiguous.
SET(GCC_WARNINGS "-Wall -Werror \
-Wno-address \
-Wno-aligned-new \
-Wno-array-bounds \
-Wno-class-memaccess \
-Wno-conversion-null \
-Wno-format \
-Wno-format-overflow \
-Wno-invalid-offsetof \
-Wno-misleading-indentation \
-Wno-mismatched-new-delete \
-Wno-nonnull \
-Wno-nonnull-compare \
-Wno-pragmas \
-Wno-restrict \
-Wno-stringop-overflow \
-Wno-stringop-overread \
-Wno-subobject-linkage \
-Wno-template-id-cdtor \
-Wno-uninitialized \
-Wno-unused-but-set-variable \
-Wno-unused-function \
-Wno-unused-result \
-Wno-unknown-pragmas \
-Wno-use-after-free \
")
# aarch64-specific codegen adjustments (compiler-dependent).
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
# gw: these optimizations are disabled for now. fp-contract causes floating point inaccuracy and deviation in behavior,
# tree-vrp and delete-null-pointer-checks cause known bugs on some versions of gcc
IF ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
SET(AARCH64_FLAGS "-ffp-contract=off -fno-delete-null-pointer-checks -faligned-new")
ELSEIF ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
SET(AARCH64_FLAGS "-ffp-contract=off -Wno-unsupported-floating-point-opt")
ENDIF()
ELSE()
SET(AARCH64_FLAGS "")
ENDIF()
# Flags shared by both compilers: C++14, no RTTI/exceptions, per-function/data
# sections (enables dead-stripping at link), hidden symbol visibility.
SET(COMMON_CXX_FLAGS "-std=c++14 -D_GLIBCXX_USE_CXX11_ABI=1 -fno-rtti -fno-exceptions -ffunction-sections -fdata-sections -fvisibility=hidden ${AARCH64_FLAGS}")
# Compose the full PhysX CXX flag set and CACHE INTERNAL it so other
# CMakeLists (e.g. the GPU sub-build) can reuse it.
IF ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# -ffp-exception-behavior is only passed on Clang >= 10 (older versions
# presumably lack the option -- the version gate below encodes that).
IF ("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "10.0.0")
SET(PHYSX_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_CXX_FLAGS} -fstrict-aliasing -Wstrict-aliasing=2 -Wno-shadow ${CLANG_WARNINGS}" CACHE INTERNAL "PhysX CXX")
ELSE()
SET(PHYSX_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_CXX_FLAGS} -fstrict-aliasing -Wstrict-aliasing=2 -ffp-exception-behavior=maytrap ${CLANG_WARNINGS}" CACHE INTERNAL "PhysX CXX")
ENDIF()
ELSEIF ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
SET(PHYSX_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_CXX_FLAGS} -fno-strict-aliasing ${GCC_WARNINGS}" CACHE INTERNAL "PhysX CXX")
ENDIF()
# Optional: link with the mold linker for faster link times.
IF (PX_USE_MOLD_LINKER)
SET (PHYSX_CXX_FLAGS "${PHYSX_CXX_FLAGS} -Wno-unused-command-line-argument -fuse-ld=mold " CACHE INTERNAL "PhysX CXX")
ENDIF()
# Build debug info for all configurations
SET(PHYSX_CXX_FLAGS_DEBUG "-O0 -g3 -gdwarf-2" CACHE INTERNAL "PhysX Debug CXX Flags")
SET(PHYSX_CXX_FLAGS_CHECKED "-O3 -g3 -gdwarf-2" CACHE INTERNAL "PhysX Checked CXX Flags")
SET(PHYSX_CXX_FLAGS_PROFILE "-O3" CACHE INTERNAL "PhysX Profile CXX Flags")
SET(PHYSX_CXX_FLAGS_RELEASE "-O3" CACHE INTERNAL "PhysX Release CXX Flags")
# These flags are local to the directory the CMakeLists.txt is in, so don't get carried over to OTHER CMakeLists.txt (thus the CACHE variables above)
SET(CMAKE_CXX_FLAGS ${PHYSX_CXX_FLAGS})
SET(CMAKE_CXX_FLAGS_DEBUG ${PHYSX_CXX_FLAGS_DEBUG})
SET(CMAKE_CXX_FLAGS_CHECKED ${PHYSX_CXX_FLAGS_CHECKED})
SET(CMAKE_CXX_FLAGS_PROFILE ${PHYSX_CXX_FLAGS_PROFILE})
SET(CMAKE_CXX_FLAGS_RELEASE ${PHYSX_CXX_FLAGS_RELEASE})
# PX_PUBLIC_RELEASE is always defined explicitly (1 or 0) rather than left unset.
IF(PUBLIC_RELEASE)
SET(PUBLIC_RELEASE_FLAG "PX_PUBLIC_RELEASE=1")
ELSE()
SET(PUBLIC_RELEASE_FLAG "PX_PUBLIC_RELEASE=0")
ENDIF()
# Controls PX_NVTX for all projects
SET(NVTX_FLAG "PX_NVTX=0")
# OmniPVD support is switched off when cross-compiling to aarch64; otherwise on.
IF(${CMAKE_CROSSCOMPILING} AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
SET(PX_SUPPORT_OMNI_PVD_FLAG "PX_SUPPORT_OMNI_PVD=0")
ELSE()
SET(PX_SUPPORT_OMNI_PVD_FLAG "PX_SUPPORT_OMNI_PVD=1")
ENDIF()
# Set DISABLE_CUDA_PHYSX when GPU projects are not enabled
IF(NOT PX_GENERATE_GPU_PROJECTS_ONLY AND NOT PX_GENERATE_GPU_PROJECTS)
# CMake automatically adds the appropriate prefix (-D for GCC/Clang) when the definition
# is added to the target via target_compile_definitions or add_definitions
SET(DISABLE_CUDA_DEF "DISABLE_CUDA_PHYSX")
ENDIF()
# Per-configuration preprocessor definition lists, CACHE INTERNAL'd for reuse
# by the individual project cmake files. Semicolon-separated CMake lists.
# NOTE(review): the debug list has no NDEBUG (asserts enabled); checked/profile/
# release define NDEBUG.
SET(PHYSX_LINUX_COMPILE_DEFS "${PHYSX_AUTOBUILD};${PUBLIC_RELEASE_FLAG};${DISABLE_CUDA_DEF}" CACHE INTERNAL "Base PhysX preprocessor definitions")
SET(PHYSX_LINUX_DEBUG_COMPILE_DEFS "PX_DEBUG=1;PX_CHECKED=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Debug PhysX preprocessor definitions")
SET(PHYSX_LINUX_CHECKED_COMPILE_DEFS "NDEBUG;PX_CHECKED=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Checked PhysX preprocessor definitions")
SET(PHYSX_LINUX_PROFILE_COMPILE_DEFS "NDEBUG;PX_PROFILE=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Profile PhysX preprocessor definitions")
SET(PHYSX_LINUX_RELEASE_COMPILE_DEFS "NDEBUG;PX_SUPPORT_PVD=0;PX_SUPPORT_OMNI_PVD=0" CACHE INTERNAL "Release PhysX preprocessor definitions")
# IF(PUBLIC_RELEASE)
# IF(NOT PHYSX_PHYSXGPU_PATH)
# SET(PHYSX_PHYSXGPU_PATH "$ENV{PM_PhysXGpu_PATH}/bin" CACHE INTERNAL "PhysXGpu copy path")
# ENDIF()
# IF(NOT GPU_LIB_COPIED)
# IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
# SET(GPU_LIB_COPIED 1 CACHE INTERNAL "PhysX GPU so files copied")
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.aarch64/checked/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.aarch64/checked/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.aarch64/profile/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.aarch64/release/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
# INSTALL(PROGRAMS ${PHYSX_PHYSXGPU_PATH}/linux.aarch64/$<$<CONFIG:debug>:checked>$<$<CONFIG:release>:release>$<$<CONFIG:checked>:checked>$<$<CONFIG:profile>:profile>/libPhysXGpu_64.so
# DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile> OPTIONAL)
# ELSEIF(UNIX)
# SET(GPU_LIB_COPIED 1 CACHE INTERNAL "PhysX GPU so files copied")
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.x86_64/checked/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.x86_64/checked/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.x86_64/profile/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
# FILE(COPY ${PHYSX_PHYSXGPU_PATH}/linux.x86_64/release/libPhysXGpu_64.so DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
# INSTALL(PROGRAMS ${PHYSX_PHYSXGPU_PATH}/linux.x86_64/$<$<CONFIG:debug>:checked>$<$<CONFIG:release>:release>$<$<CONFIG:checked>:checked>$<$<CONFIG:profile>:profile>/libPhysXGpu_64.so
# DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile> OPTIONAL)
# ENDIF()
# ENDIF()
# ENDIF()
# Include all of the projects
IF(PX_GENERATE_GPU_PROJECTS_ONLY)
# GPU-only builds still need these three CPU-side libraries as dependencies.
INCLUDE(PhysXCommon.cmake)
INCLUDE(PhysXFoundation.cmake)
INCLUDE(LowLevelAABB.cmake)
# NOTE(review): LowLevelAABB gets no FOLDER property here, unlike the other
# two targets -- possibly an oversight; confirm against IDE folder layout.
SET_PROPERTY(TARGET PhysXCommon PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXFoundation PROPERTY FOLDER "PhysX SDK")
ELSE()
INCLUDE(PhysXFoundation.cmake)
INCLUDE(LowLevel.cmake)
INCLUDE(LowLevelAABB.cmake)
INCLUDE(LowLevelDynamics.cmake)
INCLUDE(PhysX.cmake)
INCLUDE(PhysXCharacterKinematic.cmake)
INCLUDE(PhysXCommon.cmake)
INCLUDE(PhysXCooking.cmake)
INCLUDE(PhysXExtensions.cmake)
INCLUDE(PhysXVehicle.cmake)
INCLUDE(SceneQuery.cmake)
INCLUDE(SimulationController.cmake)
INCLUDE(PhysXPvdSDK.cmake)
INCLUDE(PhysXTask.cmake)
# Set folder PhysX SDK to all common SDK source projects
SET_PROPERTY(TARGET PhysX PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCharacterKinematic PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCommon PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCooking PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXExtensions PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXVehicle2 PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevel PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevelAABB PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevelDynamics PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET SceneQuery PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET SimulationController PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXPvdSDK PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXTask PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXFoundation PROPERTY FOLDER "PhysX SDK")
# Install the distributable libraries into per-configuration subdirectories
# selected at install time via generator expressions.
SET(PHYSXDISTRO_LIBS PhysXFoundation PhysX PhysXCharacterKinematic PhysXPvdSDK PhysXCommon PhysXCooking PhysXExtensions PhysXVehicle2)
INSTALL(
TARGETS ${PHYSXDISTRO_LIBS}
EXPORT PhysXSDK
DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile>
)
ENDIF()

View File

@@ -0,0 +1,239 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
# User-facing build options for Windows.
OPTION(PX_COPY_EXTERNAL_DLL "Copy external dlls into SDK bin directory" OFF)
OPTION(PX_FLOAT_POINT_PRECISE_MATH "Float point precise math" OFF)
OPTION(PX_USE_NVTX "Enabled NVTX profiling" OFF)
# We define the CXX flags for this CMakeLists and all others that are included afterwards. This is a GLOBAL setting.
# If/when the solutions go standalone (say, visual tests) - those CMakeLists will need to be fixed.
# MSVC warnings disabled codebase-wide (padding, unreferenced inline, etc.).
SET(PHYSX_WARNING_DISABLES "/wd4514 /wd4820 /wd4127 /wd4710 /wd4711 /wd4577 /wd4996")
# Cache the CXX flags so the other CMakeLists.txt can use them if needed
IF(PX_FLOAT_POINT_PRECISE_MATH)
SET(PHYSX_FP_MODE "/fp:precise")
ELSE()
SET(PHYSX_FP_MODE "/fp:fast")
ENDIF()
# Common Flags
# PT: changed /Ox to /O2 because "the /Ox compiler option enables only a subset of the speed optimization options enabled by /O2."
# See https://docs.microsoft.com/en-us/cpp/build/reference/ox-full-optimization?view=vs-2019
SET(PHYSX_COMMON_FLAGS "/std:c++14 /d2Zi+ /MP /WX /W4 /GF /GS- /Gd ${PHYSX_FP_MODE} /Oy ${PHYSX_WARNING_DISABLES}")
# Per-configuration flags; WINCRT_DEBUG/WINCRT_NDEBUG are presumably the CRT
# selection flags (/MTd, /MT, ...) set elsewhere -- they are not defined here.
SET(PHYSX_COMMON_FLAGS_DEBUG "/Od ${WINCRT_DEBUG} /RTCu /Zi")
SET(PHYSX_COMMON_FLAGS_CHECKED "/O2 ${WINCRT_NDEBUG} /Zi")
SET(PHYSX_COMMON_FLAGS_PROFILE "/O2 ${WINCRT_NDEBUG} /Zi")
SET(PHYSX_COMMON_FLAGS_RELEASE "/O2 ${WINCRT_NDEBUG} /Zi")
# C++ Specific Flags
# 32-bit builds need /arch:SSE2 explicitly; x64 implies it. /GR- disables RTTI.
IF(CMAKE_CL_64)
SET(PHYSX_CXX_FLAGS "${PHYSX_COMMON_FLAGS} /GR-" CACHE INTERNAL "PhysX CXX")
ELSE()
SET(PHYSX_CXX_FLAGS "/arch:SSE2 ${PHYSX_COMMON_FLAGS} /GR-" CACHE INTERNAL "PhysX CXX")
ENDIF()
# C Flags
IF(CMAKE_CL_64)
SET(PHYSX_C_FLAGS "${PHYSX_COMMON_FLAGS}" CACHE INTERNAL "PhysX C")
ELSE()
SET(PHYSX_C_FLAGS "/arch:SSE2 ${PHYSX_COMMON_FLAGS}" CACHE INTERNAL "PhysX C")
ENDIF()
# Cache the per-configuration flag sets for reuse by other CMakeLists.
SET(PHYSX_CXX_FLAGS_DEBUG "${PHYSX_COMMON_FLAGS_DEBUG}" CACHE INTERNAL "PhysX Debug CXX Flags")
SET(PHYSX_CXX_FLAGS_CHECKED "${PHYSX_COMMON_FLAGS_CHECKED}" CACHE INTERNAL "PhysX Checked CXX Flags")
SET(PHYSX_CXX_FLAGS_PROFILE "${PHYSX_COMMON_FLAGS_PROFILE}" CACHE INTERNAL "PhysX Profile CXX Flags")
SET(PHYSX_CXX_FLAGS_RELEASE "${PHYSX_COMMON_FLAGS_RELEASE}" CACHE INTERNAL "PhysX Release CXX Flags")
SET(PHYSX_C_FLAGS_DEBUG "${PHYSX_COMMON_FLAGS_DEBUG}" CACHE INTERNAL "PhysX Debug C Flags")
SET(PHYSX_C_FLAGS_CHECKED "${PHYSX_COMMON_FLAGS_CHECKED}" CACHE INTERNAL "PhysX Checked C Flags")
SET(PHYSX_C_FLAGS_PROFILE "${PHYSX_COMMON_FLAGS_PROFILE}" CACHE INTERNAL "PhysX Profile C Flags")
SET(PHYSX_C_FLAGS_RELEASE "${PHYSX_COMMON_FLAGS_RELEASE}" CACHE INTERNAL "PhysX Release C Flags")
# PX_PUBLIC_RELEASE is always defined explicitly (1 or 0) rather than left unset.
IF(PUBLIC_RELEASE)
SET(PUBLIC_RELEASE_FLAG "PX_PUBLIC_RELEASE=1")
ELSE()
SET(PUBLIC_RELEASE_FLAG "PX_PUBLIC_RELEASE=0")
ENDIF()
# cache lib type defs
IF(PX_GENERATE_STATIC_LIBRARIES)
SET(PHYSX_LIBTYPE_DEFS "PX_PHYSX_STATIC_LIB;" CACHE INTERNAL "PhysX lib type defs")
ENDIF()
# These flags are local to the directory the CMakeLists.txt is in, so don't get carried over to OTHER CMakeLists.txt (thus the CACHE variables above)
SET(CMAKE_CXX_FLAGS ${PHYSX_CXX_FLAGS})
SET(CMAKE_CXX_FLAGS_DEBUG ${PHYSX_CXX_FLAGS_DEBUG})
SET(CMAKE_CXX_FLAGS_CHECKED ${PHYSX_CXX_FLAGS_CHECKED})
SET(CMAKE_CXX_FLAGS_PROFILE ${PHYSX_CXX_FLAGS_PROFILE})
SET(CMAKE_CXX_FLAGS_RELEASE ${PHYSX_CXX_FLAGS_RELEASE})
# Build PDBs for all configurations
SET(CMAKE_SHARED_LINKER_FLAGS "/DEBUG /INCREMENTAL:NO")
SET(CMAKE_SHARED_LINKER_FLAGS_DEBUG "/DEBUG /INCREMENTAL:NO")
SET(CMAKE_SHARED_LINKER_FLAGS_CHECKED "/DEBUG /INCREMENTAL:NO /OPT:REF")
SET(CMAKE_SHARED_LINKER_FLAGS_PROFILE "/DEBUG /INCREMENTAL:NO /OPT:REF")
SET(CMAKE_SHARED_LINKER_FLAGS_RELEASE "/DEBUG /INCREMENTAL:NO /OPT:REF")
IF(CMAKE_CL_64)
SET(WIN64_FLAG "WIN64")
ENDIF(CMAKE_CL_64)
# PX_SIMD_DISABLED forces the scalar math path in the SDK.
IF(PX_SCALAR_MATH)
SET(SCALAR_MATH_FLAG "PX_SIMD_DISABLED")
ENDIF()
# Controls PX_NVTX for all projects
IF(PX_USE_NVTX)
SET(NVTX_FLAG "PX_NVTX=1")
ELSE()
SET(NVTX_FLAG "PX_NVTX=0")
ENDIF()
# Set DISABLE_CUDA_PHYSX when GPU projects are not enabled.
IF(NOT PX_GENERATE_GPU_PROJECTS_ONLY AND NOT PX_GENERATE_GPU_PROJECTS)
# CMake automatically adds the appropriate prefix (/D for MSVC) when the definition
# is added to the target via target_compile_definitions or add_definitions
SET(DISABLE_CUDA_DEF "DISABLE_CUDA_PHYSX")
ENDIF()
# Now set PHYSX_WINDOWS_COMPILE_DEFS including the additional definition if applicable.
SET(PHYSX_WINDOWS_COMPILE_DEFS "WIN32;${WIN64_FLAG};${SCALAR_MATH_FLAG};_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_WINSOCK_DEPRECATED_NO_WARNINGS;${PHYSX_AUTOBUILD};${PUBLIC_RELEASE_FLAG};${DISABLE_CUDA_DEF}"
CACHE INTERNAL "Base PhysX preprocessor definitions")
# OmniPVD is only supported on 64-bit Windows builds.
IF(CMAKE_CL_64)
SET(PX_SUPPORT_OMNI_PVD_FLAG "PX_SUPPORT_OMNI_PVD=1")
ELSE()
SET(PX_SUPPORT_OMNI_PVD_FLAG "PX_SUPPORT_OMNI_PVD=0")
ENDIF()
# Per-configuration preprocessor definition lists, cached for the sub-projects.
SET(PHYSX_WINDOWS_DEBUG_COMPILE_DEFS "PX_DEBUG=1;PX_CHECKED=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Debug PhysX preprocessor definitions")
SET(PHYSX_WINDOWS_CHECKED_COMPILE_DEFS "PX_CHECKED=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Checked PhysX preprocessor definitions")
SET(PHYSX_WINDOWS_PROFILE_COMPILE_DEFS "PX_PROFILE=1;${NVTX_FLAG};PX_SUPPORT_PVD=1;${PX_SUPPORT_OMNI_PVD_FLAG}" CACHE INTERNAL "Profile PhysX preprocessor definitions")
SET(PHYSX_WINDOWS_RELEASE_COMPILE_DEFS "PX_SUPPORT_PVD=0;PX_SUPPORT_OMNI_PVD=0" CACHE INTERNAL "Release PhysX preprocessor definitions")
# copy the external dlls
# Copies PhysXDevice / freeglut DLLs next to the built executables at CONFIGURE
# time (FILE(COPY) runs when CMake runs, not at build time).
IF(PX_COPY_EXTERNAL_DLL OR PUBLIC_RELEASE)
# Default copy locations come from packman-provided environment variables.
IF(NOT PHYSX_SLN_PHYSXDEVICE_PATH)
SET(PHYSX_SLN_PHYSXDEVICE_PATH "$ENV{PM_PhysXDevice_PATH}/bin/x86/" CACHE INTERNAL "PhysX device copy path")
ENDIF()
IF(NOT PHYSX_SLN_FREEGLUT_PATH)
SET(PHYSX_SLN_FREEGLUT_PATH "$ENV{PM_freeglut_PATH}/bin/" CACHE INTERNAL "PhysX freeglut copy path")
ENDIF()
# 64-bit only; the 32-bit branch below is commented out.
IF(CMAKE_CL_64)
IF(NOT PUBLIC_RELEASE)
FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
ENDIF()
# Debug config gets the debug freeglut build; all other configs get release.
FILE(COPY ${PHYSX_SLN_FREEGLUT_PATH}/win64/freeglutd.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
FILE(COPY ${PHYSX_SLN_FREEGLUT_PATH}/win64/freeglut.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
FILE(COPY ${PHYSX_SLN_FREEGLUT_PATH}/win64/freeglut.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
FILE(COPY ${PHYSX_SLN_FREEGLUT_PATH}/win64/freeglut.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
# ELSE()
# FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
# FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
# FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
# FILE(COPY ${PHYSX_SLN_PHYSXDEVICE_PATH}/PhysXDevice.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
ENDIF()
ENDIF()
# for public release we copy the dlls for GPU other platforms so we dont keep duplicates around the repository
# IF(PUBLIC_RELEASE)
# IF(NOT PHYSX_SLN_PHYSXGPU_PATH)
# SET(PHYSX_SLN_PHYSXGPU_PATH "$ENV{PM_PhysXGpu_PATH}/bin/" CACHE INTERNAL "PhysXGpu copy path")
# ENDIF()
# IF(NOT GPU_DLL_COPIED)
# SET(GPU_DLL_COPIED 1 CACHE INTERNAL "PhysX GPU dlls copied")
# IF(CMAKE_CL_64)
# FILE(COPY ${PHYSX_SLN_PHYSXGPU_PATH}/win.x86_64.vc142.mt/checked/PhysXGpu_64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_CHECKED})
# FILE(COPY ${PHYSX_SLN_PHYSXGPU_PATH}/win.x86_64.vc142.mt/profile/PhysXGpu_64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_PROFILE})
# FILE(COPY ${PHYSX_SLN_PHYSXGPU_PATH}/win.x86_64.vc142.mt/release/PhysXGpu_64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_RELEASE})
# FILE(COPY ${PHYSX_SLN_PHYSXGPU_PATH}/win.x86_64.vc142.mt/checked/PhysXGpu_64.dll DESTINATION ${PX_EXE_OUTPUT_DIRECTORY_DEBUG})
# ENDIF()
# ENDIF()
# ENDIF()
# Include all of the projects
IF(PX_GENERATE_GPU_PROJECTS_ONLY)
# GPU-only builds still need these three CPU-side libraries as dependencies.
INCLUDE(PhysXFoundation.cmake)
INCLUDE(PhysXCommon.cmake)
INCLUDE(LowLevelAABB.cmake)
# NOTE(review): LowLevelAABB gets no FOLDER property here, unlike the other
# two targets -- possibly an oversight; confirm against IDE folder layout.
SET_PROPERTY(TARGET PhysXFoundation PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCommon PROPERTY FOLDER "PhysX SDK")
ELSE()
INCLUDE(PhysXFoundation.cmake)
INCLUDE(LowLevel.cmake)
INCLUDE(LowLevelAABB.cmake)
INCLUDE(LowLevelDynamics.cmake)
INCLUDE(PhysX.cmake)
INCLUDE(PhysXCharacterKinematic.cmake)
INCLUDE(PhysXCommon.cmake)
INCLUDE(PhysXCooking.cmake)
INCLUDE(PhysXExtensions.cmake)
INCLUDE(PhysXVehicle.cmake)
INCLUDE(SceneQuery.cmake)
INCLUDE(SimulationController.cmake)
INCLUDE(PhysXPvdSDK.cmake)
INCLUDE(PhysXTask.cmake)
# Set folder PhysX SDK to all common SDK source projects
SET_PROPERTY(TARGET PhysX PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCharacterKinematic PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCommon PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXCooking PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXExtensions PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXVehicle2 PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevel PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevelAABB PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET LowLevelDynamics PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET SceneQuery PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET SimulationController PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXPvdSDK PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXTask PROPERTY FOLDER "PhysX SDK")
SET_PROPERTY(TARGET PhysXFoundation PROPERTY FOLDER "PhysX SDK")
# PhysXTask is only distributed separately for non-static (DLL) builds;
# in static builds it is presumably linked into the other libraries.
IF(PX_GENERATE_STATIC_LIBRARIES)
SET(PHYSXDISTRO_LIBS PhysXFoundation PhysX PhysXCharacterKinematic PhysXPvdSDK PhysXCommon PhysXCooking PhysXExtensions PhysXVehicle2)
ELSE()
SET(PHYSXDISTRO_LIBS PhysXFoundation PhysX PhysXCharacterKinematic PhysXPvdSDK PhysXCommon PhysXCooking PhysXExtensions PhysXVehicle2 PhysXTask)
ENDIF()
# Install into per-configuration subdirectories via generator expressions.
INSTALL(
TARGETS ${PHYSXDISTRO_LIBS}
EXPORT PhysXSDK
DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile>
)
ENDIF()
# Record the static-lib define in the generated PxConfig.h content.
IF(PX_GENERATE_STATIC_LIBRARIES)
STRING(APPEND HEADER_CONTENT "#define PX_PHYSX_STATIC_LIB\n")
ENDIF()

View File

@@ -0,0 +1,108 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
cmake_minimum_required(VERSION 3.16)
# PROJECT(CUDA) will enable building CUDA files ( .cu )
PROJECT(PhysX C CXX CUDA)
# When building with packman-provided Windows SDK / MSVC, point Visual Studio
# at those include/lib directories instead of the globally installed ones.
IF(DEFINED ENV{PM_winsdk_PATH} AND NOT "$ENV{PM_winsdk_PATH}" STREQUAL "" AND DEFINED ENV{PM_msvc_PATH} AND NOT "$ENV{PM_msvc_PATH}" STREQUAL "")
SET(CMAKE_VS_SDK_INCLUDE_DIRECTORIES "$ENV{PM_winsdk_PATH}/include/ucrt;$ENV{PM_winsdk_PATH}/include/um;$ENV{PM_winsdk_PATH}/include/shared")
SET(CMAKE_VS_SDK_LIBRARY_DIRECTORIES "$ENV{PM_winsdk_PATH}/lib/ucrt/x64;$ENV{PM_winsdk_PATH}/lib/um/x64;$ENV{VCToolsInstallDir}/lib/x64;$ENV{VCToolsInstallDir}/atlmfc/lib/x64")
ENDIF()
OPTION(PX_GENERATE_GPU_STATIC_LIBRARIES "Generate PhysXGPU static libraries" OFF)
OPTION(PX_GENERATE_GPU_REDUCED_ARCHITECTURES "Generate only a reduced number of GPU architectures for faster compilation" OFF)
CMAKE_POLICY(SET CMP0057 NEW) # Enable IN_LIST
# This is required to be defined by external callers!
IF(NOT DEFINED PHYSX_ROOT_DIR)
MESSAGE(FATAL_ERROR "PHYSX_ROOT_DIR variable wasn't set.")
ENDIF()
IF(NOT EXISTS ${PHYSX_ROOT_DIR})
MESSAGE(FATAL_ERROR "PHYSX_ROOT_DIR variable was invalid.")
ENDIF()
INCLUDE(NvidiaBuildOptions)
INCLUDE(SetCudaArch)
SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
# Multi-config generators: replace the default config set with PhysX's four
# configurations and FORCE-cache the value so IDEs pick it up.
IF(CMAKE_CONFIGURATION_TYPES)
SET(CMAKE_CONFIGURATION_TYPES debug checked profile release)
SET(CMAKE_CONFIGURATION_TYPES "${CMAKE_CONFIGURATION_TYPES}" CACHE STRING
"Reset config to what we need"
FORCE)
# Need to define these at least once.
SET(CMAKE_SHARED_LINKER_FLAGS_CHECKED "/DEBUG")
SET(CMAKE_SHARED_LINKER_FLAGS_PROFILE "/DEBUG")
SET(CMAKE_EXE_LINKER_FLAGS_PROFILE "/DEBUG")
SET(CMAKE_EXE_LINKER_FLAGS_CHECKED "/DEBUG")
ENDIF()
# Locate and validate the platform-specific GPU CMakeLists before including it.
SET(PROJECT_CMAKE_FILES_DIR source/compiler/cmakegpu)
SET(PLATFORM_CMAKELISTS ${PHYSX_ROOT_DIR}/${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/CMakeLists.txt)
IF(NOT EXISTS ${PLATFORM_CMAKELISTS})
MESSAGE(FATAL_ERROR "Unable to find platform CMakeLists.txt for ${TARGET_BUILD_PLATFORM} at ${PLATFORM_CMAKELISTS}")
ENDIF()
SET(SOURCE_DISTRO_FILE_LIST "")
# CUDA Flags that are common to all platforms:
# Note: We need to compile to sass for all architectures we want to support.
# However, to enable forward compatibility with new architectures,
# we need to compile ptx for the latest arch supported by the cuda toolkit.
# This will allow the jit compiler in the driver to compile ptx to sass of the newer arch.
# No need to set --generate-code=arch=compute_75,code=[compute_75,sm_75]
# since that's the default set in: compiler\internal\CMakeLists.txt (using CMAKE_CUDA_ARCHITECTURES)
# see policy CMP0104
IF(PX_GENERATE_GPU_REDUCED_ARCHITECTURES)
GENERATE_ARCH_CODE_LIST(SASS "80,86,89,90,100,120" PTX "120")
ELSE()
# Volta is the minimum compute arch required because NGC supports V100
GENERATE_ARCH_CODE_LIST(SASS "70,80,86,89,90,100,120" PTX "120")
ENDIF()
# Force response files off because clangd does not parse them
set(CMAKE_CUDA_USE_RESPONSE_FILE_FOR_INCLUDES 0)
set(CMAKE_CUDA_USE_RESPONSE_FILE_FOR_LIBRARIES 0)
set(CMAKE_CUDA_USE_RESPONSE_FILE_FOR_OBJECTS 0)
# Cuda setup that is the same for all platforms and sub-projects
SET(PHYSX_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Werror=all-warnings -use_fast_math -ftz=true -prec-div=false -prec-sqrt=false -t0 -D_CONSOLE" CACHE INTERNAL "PhysX CUDA")
# Include the platform specific CMakeLists (The other CUDA flags that are specific to each platform are defined there)
INCLUDE(${PHYSX_ROOT_DIR}/${PROJECT_CMAKE_FILES_DIR}/${TARGET_BUILD_PLATFORM}/CMakeLists.txt)
# Optionally dump the collected source-distribution file list.
IF(PX_GENERATE_SOURCE_DISTRO)
FOREACH(FILE_NAME ${SOURCE_DISTRO_FILE_LIST})
FILE(APPEND "${CMAKE_CURRENT_BINARY_DIR}/source_distro_list.txt" "${FILE_NAME}\n")
ENDFOREACH()
ENDIF()

View File

@@ -0,0 +1,102 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
# Sanity checks: this sub-build relies on cached variables exported by the main
# PhysX CMakeLists; abort configuration if they are missing.
# Fix: the message mode keyword is FATAL_ERROR (with underscore). The previous
# "MESSAGE(FATAL ERROR ...)" was not recognized as a mode, so CMake printed
# "FATAL" and "ERROR" as part of a plain notice and configuration CONTINUED
# past the guard, deferring the failure to a confusing later error.
IF(NOT DEFINED PHYSX_LINUX_COMPILE_DEFS)
MESSAGE(FATAL_ERROR "Snippets uses the PhysX compile defs, and they're not defined when they need to be.")
ENDIF()
IF (NOT DEFINED PHYSX_CXX_FLAGS)
MESSAGE(FATAL_ERROR "Snippets uses the PhysX CXX flags, and they're not defined when they need to be.")
ENDIF()
# Get the CXX Flags from the Cached variables set by the PhysX CMakeLists
SET(CMAKE_CXX_FLAGS "${PHYSX_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS_DEBUG "${PHYSX_CXX_FLAGS_DEBUG}")
SET(CMAKE_CXX_FLAGS_CHECKED ${PHYSX_CXX_FLAGS_CHECKED})
SET(CMAKE_CXX_FLAGS_PROFILE ${PHYSX_CXX_FLAGS_PROFILE})
SET(CMAKE_CXX_FLAGS_RELEASE ${PHYSX_CXX_FLAGS_RELEASE})
# CUDA flags specific to Linux:
# On aarch64, SSE options don't apply; instead an nvcc diagnostic is suppressed.
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
SET(MSSE2_OPTIONS "")
# Add the diagnostic suppression flag specifically for aarch64
SET(AARCH64_DIAG_SUPPRESS_FLAGS "-diag-suppress 20054")
ELSE()
# x86_64 host-compiler options; trailing comma is intentional -- the variable
# is spliced into a comma-separated --compiler-options list below.
SET(MSSE2_OPTIONS "-msse2,-mfpmath=sse,-m64,")
SET(AARCH64_DIAG_SUPPRESS_FLAGS "")
ENDIF()
# CUDA flags specific to linux:
SET(CUDA_SUPPRESS_WARNINGS "-Wno-unused-function -Wno-inconsistent-missing-override -Wno-parentheses -Wno-unknown-warning-option")
SET(PHYSX_CUDA_FLAGS "${PHYSX_CUDA_FLAGS} ${CUDA_SUPPRESS_WARNINGS} ${AARCH64_DIAG_SUPPRESS_FLAGS}")
# Per-configuration CUDA flags: debug uses device debug (-G), the others keep
# -lineinfo for profiler source correlation. CACHE INTERNAL'd for reuse.
SET(PHYSX_CUDA_FLAGS_DEBUG "${CUDA_DEBUG_FLAG} --compiler-options=-Wall,-O3,-fPIC,${MSSE2_OPTIONS}-fvisibility=hidden -G -g" CACHE INTERNAL "PhysX Debug CUDA Flags")
SET(PHYSX_CUDA_FLAGS_CHECKED "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=-Wall,-O3,-fPIC,${MSSE2_OPTIONS}-fvisibility=hidden" CACHE INTERNAL "PhysX Checked CUDA Flags")
SET(PHYSX_CUDA_FLAGS_PROFILE "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=-Wall,-O3,-fPIC,${MSSE2_OPTIONS}-fvisibility=hidden" CACHE INTERNAL "PhysX Profile CUDA Flags")
SET(PHYSX_CUDA_FLAGS_RELEASE "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=-Wall,-O3,-fPIC,${MSSE2_OPTIONS}-fvisibility=hidden" CACHE INTERNAL "PhysX Release CUDA Flags")
# These flags are local to the directory the CMakeLists.txt is in, so don't get carried over to OTHER CMakeLists.txt (thus the CACHE variables above)
SET(CMAKE_CUDA_FLAGS ${PHYSX_CUDA_FLAGS})
SET(CMAKE_CUDA_FLAGS_DEBUG ${PHYSX_CUDA_FLAGS_DEBUG})
SET(CMAKE_CUDA_FLAGS_CHECKED ${PHYSX_CUDA_FLAGS_CHECKED})
SET(CMAKE_CUDA_FLAGS_PROFILE ${PHYSX_CUDA_FLAGS_PROFILE})
SET(CMAKE_CUDA_FLAGS_RELEASE ${PHYSX_CUDA_FLAGS_RELEASE})
# SET(GENERATED_GPU_CUDA_FILES "")
# Include all of the projects
INCLUDE(PhysXBroadphaseGpu.cmake)
INCLUDE(PhysXCommonGpu.cmake)
INCLUDE(PhysXNarrowphaseGpu.cmake)
INCLUDE(PhysXSimulationControllerGpu.cmake)
INCLUDE(PhysXSolverGpu.cmake)
INCLUDE(PhysXCudaContextManager.cmake)
INCLUDE(PhysXArticulationGpu.cmake)
INCLUDE(PhysXGpuDependencies.cmake)
INCLUDE(PhysXGpu.cmake) # must be last
# Group all GPU targets under one IDE folder.
SET_PROPERTY(TARGET PhysXBroadphaseGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXCommonGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXNarrowphaseGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXSimulationControllerGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXArticulationGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXSolverGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXGpuDependencies PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXCudaContextManager PROPERTY FOLDER "PhysX SDK/GPU")
# Only the umbrella PhysXGpu library is installed/exported; the component
# libraries above are presumably linked into it.
SET(PHYSXDISTRO_LIBS PhysXGpu)
INSTALL(
TARGETS ${PHYSXDISTRO_LIBS}
EXPORT PhysXSDK
DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile>
)

View File

@@ -0,0 +1,109 @@
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
# Sanity guards: this file consumes compile definitions and CXX flags cached by
# the main PhysX CMakeLists, so abort the configure step early if they are
# missing. Note: the mode keyword must be FATAL_ERROR — "FATAL ERROR" (with a
# space) is not a recognized message() mode, so the text would merely be
# printed and configuration would continue with broken flags.
IF(NOT DEFINED PHYSX_WINDOWS_COMPILE_DEFS)
	MESSAGE(FATAL_ERROR "PHYSXGPU uses the PhysX compile defs, and they're not defined when they need to be.")
ENDIF()
IF(NOT DEFINED PHYSX_CXX_FLAGS)
	MESSAGE(FATAL_ERROR "PHYSXGPU uses the PhysX CXX flags, and they're not defined when they need to be.")
ENDIF()
# Get the CXX Flags from the Cached variables set by the PhysX CMakeLists
SET(CMAKE_CXX_FLAGS "${PHYSX_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS_DEBUG ${PHYSX_CXX_FLAGS_DEBUG})
SET(CMAKE_CXX_FLAGS_CHECKED ${PHYSX_CXX_FLAGS_CHECKED})
SET(CMAKE_CXX_FLAGS_PROFILE ${PHYSX_CXX_FLAGS_PROFILE})
SET(CMAKE_CXX_FLAGS_RELEASE ${PHYSX_CXX_FLAGS_RELEASE})
# Get the C Flags from the Cached variables set by the PhysX CMakeLists
SET(CMAKE_C_FLAGS "${PHYSX_C_FLAGS}")
SET(CMAKE_C_FLAGS_DEBUG ${PHYSX_C_FLAGS_DEBUG})
SET(CMAKE_C_FLAGS_CHECKED ${PHYSX_C_FLAGS_CHECKED})
SET(CMAKE_C_FLAGS_PROFILE ${PHYSX_C_FLAGS_PROFILE})
SET(CMAKE_C_FLAGS_RELEASE ${PHYSX_C_FLAGS_RELEASE})
# CUDA flags specific to windows:
SET(CUDA_SUPPRESS_WARNINGS "-Xptxas -w -ftemplate-backtrace-limit=2")
SET(PHYSX_CUDA_FLAGS "${PHYSX_CUDA_FLAGS} -D_WIN32_WINNT=0x0601 ${CUDA_SUPPRESS_WARNINGS}")
# Build debug info for all configurations
# Host (MSVC) options ride along via --compiler-options; -G/-g enable device
# debugging in the Debug configuration only.
SET(PHYSX_CUDA_FLAGS_DEBUG "${CUDA_DEBUG_FLAG} --compiler-options=/W4,/nologo,/Zi,/Od,/RTC1,/wd4505,/wd4459,/wd4324,/wd4244 -G -g" CACHE INTERNAL "PhysX Debug CUDA Flags")
SET(PHYSX_CUDA_FLAGS_CHECKED "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=/W4,/nologo,/Ot,/Ox,/Zi,/wd4505,/wd4459,/wd4324,/wd4244" CACHE INTERNAL "PhysX Checked CUDA Flags")
SET(PHYSX_CUDA_FLAGS_PROFILE "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=/W4,/nologo,/Ot,/Ox,/Zi,/wd4505,/wd4459,/wd4324,/wd4244" CACHE INTERNAL "PhysX Profile CUDA Flags")
SET(PHYSX_CUDA_FLAGS_RELEASE "${CUDA_NDEBUG_FLAG} -lineinfo --compiler-options=/W4,/nologo,/Ot,/Ox,/Zi,/wd4505,/wd4459,/wd4324,/wd4244" CACHE INTERNAL "PhysX Release CUDA Flags")
# These flags are local to the directory the CMakeLists.txt is in, so don't get carried over to OTHER CMakeLists.txt (thus the CACHE variables above)
SET(CMAKE_CUDA_FLAGS ${PHYSX_CUDA_FLAGS})
SET(CMAKE_CUDA_FLAGS_DEBUG ${PHYSX_CUDA_FLAGS_DEBUG})
SET(CMAKE_CUDA_FLAGS_CHECKED ${PHYSX_CUDA_FLAGS_CHECKED})
SET(CMAKE_CUDA_FLAGS_PROFILE ${PHYSX_CUDA_FLAGS_PROFILE})
SET(CMAKE_CUDA_FLAGS_RELEASE ${PHYSX_CUDA_FLAGS_RELEASE})
IF(PX_GENERATE_GPU_STATIC_LIBRARIES)
SET(PHYSXGPU_LIBTYPE_DEFS "PX_PHYSX_GPU_STATIC;" CACHE INTERNAL "PhysX GPU lib type defs")
ENDIF()
# Build PDBs for all configurations
SET(CMAKE_SHARED_LINKER_FLAGS "/DEBUG /INCREMENTAL:NO")
SET(CMAKE_SHARED_LINKER_FLAGS_DEBUG "/DEBUG /INCREMENTAL:NO")
SET(CMAKE_SHARED_LINKER_FLAGS_CHECKED "/DEBUG /INCREMENTAL:NO /OPT:REF")
SET(CMAKE_SHARED_LINKER_FLAGS_PROFILE "/DEBUG /INCREMENTAL:NO /OPT:REF")
SET(CMAKE_SHARED_LINKER_FLAGS_RELEASE "/DEBUG /INCREMENTAL:NO /OPT:REF")
# SET(GENERATED_GPU_CUDA_FILES "")
# Include all of the GPU projects
INCLUDE(PhysXBroadphaseGpu.cmake)
INCLUDE(PhysXCommonGpu.cmake)
INCLUDE(PhysXNarrowphaseGpu.cmake)
INCLUDE(PhysXSimulationControllerGpu.cmake)
INCLUDE(PhysXSolverGpu.cmake)
INCLUDE(PhysXCudaContextManager.cmake)
INCLUDE(PhysXArticulationGpu.cmake)
INCLUDE(PhysXGpuDependencies.cmake)
INCLUDE(PhysXGpu.cmake) # must be the last PhysXGPU
# Group all GPU targets under one IDE solution folder.
SET_PROPERTY(TARGET PhysXBroadphaseGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXCommonGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXNarrowphaseGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXSimulationControllerGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXArticulationGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXSolverGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXGpuDependencies PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXGpu PROPERTY FOLDER "PhysX SDK/GPU")
SET_PROPERTY(TARGET PhysXCudaContextManager PROPERTY FOLDER "PhysX SDK/GPU")
# Only the umbrella PhysXGpu library is installed/exported.
SET(PHYSXDISTRO_LIBS PhysXGpu)
INSTALL(
TARGETS ${PHYSXDISTRO_LIBS}
EXPORT PhysXSDK
# Generator expressions pick the per-configuration output directory.
DESTINATION $<$<CONFIG:debug>:${PX_ROOT_LIB_DIR}/debug>$<$<CONFIG:release>:${PX_ROOT_LIB_DIR}/release>$<$<CONFIG:checked>:${PX_ROOT_LIB_DIR}/checked>$<$<CONFIG:profile>:${PX_ROOT_LIB_DIR}/profile>
)

View File

@@ -0,0 +1,90 @@
// Microsoft Visual C++ generated resource script.
//
#include "resource.h"
#define APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
//
// Generated from the TEXTINCLUDE 2 resource.
//
#include "windows.h"
/////////////////////////////////////////////////////////////////////////////
#undef APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
// English (U.S.) resources
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
#ifdef _WIN32
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
#pragma code_page(1252)
#endif //_WIN32
#ifdef APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// TEXTINCLUDE
//
1 TEXTINCLUDE
BEGIN
"resource.h\0"
END
2 TEXTINCLUDE
BEGIN
"#include ""windows.h""\r\r\r\0"
END
3 TEXTINCLUDE
BEGIN
"\0"
END
#endif // APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// Version
//
// Version resource embedded in the PhysX DLL. Numeric and string values come
// from the RC_* macros in resource.h (derived from PxPhysicsVersion.h).
VS_VERSION_INFO VERSIONINFO
FILEVERSION RC_PHYSX_VER
PRODUCTVERSION RC_PHYSX_VER
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
// FILEOS 0x4 = VOS__WINDOWS32, FILETYPE 0x2 = VFT_DLL (per VERSIONINFO spec).
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
// "040904b0" = U.S. English (0x0409), Unicode code page (0x04b0 = 1200).
BLOCK "040904b0"
BEGIN
VALUE "CompanyName", RC_COMPANY_NAME_STR
VALUE "FileDescription", "PhysX " RC_PTR_STR "bit Dynamic Link Library"
VALUE "FileVersion", RC_PHYSX_VER_STR
VALUE "InternalName", "PhysX_" RC_PTR_STR
VALUE "LegalCopyright", RC_LEGAL_COPYRIGHT_STR
VALUE "OriginalFilename", "PhysX_" RC_PTR_STR ".dll"
VALUE "ProductName", RC_PRODUCT_NAME_STR
VALUE "ProductVersion", RC_PHYSX_VER_STR
END
END
BLOCK "VarFileInfo"
BEGIN
// Must match the StringFileInfo block key: language 0x409, code page 1200.
VALUE "Translation", 0x409, 1200
END
END
#endif // English (U.S.) resources
/////////////////////////////////////////////////////////////////////////////

View File

@@ -0,0 +1,90 @@
// Microsoft Visual C++ generated resource script.
//
#include "resource.h"
#define APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
//
// Generated from the TEXTINCLUDE 2 resource.
//
#include "windows.h"
/////////////////////////////////////////////////////////////////////////////
#undef APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
// English (U.S.) resources
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
#ifdef _WIN32
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
#pragma code_page(1252)
#endif //_WIN32
#ifdef APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// TEXTINCLUDE
//
1 TEXTINCLUDE
BEGIN
"resource.h\0"
END
2 TEXTINCLUDE
BEGIN
"#include ""windows.h""\r\r\r\0"
END
3 TEXTINCLUDE
BEGIN
"\0"
END
#endif // APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// Version
//
// Version resource embedded in the PhysXCommon DLL. Values come from the RC_*
// macros in resource.h (derived from PxPhysicsVersion.h).
VS_VERSION_INFO VERSIONINFO
FILEVERSION RC_PHYSX_VER
PRODUCTVERSION RC_PHYSX_VER
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
// FILEOS 0x4 = VOS__WINDOWS32, FILETYPE 0x2 = VFT_DLL (per VERSIONINFO spec).
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
// "040904b0" = U.S. English (0x0409), Unicode code page (0x04b0 = 1200).
BLOCK "040904b0"
BEGIN
VALUE "CompanyName", RC_COMPANY_NAME_STR
VALUE "FileDescription", "PhysXCommon " RC_PTR_STR "bit Dynamic Link Library"
VALUE "FileVersion", RC_PHYSX_VER_STR
VALUE "InternalName", "PhysXCommon_" RC_PTR_STR
VALUE "LegalCopyright", RC_LEGAL_COPYRIGHT_STR
VALUE "OriginalFilename", "PhysXCommon_" RC_PTR_STR ".dll"
VALUE "ProductName", RC_PRODUCT_NAME_STR
VALUE "ProductVersion", RC_PHYSX_VER_STR
END
END
BLOCK "VarFileInfo"
BEGIN
// Must match the StringFileInfo block key: language 0x409, code page 1200.
VALUE "Translation", 0x409, 1200
END
END
#endif // English (U.S.) resources
/////////////////////////////////////////////////////////////////////////////

View File

@@ -0,0 +1,91 @@
// Microsoft Visual C++ generated resource script.
//
#include "resource.h"
#define APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
//
// Generated from the TEXTINCLUDE 2 resource.
//
#include "windows.h"
/////////////////////////////////////////////////////////////////////////////
#undef APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
// English (U.S.) resources
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
#ifdef _WIN32
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
#pragma code_page(1252)
#endif //_WIN32
#ifdef APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// TEXTINCLUDE
//
1 TEXTINCLUDE
BEGIN
"resource.h\0"
END
2 TEXTINCLUDE
BEGIN
"#include ""windows.h""\r\r\r\0"
END
3 TEXTINCLUDE
BEGIN
"\0"
END
#endif // APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// Version
//
// Version resource embedded in the PhysXCooking DLL. Values come from the RC_*
// macros in resource.h (derived from PxPhysicsVersion.h).
VS_VERSION_INFO VERSIONINFO
FILEVERSION RC_PHYSX_VER
PRODUCTVERSION RC_PHYSX_VER
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
// FILEOS 0x4 = VOS__WINDOWS32, FILETYPE 0x2 = VFT_DLL (per VERSIONINFO spec).
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
// "040904b0" = U.S. English (0x0409), Unicode code page (0x04b0 = 1200).
BLOCK "040904b0"
BEGIN
VALUE "CompanyName", RC_COMPANY_NAME_STR
VALUE "FileDescription", "PhysXCooking " RC_PTR_STR "bit Dynamic Link Library"
VALUE "FileVersion", RC_PHYSX_VER_STR
VALUE "InternalName", "PhysXCooking_" RC_PTR_STR
VALUE "LegalCopyright", RC_LEGAL_COPYRIGHT_STR
VALUE "OriginalFilename", "PhysXCooking_" RC_PTR_STR ".dll"
VALUE "ProductName", RC_PRODUCT_NAME_STR
VALUE "ProductVersion", RC_PHYSX_VER_STR
END
END
BLOCK "VarFileInfo"
BEGIN
// Must match the StringFileInfo block key: language 0x409, code page 1200.
VALUE "Translation", 0x409, 1200
END
END
#endif // English (U.S.) resources
/////////////////////////////////////////////////////////////////////////////

View File

@@ -0,0 +1,89 @@
// Microsoft Visual C++ generated resource script.
//
#include "resource.h"
#define APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
//
// Generated from the TEXTINCLUDE 2 resource.
//
#include "windows.h"
/////////////////////////////////////////////////////////////////////////////
#undef APSTUDIO_READONLY_SYMBOLS
/////////////////////////////////////////////////////////////////////////////
// English (U.S.) resources
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
#ifdef _WIN32
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
#pragma code_page(1252)
#endif //_WIN32
#ifdef APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// TEXTINCLUDE
//
1 TEXTINCLUDE
BEGIN
"resource.h\0"
END
2 TEXTINCLUDE
BEGIN
"#include ""windows.h""\r\r\0"
END
3 TEXTINCLUDE
BEGIN
"\0"
END
#endif // APSTUDIO_INVOKED
/////////////////////////////////////////////////////////////////////////////
//
// Version
//
// Version resource embedded in the PhysXGpu DLL. Values come from the RC_*
// macros in resource.h (derived from PxPhysicsVersion.h).
VS_VERSION_INFO VERSIONINFO
FILEVERSION RC_PHYSX_VER
PRODUCTVERSION RC_PHYSX_VER
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
// FILEOS 0x4 = VOS__WINDOWS32, FILETYPE 0x2 = VFT_DLL (per VERSIONINFO spec).
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
// "040904b0" = U.S. English (0x0409), Unicode code page (0x04b0 = 1200).
BLOCK "040904b0"
BEGIN
VALUE "CompanyName", RC_COMPANY_NAME_STR
VALUE "FileDescription", "PhysXGpu " RC_PTR_STR "bit Dynamic Link Library"
VALUE "FileVersion", RC_PHYSX_VER_STR
VALUE "InternalName", "PhysXGpu_" RC_PTR_STR
VALUE "LegalCopyright", RC_LEGAL_COPYRIGHT_STR
VALUE "OriginalFilename", "PhysXGpu_" RC_PTR_STR ".dll"
VALUE "ProductName", RC_PRODUCT_NAME_STR
VALUE "ProductVersion", RC_PHYSX_VER_STR
END
END
BLOCK "VarFileInfo"
BEGIN
// Must match the StringFileInfo block key: language 0x409, code page 1200.
VALUE "Translation", 0x409, 1200
END
END
#endif // English (U.S.) resources
/////////////////////////////////////////////////////////////////////////////

View File

@@ -0,0 +1,62 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//{{NO_DEPENDENCIES}}
// Microsoft Visual C++ generated include file.
// Used by *.rc files
//
// Next default values for new objects
//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 101
#define _APS_NEXT_COMMAND_VALUE 40001
#define _APS_NEXT_CONTROL_VALUE 1000
#define _APS_NEXT_SYMED_VALUE 101
#endif
#endif
// Pull the SDK version numbers so the embedded version resource tracks the code.
#include "..\..\..\..\include\foundation\PxPhysicsVersion.h"
// Two-step stringification idiom: RC_GETSTR expands its argument (a macro)
// before RC_STRINGIFY turns it into a string literal.
#define RC_STRINGIFY(x) #x
#define RC_GETSTR(x) RC_STRINGIFY(x)
// Comma-separated form for FILEVERSION/PRODUCTVERSION (major,minor,bugfix,0).
#define RC_PHYSX_VER PX_PHYSICS_VERSION_MAJOR,PX_PHYSICS_VERSION_MINOR,PX_PHYSICS_VERSION_BUGFIX,0
// Dotted string form, e.g. "5.x.y.0", for the StringFileInfo values.
#define RC_PHYSX_VER_STR RC_GETSTR(PX_PHYSICS_VERSION_MAJOR) "." RC_GETSTR(PX_PHYSICS_VERSION_MINOR) "." RC_GETSTR(PX_PHYSICS_VERSION_BUGFIX) ".0"
#define RC_COMPANY_NAME_STR "NVIDIA Corporation"
// NOTE(review): the year here (2023) lags the 2008-2025 source headers — confirm whether it should be bumped.
#define RC_LEGAL_COPYRIGHT_STR "Copyright (C) 2023 NVIDIA Corporation"
// "64"/"32" suffix used in file descriptions and DLL names. _WIN64 implies
// _WIN32, so the 64-bit branch must be checked first.
#if defined(_WIN64)
#define RC_PTR_STR "64"
#elif defined(_WIN32)
#define RC_PTR_STR "32"
#endif
#define RC_PRODUCT_NAME_STR "PhysX"

View File

@@ -0,0 +1,52 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef CUDA_CONTEXT_MANAGER_H
#define CUDA_CONTEXT_MANAGER_H
#include "foundation/PxPreprocessor.h"
// The whole header is compiled out when GPU support is disabled.
#if PX_SUPPORT_GPU_PHYSX
namespace physx
{
class PxCudaContextManager;
class PxCudaContextManagerDesc;
class PxErrorCallback;
/**
Creates the CUDA context manager used by PhysX and APEX.

\param desc               configuration for the context manager
\param errorCallback      receives errors raised during creation and use
\param launchSynchronous  set to true for CUDA to report the actual point of failure
\return the new context manager (ownership presumably passes to the caller — confirm release semantics at the call site)
*/
PxCudaContextManager* createCudaContextManager(const PxCudaContextManagerDesc& desc, PxErrorCallback& errorCallback, bool launchSynchronous);
}
#endif
#endif

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Renamed guard from __CUDA_KERNEL_WRANGLER__: identifiers containing a double
// underscore are reserved to the implementation ([lex.name]).
#ifndef CUDA_KERNEL_WRANGLER_H
#define CUDA_KERNEL_WRANGLER_H
#include "foundation/PxPreprocessor.h"
// Keep this header safe for inclusion in headers that are shared with device
// code: all host-only content below is compiled out under the CUDA compiler.
#if !PX_CUDA_COMPILER
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#pragma clang diagnostic ignored "-Wdisabled-macro-expansion"
#endif
#include <cuda.h>
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif
namespace physx
{
class PxCudaContextManager;
class PxCudaContext;
class PxErrorCallback;	// forward declaration so this header does not rely on transitive includes

// Resolves a caller-supplied list of CUDA kernel names to CUfunction handles
// at construction time and exposes them by index.
class KernelWrangler : public PxUserAllocated
{
	PX_NOCOPY(KernelWrangler)
public:
	// funcNames must outlive this object: the array is referenced, not copied.
	// Resolution failures are reported through errorCallback and set hadError().
	KernelWrangler(PxCudaContextManager& cudaContextManager, PxErrorCallback& errorCallback, const char** funcNames, uint16_t numFuncs);
	virtual ~KernelWrangler() {}

	// Returns the CUfunction resolved for funcIndex; asserts it was found.
	PX_FORCE_INLINE CUfunction getCuFunction(uint16_t funcIndex) const
	{
		CUfunction func = mCuFunctions[ funcIndex ];
		PX_ASSERT(func);
		return func;
	}

	// Returns the kernel name for funcIndex (from the caller-supplied list).
	const char* getCuFunctionName(uint16_t funcIndex) const;

	// True if any kernel failed to resolve in the constructor.
	PX_FORCE_INLINE bool hadError() const { return mError; }

protected:
	bool mError;
	const char** mKernelNames;				// externally owned, same order as mCuFunctions
	PxArray<CUfunction> mCuFunctions;		// indexed by funcIndex
	PxCudaContextManager& mCudaContextManager;
	PxCudaContext* mCudaContext;
	PxErrorCallback& mErrorCallback;
};
}
#endif
#endif

View File

@@ -0,0 +1,168 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef PXG_MEMORY_TRACKER_H
#define PXG_MEMORY_TRACKER_H
#include "foundation/PxAllocator.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundation.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxMutex.h"
#include "foundation/PxMemory.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxString.h"
#include "foundation/PxAssert.h"
#include <stdio.h>
// usage:
//
// create a static MemTracker object in your allocator .cpp
// use registerMemory/unregisterMemory in your allocation/deallocation functions.
//
// please wrap all tracking code in PX_DEBUG to avoid contaminating release builds.
//
// One tracked allocation record: address, CPU/GPU origin, size, and the
// file/line passed to registerMemory (typically the allocation site).
struct AllocInfo
{
	const void* mPtr;			// tracked allocation address
	bool mIsGpuPointer;			// true if the pointer refers to device memory
	physx::PxU64 mNumBytes;		// size of the allocation in bytes
	const char* mFileName;		// file string supplied at registration
	physx::PxI32 mLineNumber;	// line number supplied at registration

	AllocInfo(const void* ptr, bool isGpuPointer, physx::PxU64 numBytes, const char* fileName, physx::PxI32 lineNumber) :
		mPtr(ptr), mIsGpuPointer(isGpuPointer), mNumBytes(numBytes), mFileName(fileName), mLineNumber(lineNumber)
	{
	}

	// Memberwise copy; returns *this per assignment-operator convention
	// (the previous version returned void).
	PX_FORCE_INLINE AllocInfo& operator = (const AllocInfo& other)
	{
		mPtr = other.mPtr;
		mIsGpuPointer = other.mIsGpuPointer;
		mNumBytes = other.mNumBytes;
		mFileName = other.mFileName;
		mLineNumber = other.mLineNumber;
		return *this;
	}
};
class MemTracker
{
AllocInfo* mMemBlockList;
physx::PxU32 mCapacity;
physx::PxU32 mNumElementsInUse;
physx::PxRawAllocator mAllocator;
physx::PxMutexT<physx::PxRawAllocator> mMutex;
void doubleSize()
{
mCapacity = 2 * mCapacity;
AllocInfo* mNewPtr = (AllocInfo*)mAllocator.allocate(mCapacity * sizeof(AllocInfo), PX_FL);
physx::PxMemCopy(reinterpret_cast<void*>(mNewPtr), reinterpret_cast<const void*>(mMemBlockList), mNumElementsInUse * sizeof(AllocInfo));
mAllocator.deallocate(mMemBlockList);
mMemBlockList = mNewPtr;
}
public:
MemTracker()
{
mCapacity = 64;
mMemBlockList = (AllocInfo*)mAllocator.allocate(mCapacity * sizeof(AllocInfo), PX_FL);
mNumElementsInUse = 0;
}
void registerMemory(void* ptr, bool isGpuMemory, physx::PxU64 numBytes, const char* filename, physx::PxI32 lineNumber)
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mNumElementsInUse == mCapacity)
doubleSize();
mMemBlockList[mNumElementsInUse] = AllocInfo(ptr, isGpuMemory, numBytes, filename, lineNumber);
++mNumElementsInUse;
}
bool unregisterMemory(void* ptr, bool isGpuMemory)
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mMemBlockList)
for (physx::PxU32 i = 0; i < mNumElementsInUse; ++i)
{
if (mMemBlockList[i].mPtr == ptr && mMemBlockList[i].mIsGpuPointer == isGpuMemory)
{
mMemBlockList[i] = mMemBlockList[mNumElementsInUse - 1];
--mNumElementsInUse;
return true;
}
}
return false;
}
void checkForLeaks()
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mMemBlockList)
{
for (physx::PxU32 i = 0; i < mNumElementsInUse; ++i)
{
const AllocInfo& info = mMemBlockList[i];
if(PxIsFoundationValid()) // error callback requires foundation
{
char msg[512];
physx::Pxsnprintf(msg, 512, "Memory not freed: Ptr: %p, numBytes: %zu, file: %s, line: %i isDeviceMem %u\n", info.mPtr, info.mNumBytes, info.mFileName, info.mLineNumber, info.mIsGpuPointer);
PxGetErrorCallback()->reportError(physx::PxErrorCode::eINTERNAL_ERROR, msg, PX_FL);
}
else
{
printf("Memory not freed: Ptr: %p, numBytes: %zu, file: %s, line: %i isDeviceMem %u\n", info.mPtr, info.mNumBytes, info.mFileName, info.mLineNumber, info.mIsGpuPointer);
}
}
// assert to make tests fail.
//if (mNumElementsInUse > 0)
// PX_ALWAYS_ASSERT();
}
}
~MemTracker()
{
checkForLeaks();
if (mMemBlockList)
{
mAllocator.deallocate(mMemBlockList);
mMemBlockList = NULL;
}
}
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,232 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#include <stdio.h>
#include "foundation/PxPreprocessor.h"
#include "foundation/PxAssert.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxString.h"
// from the point of view of this source file the GPU library is linked statically
#ifndef PX_PHYSX_GPU_STATIC
#define PX_PHYSX_GPU_STATIC
#endif
#include "PxPhysXGpu.h"
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#pragma clang diagnostic ignored "-Wdisabled-macro-expansion"
#endif
#include <cuda.h>
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif
#include <texture_types.h>
#include <vector_types.h>
#include "cudamanager/PxCudaContextManager.h"
#include "cudamanager/PxCudaContext.h"
#include "CudaKernelWrangler.h"
using namespace physx;
const char* KernelWrangler::getCuFunctionName(uint16_t funcIndex) const
{
	// Look the name up in the externally supplied list rather than in
	// gFunctionTable: that table's entries are not ordered like mCuFunctions.
	const char* const kernelName = mKernelNames[funcIndex];
	return kernelName;
}
// Resolves each requested kernel name to a CUfunction handle by scanning the
// globally registered function table, loading from the owning CUDA module.
// On any failure mError is set and the problem is reported through
// errorCallback; the object is still constructed (callers check hadError()).
KernelWrangler::KernelWrangler(PxCudaContextManager& cudaContextManager, PxErrorCallback& errorCallback, const char** funcNames, uint16_t numFuncs)
: mError(false)
, mKernelNames(NULL)
, mCuFunctions("CuFunctions")
, mCudaContextManager(cudaContextManager)
, mCudaContext(cudaContextManager.getCudaContext())
, mErrorCallback(errorCallback)
{
// PT: TODO: consider revisiting this code so that the function order is kept the same between mCuFunctions and gFunctionTable.
// That way the initial getCuFunctionName implementation could be kept and we could decouple the code from the external funcNames array again.
// PT: proper names defined in PxgKernelWrangler.cpp. We assume the data there remains valid for the lifetime of the app and we don't need to do a copy.
PX_ASSERT(funcNames);
mKernelNames = funcNames;
// matchup funcNames to CUDA modules, get CUfunction handles
CUmodule* cuModules = cudaContextManager.getCuModules();
PxKernelIndex* cuFunctionTable = PxGpuGetCudaFunctionTable();
const PxU32 cuFunctionTableSize = PxGpuGetCudaFunctionTableSize();
// Pre-size so unresolved entries stay NULL (getCuFunction asserts on them).
mCuFunctions.resize(numFuncs, NULL);
// The CUDA context must be current on this thread for moduleGetFunction.
if (mCudaContextManager.tryAcquireContext())
{
for (uint32_t i = 0; i < numFuncs; ++i)
{
// search through all known functions
for (uint32_t j = 0; ; ++j)
{
if (j == cuFunctionTableSize)
{
// Name not present in the registered table at all.
// printf("Could not find registered CUDA function '%s'.\n", funcNames[i]);
char buffer[256];
Pxsnprintf(buffer, 256, "Could not find registered CUDA function '%s'.", funcNames[i]);
mErrorCallback.reportError(PxErrorCode::eINTERNAL_ERROR, buffer, PX_FL);
mError = true;
break;
}
if (!Pxstrcmp(cuFunctionTable[j].functionName, funcNames[i]))
{
// Name found: fetch the handle from the module that registered it.
PxCUresult ret = mCudaContext->moduleGetFunction(&mCuFunctions[i], cuModules[cuFunctionTable[j].moduleIndex], funcNames[i]);
if (ret != CUDA_SUCCESS)
{
char buffer[256];
Pxsnprintf(buffer, 256, "Could not find CUDA module containing function '%s'.", funcNames[i]);
mErrorCallback.reportError(PxErrorCode::eINTERNAL_ERROR, buffer, PX_FL);
mError = true;
// return;
}
break;
}
}
}
mCudaContextManager.releaseContext();
}
else
{
char buffer[256];
Pxsnprintf(buffer, 256, "Failed to acquire the cuda context.");
mErrorCallback.reportError(PxErrorCode::eINTERNAL_ERROR, buffer, PX_FL);
mError = true;
}
}
/*
* Workaround hacks for using nvcc --compiler output object files
* without linking with CUDART. We must implement our own versions
* of these functions that the object files are hard-coded to call into.
* These calls are all made _before_ main() during static initialization
* of this DLL.
*/
#include <driver_types.h>
#if PX_WINDOWS_FAMILY
#define CUDARTAPI __stdcall
#endif
struct uint3;
struct dim3;
extern "C"
void** CUDARTAPI __cudaRegisterFatBinary(void* fatBin)
{
return PxGpuCudaRegisterFatBinary(fatBin);
}
extern "C"
void CUDARTAPI __cudaRegisterFatBinaryEnd(void ** /*fatCubinHandle*/)
{
}
extern "C"
void CUDARTAPI __cudaUnregisterFatBinary(void** fatCubinHandle)
{
// jcarius: not ideal because the module may still be loaded
PxGpuGetCudaModuleTable()[(int)(size_t) fatCubinHandle] = 0;
}
extern "C"
void CUDARTAPI __cudaRegisterTexture(void**, const struct textureReference*, const void**, const char*, int, int, int)
{
}
extern "C" void CUDARTAPI __cudaRegisterVar(void**, char*, char*, const char*, int, int, int, int)
{
}
extern "C" void CUDARTAPI __cudaRegisterShared(void**, void**)
{
}
// CUDART shim: records the device-side kernel name against the module index
// encoded in the fatbin handle (note the integer cast of the handle).
extern "C"
void CUDARTAPI __cudaRegisterFunction(void** fatCubinHandle, const char*,
	char*, const char* deviceName, int, uint3*, uint3*, dim3*, dim3*, int*)
{
	PxGpuCudaRegisterFunction((int)(size_t) fatCubinHandle, deviceName);
}
/* These functions are implemented just to resolve link dependencies */
// Link-resolution stub: reports success without launching anything.
extern "C"
cudaError_t CUDARTAPI cudaLaunch(const char* entry)
{
	PX_UNUSED(entry);
	return cudaSuccess;
}
// Link-resolution stub: reports success without launching anything.
extern "C"
cudaError_t CUDARTAPI cudaLaunchKernel( const void* , dim3 , dim3 , void** , size_t , cudaStream_t )
{
	return cudaSuccess;
}
// Link-resolution stub: argument marshalling is not used; always succeeds.
extern "C"
cudaError_t CUDARTAPI cudaSetupArgument(const void*, size_t, size_t)
{
	return cudaSuccess;
}
// Builds a channel format descriptor from the individual component widths
// and the format kind; member layout is owned by the CUDA headers, so the
// fields are filled one by one.
extern "C"
struct cudaChannelFormatDesc CUDARTAPI cudaCreateChannelDesc(
	int x, int y, int z, int w, enum cudaChannelFormatKind f)
{
	struct cudaChannelFormatDesc result;
	result.x = x;
	result.y = y;
	result.z = z;
	result.w = w;
	result.f = f;
	return result;
}
// Link-resolution stub for nvcc's <<<...>>> launch bookkeeping; reports
// success without recording any configuration.
extern "C"
cudaError_t CUDARTAPI __cudaPopCallConfiguration(
	dim3 *,
	dim3 *,
	size_t *,
	void *)
{
	return cudaSuccess;
}

View File

@@ -0,0 +1,247 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef PSFILEBUFFER_PSFILEBUFFER_H
#define PSFILEBUFFER_PSFILEBUFFER_H
#include "filebuf/PxFileBuf.h"
#include "foundation/PxUserAllocated.h"
#include <stdio.h>
namespace physx
{
namespace general_PxIOStream2
{
//Use this class if you want to use your own allocator
class PxFileBufferBase : public PxFileBuf
{
public:
PxFileBufferBase(const char *fileName,OpenMode mode)
{
mOpenMode = mode;
mFph = NULL;
mFileLength = 0;
mSeekRead = 0;
mSeekWrite = 0;
mSeekCurrent = 0;
switch ( mode )
{
case OPEN_READ_ONLY:
mFph = fopen(fileName,"rb");
break;
case OPEN_WRITE_ONLY:
mFph = fopen(fileName,"wb");
break;
case OPEN_READ_WRITE_NEW:
mFph = fopen(fileName,"wb+");
break;
case OPEN_READ_WRITE_EXISTING:
mFph = fopen(fileName,"rb+");
break;
case OPEN_FILE_NOT_FOUND:
break;
}
if ( mFph )
{
fseek(mFph,0L,SEEK_END);
mFileLength = static_cast<uint32_t>(ftell(mFph));
fseek(mFph,0L,SEEK_SET);
}
else
{
mOpenMode = OPEN_FILE_NOT_FOUND;
}
}
virtual ~PxFileBufferBase()
{
close();
}
virtual void close()
{
if( mFph )
{
fclose(mFph);
mFph = 0;
}
}
virtual SeekType isSeekable() const
{
return mSeekType;
}
virtual uint32_t read(void* buffer, uint32_t size)
{
uint32_t ret = 0;
if ( mFph )
{
setSeekRead();
ret = static_cast<uint32_t>(::fread(buffer,1,size,mFph));
mSeekRead+=ret;
mSeekCurrent+=ret;
}
return ret;
}
virtual uint32_t peek(void* buffer, uint32_t size)
{
uint32_t ret = 0;
if ( mFph )
{
uint32_t loc = tellRead();
setSeekRead();
ret = static_cast<uint32_t>(::fread(buffer,1,size,mFph));
mSeekCurrent+=ret;
seekRead(loc);
}
return ret;
}
virtual uint32_t write(const void* buffer, uint32_t size)
{
uint32_t ret = 0;
if ( mFph )
{
setSeekWrite();
ret = static_cast<uint32_t>(::fwrite(buffer,1,size,mFph));
mSeekWrite+=ret;
mSeekCurrent+=ret;
if ( mSeekWrite > mFileLength )
{
mFileLength = mSeekWrite;
}
}
return ret;
}
virtual uint32_t tellRead() const
{
return mSeekRead;
}
virtual uint32_t tellWrite() const
{
return mSeekWrite;
}
virtual uint32_t seekRead(uint32_t loc)
{
mSeekRead = loc;
if ( mSeekRead > mFileLength )
{
mSeekRead = mFileLength;
}
return mSeekRead;
}
virtual uint32_t seekWrite(uint32_t loc)
{
mSeekWrite = loc;
if ( mSeekWrite > mFileLength )
{
mSeekWrite = mFileLength;
}
return mSeekWrite;
}
virtual void flush()
{
if ( mFph )
{
::fflush(mFph);
}
}
virtual OpenMode getOpenMode() const
{
return mOpenMode;
}
virtual uint32_t getFileLength() const
{
return mFileLength;
}
private:
// Moves the actual file pointer to the current read location
void setSeekRead()
{
if ( mSeekRead != mSeekCurrent && mFph )
{
if ( mSeekRead >= mFileLength )
{
fseek(mFph,0L,SEEK_END);
}
else
{
fseek(mFph,static_cast<long>(mSeekRead),SEEK_SET);
}
mSeekCurrent = mSeekRead = static_cast<uint32_t>(ftell(mFph));
}
}
// Moves the actual file pointer to the current write location
void setSeekWrite()
{
if ( mSeekWrite != mSeekCurrent && mFph )
{
if ( mSeekWrite >= mFileLength )
{
fseek(mFph,0L,SEEK_END);
}
else
{
fseek(mFph,static_cast<long>(mSeekWrite),SEEK_SET);
}
mSeekCurrent = mSeekWrite = static_cast<uint32_t>(ftell(mFph));
}
}
FILE *mFph;
uint32_t mSeekRead;
uint32_t mSeekWrite;
uint32_t mSeekCurrent;
uint32_t mFileLength;
SeekType mSeekType;
OpenMode mOpenMode;
};
//Use this class if you want to use PhysX memory allocator
// Same file buffer, but allocated through the PhysX allocator via
// PxUserAllocated (so new/delete are routed to the user callback).
class PsFileBuffer: public PxFileBufferBase, public PxUserAllocated
{
public:
	PsFileBuffer(const char *fileName,OpenMode mode): PxFileBufferBase(fileName, mode) {}
};
}
using namespace general_PxIOStream2;
}
#endif // PSFILEBUFFER_PSFILEBUFFER_H

View File

@@ -0,0 +1,28 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

View File

@@ -0,0 +1,66 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxString.h"
#include <stdio.h>
#include <stdlib.h>
#if PX_WINDOWS_FAMILY
#include <crtdbg.h>
#elif PX_SWITCH
#include "foundation/switch/PxSwitchAbort.h"
#endif
// Default assert handler: formats "<file>(<line>) : Assertion failed: <expr>",
// prints it, then breaks into the debugger or aborts depending on platform
// and build. 'ignore' is an out-flag allowing the Windows debug-CRT dialog to
// suppress this assert in the future; it is only written on that path.
void physx::PxAssert(const char* expr, const char* file, int line, bool& ignore)
{
	PX_UNUSED(ignore); // is used only in debug windows config
	char buffer[1024];
#if PX_WINDOWS_FAMILY
	sprintf_s(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr);
#else
	// Fix: snprintf instead of sprintf - 'file' and 'expr' are caller-supplied
	// and a long path/expression could overflow the fixed 1024-byte buffer.
	snprintf(buffer, sizeof(buffer), "%s(%d) : Assertion failed: %s\n", file, line, expr);
#endif
	physx::PxPrintString(buffer);
#if PX_WINDOWS_FAMILY && PX_DEBUG && PX_DEBUG_CRT
	// _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'.
	// Hitting 'abort' will terminate the process immediately.
	int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer);
	int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE);
	ignore = _CRTDBG_MODE_WNDW == mode && result == 0;
	if(ignore)
		return;
	__debugbreak();
#elif PX_WINDOWS_FAMILY && PX_CHECKED
	__debugbreak();
#elif PX_SWITCH
	abort(buffer);
#else
	abort();
#endif
}

View File

@@ -0,0 +1,333 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "FdFoundation.h"
#include "foundation/PxString.h"
#include "foundation/PxPhysicsVersion.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBroadcast.h"
namespace physx
{
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4251) // class needs to have dll-interface to be used by clients of class
#endif
// Concrete implementation of the PxFoundation singleton: couples the
// user-provided allocator/error callbacks with broadcasting wrappers and the
// process-wide bookkeeping (error mask, temp-allocator table, ref count).
class PX_FOUNDATION_API Foundation : public PxFoundation, public PxUserAllocated
{
	PX_NOCOPY(Foundation)
public:
	// PxFoundation
	virtual void release() PX_OVERRIDE;
	virtual PxErrorCallback& getErrorCallback() PX_OVERRIDE { return mErrorCallback; }
	virtual void setErrorLevel(PxErrorCode::Enum mask) PX_OVERRIDE { mErrorMask = mask; }
	virtual PxErrorCode::Enum getErrorLevel() const PX_OVERRIDE { return mErrorMask; }
	virtual PxAllocatorCallback& getAllocatorCallback() PX_OVERRIDE { return mAllocatorCallback; }
	virtual bool getReportAllocationNames() const PX_OVERRIDE { return mReportAllocationNames; }
	virtual void setReportAllocationNames(bool value) PX_OVERRIDE { mReportAllocationNames = value; }
	virtual void registerAllocationListener(physx::PxAllocationListener& listener) PX_OVERRIDE;
	virtual void deregisterAllocationListener(physx::PxAllocationListener& listener) PX_OVERRIDE;
	virtual void registerErrorCallback(PxErrorCallback& listener) PX_OVERRIDE;
	virtual void deregisterErrorCallback(PxErrorCallback& listener) PX_OVERRIDE;
	virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, ...) PX_OVERRIDE;
	virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, va_list) PX_OVERRIDE;
	//~PxFoundation

	Foundation(PxErrorCallback& errc, PxAllocatorCallback& alloc);
	~Foundation();

	// init order is tricky here: the mutexes require the allocator, the allocator may require the error stream
	PxAllocatorCallback& mAllocatorCallback;
	PxErrorCallback& mErrorCallback;
	PxBroadcastingAllocator mBroadcastingAllocator;
	PxBroadcastingErrorCallback mBroadcastingError;
	bool mReportAllocationNames;
	PxErrorCode::Enum mErrorMask;   // bitmask of error codes that get reported
	Mutex mErrorMutex;              // serializes formatting + user error callbacks
	AllocFreeTable mTempAllocFreeTable;
	Mutex mTempAllocMutex;
	Mutex mListenerMutex;           // guards listener (de)registration
	PxU32 mRefCount;                // number of modules holding the foundation alive
	static PxU32 mWarnOnceTimestap; // generation counter consumed by PX_WARN_ONCE
};
#if PX_VC
#pragma warning(pop)
#endif
} // namespace physx
using namespace physx;
static PxProfilerCallback* gProfilerCallback = NULL;
static Foundation* gInstance = NULL;
// PT: not in header so that people don't use it, only for temp allocator, will be removed
AllocFreeTable& getTempAllocFreeTable()
{
	// Only valid between PxCreateFoundation and the final release.
	Foundation* const instance = gInstance;
	PX_ASSERT(instance);
	return instance->mTempAllocFreeTable;
}
// PT: not in header so that people don't use it, only for temp allocator, will be removed
Mutex& getTempAllocMutex()
{
	// Only valid between PxCreateFoundation and the final release.
	Foundation* const instance = gInstance;
	PX_ASSERT(instance);
	return instance->mTempAllocMutex;
}
// Wires the user callbacks into the broadcasting wrappers. Allocation-name
// reporting defaults on only in PX_CHECKED builds; all error codes are
// enabled by default (mask of all ones).
Foundation::Foundation(PxErrorCallback& errc, PxAllocatorCallback& alloc) :
	mAllocatorCallback (alloc),
	mErrorCallback (errc),
	mBroadcastingAllocator (alloc, errc),
	mBroadcastingError (errc),
#if PX_CHECKED
	mReportAllocationNames (true),
#else
	mReportAllocationNames (false),
#endif
	mErrorMask (PxErrorCode::Enum(~0)),
	mErrorMutex ("Foundation::mErrorMutex"),
	mTempAllocMutex ("Foundation::mTempAllocMutex"),
	mRefCount (0)
{
}
// Defined in the temp-allocator translation unit; frees the chunks cached in
// the free table before the foundation goes away.
void deallocateTempBufferAllocations(AllocFreeTable& mTempAllocFreeTable);

Foundation::~Foundation()
{
	deallocateTempBufferAllocations(mTempAllocFreeTable);
}
// Printf-style error entry point: packages the varargs and forwards to the
// va_list overload. Always returns false so callers can `return error(...)`.
bool Foundation::error(PxErrorCode::Enum c, const char* file, int line, const char* messageFmt, ...)
{
	va_list va;
	va_start(va, messageFmt);
	error(c, file, line, messageFmt, va);
	va_end(va);
	return false;
}
// va_list overload: filters by the current error mask, formats into a fixed
// buffer under a lock, then broadcasts to all registered error callbacks.
// Always returns false.
bool Foundation::error(PxErrorCode::Enum e, const char* file, int line, const char* messageFmt, va_list va)
{
	PX_ASSERT(messageFmt);
	if(e & mErrorMask)
	{
		// this function is reentrant but user's error callback may not be, so...
		Mutex::ScopedLock lock(mErrorMutex);

		// using a static fixed size buffer here because:
		// 1. vsnprintf return values differ between platforms
		// 2. va_start is only usable in functions with ellipses
		// 3. ellipses (...) cannot be passed to called function
		// which would be necessary to dynamically grow the buffer here
		static const size_t bufSize = 1024;
		char stringBuffer[bufSize];
		Pxvsnprintf(stringBuffer, bufSize, messageFmt, va);

		mBroadcastingError.reportError(e, stringBuffer, file, line);
	}
	return false;
}
// Destroys the singleton when this is the last reference; otherwise reports
// the pending module references and leaves the instance alive.
void Foundation::release()
{
	PX_ASSERT(gInstance);
	if(gInstance->mRefCount == 1)
	{
		// Grab the allocator before running the destructor, then free the raw
		// storage the instance was placement-new'ed into.
		PxAllocatorCallback& alloc = gInstance->getAllocatorCallback();
		gInstance->~Foundation();
		alloc.deallocate(gInstance);
		gInstance = NULL;
	}
	else
	{
		gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL,
			"Foundation destruction failed due to pending module references. Close/release all depending modules first.");
	}
}
PxU32 Foundation::mWarnOnceTimestap = 0;
// Adds 'listener' to the allocation broadcast list (thread-safe).
void Foundation::registerAllocationListener(PxAllocationListener& listener)
{
	Mutex::ScopedLock lock(mListenerMutex);
	mBroadcastingAllocator.registerListener(listener);
}
// Removes 'listener' from the allocation broadcast list (thread-safe).
void Foundation::deregisterAllocationListener(PxAllocationListener& listener)
{
	Mutex::ScopedLock lock(mListenerMutex);
	mBroadcastingAllocator.deregisterListener(listener);
}
// Adds 'callback' to the error broadcast list (thread-safe).
void Foundation::registerErrorCallback(PxErrorCallback& callback)
{
	Mutex::ScopedLock lock(mListenerMutex);
	mBroadcastingError.registerListener(callback);
}
// Removes 'callback' from the error broadcast list (thread-safe).
void Foundation::deregisterErrorCallback(PxErrorCallback& callback)
{
	Mutex::ScopedLock lock(mListenerMutex);
	mBroadcastingError.deregisterListener(callback);
}
// Creates the process-wide Foundation singleton. Returns NULL (and reports an
// error) on a version mismatch, an allocation failure, or when an instance
// already exists.
PxFoundation* PxCreateFoundation(PxU32 version, PxAllocatorCallback& allocator, PxErrorCallback& errorCallback)
{
	if(version != PX_PHYSICS_VERSION)
	{
		char buffer[256];
		Pxsnprintf(buffer, 256, "Wrong version: physics version is 0x%08x, tried to create 0x%08x", PX_PHYSICS_VERSION, version);
		errorCallback.reportError(PxErrorCode::eINVALID_PARAMETER, buffer, PX_FL);
		return 0;
	}
	if(!gInstance)
	{
		// if we don't assign this here, the Foundation object can't create member
		// subobjects which require the allocator
		gInstance = reinterpret_cast<Foundation*>(allocator.allocate(sizeof(Foundation), "Foundation", PX_FL));
		if(gInstance)
		{
			// Two-step construction: raw allocation above, placement-new here.
			PX_PLACEMENT_NEW(gInstance, Foundation)(errorCallback, allocator);
			PX_ASSERT(gInstance->mRefCount == 0);
			gInstance->mRefCount = 1;

			// skip 0 which marks uninitialized timestaps in PX_WARN_ONCE
			gInstance->mWarnOnceTimestap = (gInstance->mWarnOnceTimestap == PX_MAX_U32) ? 1 : gInstance->mWarnOnceTimestap + 1;
			return gInstance;
		}
		else
		{
			errorCallback.reportError(PxErrorCode::eINTERNAL_ERROR, "Memory allocation for foundation object failed.", PX_FL);
		}
	}
	else
	{
		errorCallback.reportError(PxErrorCode::eINVALID_OPERATION, "Foundation object exists already. Only one instance per process can be created.", PX_FL);
	}
	return 0;
}
// Adopts an externally created foundation as the process-global instance
// without touching its reference count.
void PxSetFoundationInstance(PxFoundation& foundation)
{
	gInstance = &static_cast<Foundation&>(foundation);
}
// Returns the user-supplied allocator callback. Requires a live foundation.
PxAllocatorCallback* PxGetAllocatorCallback()
{
	// Consistency fix: assert like the sibling accessors (e.g.
	// PxGetBroadcastAllocator) instead of silently dereferencing NULL.
	PX_ASSERT(gInstance);
	return &gInstance->getAllocatorCallback();
}
// Returns the broadcasting allocator wrapper; optionally reports whether
// allocation names should be passed along. Requires a live foundation.
PxAllocatorCallback* PxGetBroadcastAllocator(bool* reportAllocationNames)
{
	PX_ASSERT(gInstance);
	if(reportAllocationNames)
		*reportAllocationNames = gInstance->mReportAllocationNames;
	return &gInstance->mBroadcastingAllocator;
}
// Returns the user-supplied error callback. Requires a live foundation.
PxErrorCallback* PX_CALL_CONV PxGetErrorCallback()
{
	// Consistency fix: assert like the sibling accessors instead of silently
	// dereferencing a NULL gInstance.
	PX_ASSERT(gInstance);
	return &gInstance->getErrorCallback();
}
// Returns the broadcasting error callback wrapper. Requires a live foundation.
PxErrorCallback* PX_CALL_CONV PxGetBroadcastError()
{
	// Consistency fix: assert like the sibling accessors instead of silently
	// dereferencing a NULL gInstance.
	PX_ASSERT(gInstance);
	return &gInstance->mBroadcastingError;
}
// Returns the global foundation; must only be called while one exists.
PxFoundation& PxGetFoundation()
{
	Foundation* const instance = gInstance;
	PX_ASSERT(instance);
	return *instance;
}
// Returns the global instance pointer; NULL when no foundation exists.
PxFoundation* PxIsFoundationValid()
{
	return gInstance;
}
// Returns the currently installed profiler hook (may be NULL).
PxProfilerCallback* PxGetProfilerCallback()
{
	return gProfilerCallback;
}
// Installs (or clears, when passed NULL) the global profiler hook.
void PxSetProfilerCallback(PxProfilerCallback* profiler)
{
	gProfilerCallback = profiler;
}
// Returns the current warn-once generation counter; requires a live foundation.
PxU32 PxGetWarnOnceTimeStamp()
{
	PX_ASSERT(gInstance);
	return gInstance->mWarnOnceTimestap;
}
// Drops one module reference; reports an error on underflow instead of
// wrapping the unsigned counter.
void PxDecFoundationRefCount()
{
	PX_ASSERT(gInstance);
	if(gInstance->mRefCount == 0)
	{
		gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Foundation: Invalid deregistration detected.");
		return;
	}
	gInstance->mRefCount--;
}
// Adds one module reference; a zero count means the foundation is being (or
// has been) torn down, so registering then is reported as an error.
void PxIncFoundationRefCount()
{
	PX_ASSERT(gInstance);
	if(gInstance->mRefCount == 0)
	{
		gInstance->error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Foundation: Invalid registration detected.");
		return;
	}
	gInstance->mRefCount++;
}
// Private method to set the global foundation instance to NULL
// NOTE(review): this does NOT release the instance - presumably paired with
// PxSetFoundationInstance across DLL boundaries; confirm before reuse.
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxResetFoundationInstance()
{
	gInstance = NULL;
}

View File

@@ -0,0 +1,46 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_PSFOUNDATION_H
#define PX_FOUNDATION_PSFOUNDATION_H
#include "foundation/PxAllocator.h"
#include "foundation/PxArray.h"
#include "foundation/PxMutex.h"
namespace physx
{
// Forward-declared so AllocFreeTable below can hold pointers without pulling
// in the temp-allocator header.
union PxTempAllocatorChunk;

// Shared shorthands used by the foundation implementation files.
typedef PxMutexT<PxAllocator> Mutex;
typedef PxArray<PxTempAllocatorChunk*, PxAllocator> AllocFreeTable;
} // namespace physx
#endif

View File

@@ -0,0 +1,209 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxSIMDHelpers.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxVec4.h"
#include "foundation/PxAssert.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxTransform.h"
using namespace physx;
using namespace physx::intrinsics;
// Builds a transform whose +x axis is the (normalized) plane normal and whose
// translation is the point on the plane closest to the origin.
PX_FOUNDATION_API PxTransform physx::PxTransformFromPlaneEquation(const PxPlane& plane)
{
	PxPlane p = plane;
	p.normalize();

	// special case handling for axis aligned planes
	const PxReal halfsqrt2 = 0.707106781f; // sqrt(2)/2, the sin/cos of a 45-degree half angle
	PxQuat q;
	if(2 == (p.n.x == 0.0f) + (p.n.y == 0.0f) + (p.n.z == 0.0f)) // special handling for axis aligned planes
	{
		if(p.n.x > 0) q = PxQuat(PxIdentity);          // normal already along +x
		else if(p.n.x < 0) q = PxQuat(0, 0, 1.0f, 0);  // 180 degrees about z
		else q = PxQuat(0.0f, -p.n.z, p.n.y, 1.0f) * halfsqrt2; // 90-degree turn onto the axis
	}
	else q = PxShortestRotation(PxVec3(1.f, 0, 0), p.n);

	return PxTransform(-p.n * p.d, q);
}
// Builds a transform centered on segment p0-p1 with +x along the segment;
// optionally returns half the segment length through 'halfHeight'.
// Near-degenerate segments (< 1e-6 length) get the identity rotation.
PX_FOUNDATION_API PxTransform physx::PxTransformFromSegment(const PxVec3& p0, const PxVec3& p1, PxReal* halfHeight)
{
	const PxVec3 axis = p1 - p0;
	const PxReal height = axis.magnitude();
	if(halfHeight)
		*halfHeight = height / 2;

	return PxTransform((p1 + p0) * 0.5f,
		height < 1e-6f ? PxQuat(PxIdentity) : PxShortestRotation(PxVec3(1.f, 0, 0), axis / height));
}
// Returns the minimal-angle quaternion rotating unit vector v0 onto v1.
// For (near) antiparallel inputs the axis is ill-defined, so an arbitrary
// perpendicular axis is constructed, picked to avoid degeneracy with v0.
PX_FOUNDATION_API PxQuat physx::PxShortestRotation(const PxVec3& v0, const PxVec3& v1)
{
	const PxReal d = v0.dot(v1);
	const PxVec3 cross = v0.cross(v1);

	// (cross, 1 + d) is the unnormalized half-angle quaternion when d > -1.
	const PxQuat q = d > -1 ? PxQuat(cross.x, cross.y, cross.z, 1 + d) : PxAbs(v0.x) < 0.1f ? PxQuat(0.0f, v0.z, -v0.y, 0.0f)
	                                                                                        : PxQuat(v0.y, -v0.x, 0.0f, 0.0f);
	return q.getNormalized();
}
// indexed rotation around axis, with sine and cosine of half-angle
static PxQuat indexedRotation(PxU32 axis, PxReal s, PxReal c)
{
	// Place the half-angle sine into the imaginary component matching the
	// rotation axis (0=x, 1=y, 2=z); the other two stay zero.
	PxReal xyz[3] = { 0.0f, 0.0f, 0.0f };
	xyz[axis] = s;
	return PxQuat(xyz[0], xyz[1], xyz[2], c);
}
// Iteratively diagonalizes the symmetric 3x3 matrix 'm' with quaternion-based
// Jacobi rotations; returns the diagonal (eigenvalues) and writes the
// accumulated rotation (eigenvector frame) to 'massFrame'.
PX_FOUNDATION_API PxVec3 physx::PxDiagonalize(const PxMat33& m, PxQuat& massFrame)
{
	// jacobi rotation using quaternions (from an idea of Stan Melax, with fix for precision issues)
	const PxU32 MAX_ITERS = 24;

	PxQuat q(PxIdentity);
	PxMat33 d;
	for(PxU32 i = 0; i < MAX_ITERS; i++)
	{
		// PT: removed for now, it makes one UT fail because the error is slightly above the threshold
		//const PxMat33Padded axes(q);
		const PxMat33 axes(q);
		d = axes.getTranspose() * m * axes;

		const PxReal d0 = PxAbs(d[1][2]), d1 = PxAbs(d[0][2]), d2 = PxAbs(d[0][1]);
		const PxU32 a = PxU32(d0 > d1 && d0 > d2 ? 0 : d1 > d2 ? 1 : 2); // rotation axis index, from largest off-diagonal element
		const PxU32 a1 = PxGetNextIndex3(a), a2 = PxGetNextIndex3(a1);

		// Converged: the largest off-diagonal element is zero or relatively negligible.
		if(d[a1][a2] == 0.0f || PxAbs(d[a1][a1] - d[a2][a2]) > 2e6f * PxAbs(2.0f * d[a1][a2]))
			break;

		const PxReal w = (d[a1][a1] - d[a2][a2]) / (2.0f * d[a1][a2]); // cot(2 * phi), where phi is the rotation angle
		const PxReal absw = PxAbs(w);

		PxQuat r;
		if(absw > 1000)
			r = indexedRotation(a, 1.0f / (4.0f * w), 1.0f); // h will be very close to 1, so use small angle approx instead
		else
		{
			const PxReal t = 1.0f / (absw + PxSqrt(w * w + 1.0f)); // absolute value of tan phi
			const PxReal h = 1.0f / PxSqrt(t * t + 1.0f);          // absolute value of cos phi
			PX_ASSERT(h != 1); // |w|<1000 guarantees this with typical IEEE754 machine eps (approx 6e-8)
			r = indexedRotation(a, PxSqrt((1.0f - h) / 2.0f) * PxSign(w), PxSqrt((1.0f + h) / 2.0f));
		}

		q = (q * r).getNormalized();
	}

	massFrame = q;
	return PxVec3(d.column0.x, d.column1.y, d.column2.z);
}
/**
\brief computes a oriented bounding box around the scaled basis.
\param basis Input = skewed basis, Output = (normalized) orthogonal basis.
\return Bounding box extent.
*/
PxVec3 physx::PxOptimizeBoundingBox(PxMat33& basis)
{
	PxVec3* PX_RESTRICT vec = &basis[0]; // PT: don't copy vectors if not needed...

	// PT: since we store the magnitudes to memory, we can avoid the FCMPs afterwards
	PxVec3 magnitude(vec[0].magnitudeSquared(), vec[1].magnitudeSquared(), vec[2].magnitudeSquared());

	// find indices sorted by magnitude
	unsigned int i = magnitude[1] > magnitude[0] ? 1 : 0u;
	unsigned int j = magnitude[2] > magnitude[1 - i] ? 2 : 1 - i;
	const unsigned int k = 3 - i - j;
	if(magnitude[i] < magnitude[j])
		PxSwap(i, j);
	PX_ASSERT(magnitude[i] >= magnitude[j] && magnitude[i] >= magnitude[k] && magnitude[j] >= magnitude[k]);

	// ortho-normalize basis
	PxReal invSqrt = PxRecipSqrt(magnitude[i]);
	// magnitude[i] held the squared length; times 1/sqrt it becomes the length
	magnitude[i] *= invSqrt;
	vec[i] *= invSqrt; // normalize the first axis
	PxReal dotij = vec[i].dot(vec[j]);
	PxReal dotik = vec[i].dot(vec[k]);
	magnitude[i] += PxAbs(dotij) + PxAbs(dotik); // elongate the axis by projection of the other two
	vec[j] -= vec[i] * dotij;                    // orthogonize the two remaining axii relative to vec[i]
	vec[k] -= vec[i] * dotik;

	magnitude[j] = vec[j].normalize();
	PxReal dotjk = vec[j].dot(vec[k]);
	magnitude[j] += PxAbs(dotjk); // elongate the axis by projection of the other one
	vec[k] -= vec[j] * dotjk;     // orthogonize vec[k] relative to vec[j]

	magnitude[k] = vec[k].normalize();

	return magnitude;
}
// Advances 'curTrans' by 'timeStep' under constant linear and angular
// velocity, using a closed-form quaternion integrator for the rotation.
// Safe when &curTrans == &result (the rotation is built in a temporary).
void physx::PxIntegrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel,
	PxReal timeStep, PxTransform& result)
{
	result.p = curTrans.p + linvel * timeStep;

	// from void DynamicsContext::integrateAtomPose(PxsRigidBody* atom, Cm::BitMap &shapeChangedMap) const:
	// Integrate the rotation using closed form quaternion integrator
	PxReal w = angvel.magnitudeSquared();

	if (w != 0.0f)
	{
		w = PxSqrt(w);
		if (w != 0.0f)
		{
			const PxReal v = timeStep * w * 0.5f;
			const PxReal q = PxCos(v);
			const PxReal s = PxSin(v) / w;

			const PxVec3 pqr = angvel * s;
			const PxQuat quatVel(pqr.x, pqr.y, pqr.z, 0);
			PxQuat out; // need to have temporary, otherwise we may overwrite input if &curTrans == &result.
			out = quatVel * curTrans.q;
			out.x += curTrans.q.x * q;
			out.y += curTrans.q.y * q;
			out.z += curTrans.q.z * q;
			out.w += curTrans.q.w * q;
			result.q = out;
			return;
		}
	}
	// orientation stays the same - convert from quat to matrix:
	result.q = curTrans.q;
}

View File

@@ -0,0 +1,170 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxString.h"
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#if PX_WINDOWS_FAMILY
#pragma warning(push)
#pragma warning(disable : 4996) // unsafe string functions
#endif
#if PX_APPLE_FAMILY
#pragma clang diagnostic push
// error : format string is not a string literal
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#endif
namespace physx
{
// cross-platform implementations
// Null-tolerant strcmp wrapper.
// NOTE: any NULL input yields -1, so two NULL pointers do NOT compare equal;
// callers rely on this legacy behavior.
int32_t Pxstrcmp(const char* str1, const char* str2)
{
	if(str1 == NULL || str2 == NULL)
		return -1;
	return ::strcmp(str1, str2);
}
// Thin cross-platform wrapper over ::strncmp (no NULL handling, unlike Pxstrcmp).
int32_t Pxstrncmp(const char* str1, const char* str2, size_t count)
{
	const int cmp = ::strncmp(str1, str2, count);
	return int32_t(cmp);
}
// Bounded printf into dst; forwards to Pxvsnprintf, which guarantees C99
// return semantics (chars that would have been written) on all platforms.
int32_t Pxsnprintf(char* dst, size_t dstSize, const char* format, ...)
{
	va_list args;
	va_start(args, format);
	const int32_t written = Pxvsnprintf(dst, dstSize, format, args);
	va_end(args);
	return written;
}
// Varargs sscanf wrapper.
// Fix: the old guard '(PX_VC < 12) && !PX_LINUX' sent every non-MSVC,
// non-Linux platform (where PX_VC is 0, e.g. Apple) into the branch that
// passes a va_list to variadic ::sscanf -- undefined behavior. ::vsscanf is
// C99/C++11 and is the correct call everywhere except pre-VS2013 MSVC, so the
// legacy workaround is now gated on MSVC only.
int32_t Pxsscanf(const char* buffer, const char* format, ...)
{
	va_list arg;
	va_start(arg, format);
#if PX_VC && (PX_VC < 12)
	// Pre-VS2013 MSVC lacks ::vsscanf; smuggling the va_list through
	// ::sscanf relied on the x86 cdecl convention. Kept for legacy builds only.
	int32_t r = ::sscanf(buffer, format, arg);
#else
	int32_t r = ::vsscanf(buffer, format, arg);
#endif
	va_end(arg);
	return r;
}
// BSD-style strlcpy: copy at most dstSize-1 characters, always
// null-terminate (when dst is usable), and return the full length of src so
// callers can detect truncation (result >= dstSize).
size_t Pxstrlcpy(char* dst, size_t dstSize, const char* src)
{
	size_t len = 0;
	if(dst != NULL && dstSize != 0)
	{
		while(len + 1 < dstSize && src[len] != '\0')
		{
			dst[len] = src[len];
			++len;
		}
		dst[len] = '\0'; // always null-terminate
	}
	// keep scanning src to report its total length
	while(src[len] != '\0')
		++len;
	return len;
}
// BSD-style strlcat: append src to the null-terminated string in dst, writing
// at most dstSize-1 characters total and always null-terminating (when dst is
// usable). Returns strlen(dst-before) + strlen(src) so callers can detect
// truncation (result >= dstSize). Precondition: dst is null-terminated.
size_t Pxstrlcat(char* dst, size_t dstSize, const char* src)
{
	size_t copied = 0;
	size_t dstLen = 0;
	if(dst != NULL && dstSize != 0)
	{
		dstLen = ::strlen(dst);
		while(copied + dstLen + 1 < dstSize && src[copied] != '\0')
		{
			dst[dstLen + copied] = src[copied];
			++copied;
		}
		dst[dstLen + copied] = '\0'; // always null-terminate
	}
	// keep scanning src so the combined length can be reported
	while(src[copied] != '\0')
		++copied;
	return copied + dstLen;
}
// In-place ASCII lower-casing ('A'-'Z' only; locale-independent).
void Pxstrlwr(char* str)
{
	while(*str != '\0')
	{
		const char c = *str;
		if(c >= 'A' && c <= 'Z')
			*str = char(c + ('a' - 'A'));
		++str;
	}
}
// In-place ASCII upper-casing ('a'-'z' only; locale-independent).
void Pxstrupr(char* str)
{
	while(*str != '\0')
	{
		const char c = *str;
		if(c >= 'a' && c <= 'z')
			*str = char(c - ('a' - 'A'));
		++str;
	}
}
// vsnprintf wrapper providing C99 return semantics on every platform: the
// number of characters that would have been written, excluding the
// terminator, even when the output was truncated.
// NOTE(review): on the MSVC path 'arg' is consumed twice (vsnprintf then
// _vscprintf) without va_copy; this works with MSVC's va_list representation
// but would be UB elsewhere -- confirm if this path is ever built non-x86/x64.
int32_t Pxvsnprintf(char* dst, size_t dstSize, const char* src, va_list arg)
{
#if PX_VC // MSVC is not C99-compliant...
int32_t result = dst ? ::vsnprintf(dst, dstSize, src, arg) : -1;
// truncated, or no room left for the terminator: force-terminate the buffer
if(dst && (result == int32_t(dstSize) || result < 0))
dst[dstSize - 1] = 0; // string was truncated or there wasn't room for the NULL
if(result < 0)
result = _vscprintf(src, arg); // work out how long the answer would have been.
#else
int32_t result = ::vsnprintf(dst, dstSize, src, arg);
#endif
return result;
}
// Case-insensitive strcmp (_stricmp on MSVC, POSIX strcasecmp elsewhere).
int32_t Pxstricmp(const char* str, const char* str1)
{
#if PX_VC
	return ::_stricmp(str, str1);
#else
	return ::strcasecmp(str, str1);
#endif
}
// Length-bounded case-insensitive compare (_strnicmp on MSVC, strncasecmp elsewhere).
int32_t Pxstrnicmp(const char* str, const char* str1, size_t n)
{
#if PX_VC
	return ::_strnicmp(str, str1, n);
#else
	return ::strncasecmp(str, str1, n);
#endif
}
}//namespace physx
#if PX_APPLE_FAMILY
#pragma clang diagnostic pop
#endif
#if PX_WINDOWS_FAMILY
#pragma warning(pop)
#endif

View File

@@ -0,0 +1,145 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMath.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxArray.h"
#include "foundation/PxMutex.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxTempAllocator.h"
#include "FdFoundation.h"
#if PX_VC
#pragma warning(disable : 4706) // assignment within conditional expression
#endif
physx::AllocFreeTable& getTempAllocFreeTable();
physx::Mutex& getTempAllocMutex();
namespace physx
{
// Header stored immediately before every temp allocation.
// While a chunk sits on a freelist, mNext links it to the next free chunk;
// once handed out, mIndex records its power-of-two size class so
// deallocate() can return it to the right bucket. The 16-byte pad keeps
// user payloads 16-byte aligned.
union PxTempAllocatorChunk
{
PxTempAllocatorChunk* mNext; // while chunk is free
PxU32 mIndex; // while chunk is allocated
PxU8 mPad[16]; // 16 byte aligned allocations
};
namespace
{
typedef PxTempAllocatorChunk Chunk;
// size classes are powers of two, index = log2(total bytes) - 1
const PxU32 sMinIndex = 8; // 256B min
const PxU32 sMaxIndex = 17; // 128kB max
}
// Allocate a 16-byte-aligned temp block. Requests that fit a size class are
// served from per-class freelists guarded by a shared mutex; larger requests
// go straight to the base allocator. A Chunk header precedes the returned
// pointer and records the size class used by deallocate().
// Returns NULL only for size == 0.
void* PxTempAllocator::allocate(size_t size, const char* filename, PxI32 line)
{
if(!size)
return 0;
// size class: log2 of (payload + header), clamped to the minimum bucket
PxU32 index = PxMax(PxHighestSetBit(PxU32(size) + sizeof(Chunk) - 1), sMinIndex);
Chunk* chunk = 0;
if(index < sMaxIndex)
{
Mutex::ScopedLock lock(getTempAllocMutex());
// find chunk up to 16x bigger than necessary
AllocFreeTable& freeTable = getTempAllocFreeTable();
Chunk** it = freeTable.begin() + index - sMinIndex;
Chunk** end = PxMin(it + 3, freeTable.end());
while(it < end && !(*it))
++it;
if(it < end)
{
// pop top off freelist
chunk = *it;
*it = chunk->mNext;
// remember the (possibly larger) class the chunk actually came from
index = PxU32(it - freeTable.begin() + sMinIndex);
}
else
// create new chunk
chunk = reinterpret_cast<Chunk*>(PxAllocator().allocate(size_t(2 << index), filename, line));
}
else
{
// too big for temp allocation, forward to base allocator
chunk = reinterpret_cast<Chunk*>(PxAllocator().allocate(size + sizeof(Chunk), filename, line));
}
chunk->mIndex = index;
void* ret = chunk + 1;
PX_ASSERT((size_t(ret) & 0xf) == 0); // SDK types require at minimum 16 byte alignment.
return ret;
}
// Return a block obtained from allocate() to its size-class freelist, or to
// the base allocator for oversized blocks. Passing NULL is a no-op.
void PxTempAllocator::deallocate(void* ptr)
{
if(!ptr)
return;
// step back to the Chunk header written by allocate()
Chunk* chunk = reinterpret_cast<Chunk*>(ptr) - 1;
PxU32 index = chunk->mIndex;
if(index >= sMaxIndex)
return PxAllocator().deallocate(chunk);
Mutex::ScopedLock lock(getTempAllocMutex());
index -= sMinIndex;
AllocFreeTable& freeTable = getTempAllocFreeTable();
// grow the bucket table lazily on first use of a size class
if(freeTable.size() <= index)
freeTable.resize(index + 1);
// push onto the bucket's singly-linked freelist
chunk->mNext = freeTable[index];
freeTable[index] = chunk;
}
} // namespace physx
using namespace physx;
// Release every chunk still parked on the temp-allocator freelists and clear
// the table itself. Called during foundation teardown.
void deallocateTempBufferAllocations(AllocFreeTable& mTempAllocFreeTable)
{
	PxAllocator alloc;
	const PxU32 bucketCount = mTempAllocFreeTable.size();
	for(PxU32 bucket = 0; bucket < bucketCount; ++bucket)
	{
		// walk and free the bucket's singly-linked freelist
		PxTempAllocatorChunk* chunk = mTempAllocFreeTable[bucket];
		while(chunk != NULL)
		{
			PxTempAllocatorChunk* nextChunk = chunk->mNext;
			alloc.deallocate(chunk);
			chunk = nextChunk;
		}
	}
	mTempAllocFreeTable.reset();
}

View File

@@ -0,0 +1,171 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAtomic.h"
#if ! PX_EMSCRIPTEN
#define PAUSE() asm("nop")
#else
#define PAUSE()
#endif
namespace physx
{
// Atomic compare-and-swap for pointers: stores 'exch' into *dest iff
// *dest == comp. Returns the value *dest held before the call
// (GCC/Clang __sync builtin; full memory barrier).
void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp)
{
return __sync_val_compare_and_swap(const_cast<void**>(dest), comp, exch);
}
// 32-bit CAS; returns the previous value of *dest.
PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
// 64-bit CAS; returns the previous value of *dest.
PxI64 PxAtomicCompareExchange(volatile PxI64* dest, PxI64 exch, PxI64 comp)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
// Atomically add 1 to *val; returns the NEW (post-increment) value.
PxI32 PxAtomicIncrement(volatile PxI32* val)
{
return __sync_add_and_fetch(val, 1);
}
// 64-bit variant; returns the new value.
PxI64 PxAtomicIncrement(volatile PxI64* val)
{
return __sync_add_and_fetch(val, 1);
}
// Atomically subtract 1 from *val; returns the NEW (post-decrement) value.
PxI32 PxAtomicDecrement(volatile PxI32* val)
{
return __sync_sub_and_fetch(val, 1);
}
// 64-bit variant; returns the new value.
PxI64 PxAtomicDecrement(volatile PxI64* val)
{
return __sync_sub_and_fetch(val, 1);
}
// Atomically add 'delta' to *val; returns the NEW value.
PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta)
{
return __sync_add_and_fetch(val, delta);
}
// 64-bit variant; returns the new value.
PxI64 PxAtomicAdd(volatile PxI64* val, PxI64 delta)
{
return __sync_add_and_fetch(val, delta);
}
// Atomically raise *val to at least val2 via a CAS retry loop.
// NOTE(review): the function returns a fresh re-read of *val AFTER the loop,
// so under contention the returned value may be newer than the maximum this
// call installed -- confirm callers only rely on the stored value, not the
// return.
PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2)
{
PxI32 oldVal, newVal;
do
{
PAUSE();
oldVal = *val;
if(val2 > oldVal)
newVal = val2;
else
newVal = oldVal;
} while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal);
return *val;
}
// 64-bit variant; same CAS loop and the same post-loop re-read caveat.
PxI64 PxAtomicMax(volatile PxI64* val, PxI64 val2)
{
PxI64 oldVal, newVal;
do
{
PAUSE();
oldVal = *val;
if(val2 > oldVal)
newVal = val2;
else
newVal = oldVal;
} while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal);
return *val;
}
// Atomically store val2 into *val; returns the value that was replaced.
// Implemented as a CAS retry loop rather than __sync_lock_test_and_set.
PxI32 PxAtomicExchange(volatile PxI32* val, PxI32 val2)
{
PxI32 newVal, oldVal;
do
{
PAUSE();
oldVal = *val;
newVal = val2;
} while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal);
return oldVal;
}
// 64-bit variant; returns the replaced value.
PxI64 PxAtomicExchange(volatile PxI64* val, PxI64 val2)
{
PxI64 newVal, oldVal;
do
{
PAUSE();
oldVal = *val;
newVal = val2;
} while(PxAtomicCompareExchange(val, newVal, oldVal) != oldVal);
return oldVal;
}
// Atomically OR 'mask' into *val; returns the NEW value.
PxI32 PxAtomicOr(volatile PxI32* val, PxI32 mask)
{
return __sync_or_and_fetch(val, mask);
}
// 64-bit variant; returns the new value.
PxI64 PxAtomicOr(volatile PxI64* val, PxI64 mask)
{
return __sync_or_and_fetch(val, mask);
}
// Atomically AND 'mask' into *val; returns the NEW value.
PxI32 PxAtomicAnd(volatile PxI32* val, PxI32 mask)
{
return __sync_and_and_fetch(val, mask);
}
// 64-bit variant; returns the new value.
PxI64 PxAtomicAnd(volatile PxI64* val, PxI64 mask)
{
return __sync_and_and_fetch(val, mask);
}
} // namespace physx

View File

@@ -0,0 +1,107 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxFPU.h"
#if !defined(__CYGWIN__)
#include <fenv.h>
PX_COMPILE_TIME_ASSERT(8 * sizeof(uint32_t) >= sizeof(fenv_t));
#endif
#if PX_OSX
// osx defines SIMD as standard for floating point operations.
#include <xmmintrin.h>
#endif
// RAII guard: save the thread's floating-point environment in mControlWords
// and install PhysX defaults (all FP exceptions masked; on OSX additionally
// flush-to-zero and denormals-are-zero in MXCSR for SIMD math).
// The destructor restores the saved state.
physx::PxFPUGuard::PxFPUGuard()
{
#if defined(__CYGWIN__)
#pragma message "FPUGuard::FPUGuard() is not implemented"
#elif PX_OSX
// save the SSE control/status register
mControlWords[0] = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
#elif defined(__EMSCRIPTEN__)
// not supported
#else
PX_COMPILE_TIME_ASSERT(sizeof(fenv_t) <= sizeof(mControlWords));
fegetenv(reinterpret_cast<fenv_t*>(mControlWords));
fesetenv(FE_DFL_ENV);
#if PX_LINUX
// need to explicitly disable exceptions because fesetenv does not modify
// the sse control word on 32bit linux (64bit is fine, but do it here just be sure)
fedisableexcept(FE_ALL_EXCEPT);
#endif
#endif
}
// Restore the floating-point environment captured by the constructor.
physx::PxFPUGuard::~PxFPUGuard()
{
#if defined(__CYGWIN__)
#pragma message "PxFPUGuard::~PxFPUGuard() is not implemented"
#elif PX_OSX
// restore control word and clear exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWords[0] & ~_MM_EXCEPT_MASK);
#elif defined(__EMSCRIPTEN__)
// not supported
#else
fesetenv(reinterpret_cast<fenv_t*>(mControlWords));
#endif
}
// Unmask invalid-operation, divide-by-zero and overflow FP exceptions so
// numerical bugs trap immediately (debugging aid). Inexact and underflow stay
// masked (common and benign); on OSX denormal results must also stay masked
// because underflow can produce denormals.
PX_FOUNDATION_API void physx::PxEnableFPExceptions()
{
#if PX_LINUX && !defined(__EMSCRIPTEN__)
feclearexcept(FE_ALL_EXCEPT);
feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
#elif PX_OSX
// clear any pending exceptions
// (setting exception state flags cause exceptions on the first following fp operation)
uint32_t control = _mm_getcsr() & ~_MM_EXCEPT_MASK;
// enable all fp exceptions except inexact and underflow (common, benign)
// note: denorm has to be disabled as well because underflow can create denorms
_mm_setcsr((control & ~_MM_MASK_MASK) | _MM_MASK_INEXACT | _MM_MASK_UNDERFLOW | _MM_MASK_DENORM);
#endif
}
// Mask all FP exceptions again (undo PxEnableFPExceptions).
PX_FOUNDATION_API void physx::PxDisableFPExceptions()
{
#if PX_LINUX && !defined(__EMSCRIPTEN__)
fedisableexcept(FE_ALL_EXCEPT);
#elif PX_OSX
// clear any pending exceptions
// (setting exception state flags cause exceptions on the first following fp operation)
uint32_t control = _mm_getcsr() & ~_MM_EXCEPT_MASK;
_mm_setcsr(control | _MM_MASK_MASK);
#endif
}

View File

@@ -0,0 +1,199 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxMutex.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxThread.h"
#include <pthread.h>
namespace physx
{
#if PX_LINUX
#include <sched.h>
static int gMutexProtocol = PTHREAD_PRIO_INHERIT;
PX_FORCE_INLINE bool isLegalProtocol(const int mutexProtocol)
{
return
(
(PTHREAD_PRIO_NONE == mutexProtocol) ||
(PTHREAD_PRIO_INHERIT == mutexProtocol) ||
((PTHREAD_PRIO_PROTECT == mutexProtocol) && ((sched_getscheduler(0) == SCHED_FIFO) || (sched_getscheduler(0) == SCHED_RR)))
);
}
bool PxSetMutexProtocol(const int mutexProtocol)
{
if(isLegalProtocol(mutexProtocol))
{
gMutexProtocol = mutexProtocol;
return true;
}
return false;
}
int PxGetMutexProtocol()
{
return gMutexProtocol;
}
#endif //PX_LINUX
namespace
{
struct MutexUnixImpl
{
pthread_mutex_t lock;
PxThread::Id owner;
};
MutexUnixImpl* getMutex(PxMutexImpl* impl)
{
return reinterpret_cast<MutexUnixImpl*>(impl);
}
}
PxMutexImpl::PxMutexImpl()
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
#if PX_LINUX
pthread_mutexattr_setprotocol(&attr, gMutexProtocol);
pthread_mutexattr_setprioceiling(&attr, 0);
#endif
pthread_mutex_init(&getMutex(this)->lock, &attr);
pthread_mutexattr_destroy(&attr);
}
PxMutexImpl::~PxMutexImpl()
{
pthread_mutex_destroy(&getMutex(this)->lock);
}
void PxMutexImpl::lock()
{
int err = pthread_mutex_lock(&getMutex(this)->lock);
PX_ASSERT(!err);
PX_UNUSED(err);
#if PX_DEBUG
getMutex(this)->owner = PxThread::getId();
#endif
}
bool PxMutexImpl::trylock()
{
bool success = !pthread_mutex_trylock(&getMutex(this)->lock);
#if PX_DEBUG
if(success)
getMutex(this)->owner = PxThread::getId();
#endif
return success;
}
void PxMutexImpl::unlock()
{
#if PX_DEBUG
if(getMutex(this)->owner != PxThread::getId())
{
PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__,
"Mutex must be unlocked only by thread that has already acquired lock");
return;
}
#endif
int err = pthread_mutex_unlock(&getMutex(this)->lock);
PX_ASSERT(!err);
PX_UNUSED(err);
}
uint32_t PxMutexImpl::getSize()
{
return sizeof(MutexUnixImpl);
}
class ReadWriteLockImpl
{
public:
PxMutex mutex;
volatile int readerCounter;
};
PxReadWriteLock::PxReadWriteLock()
{
mImpl = reinterpret_cast<ReadWriteLockImpl*>(PX_ALLOC(sizeof(ReadWriteLockImpl), "ReadWriteLockImpl"));
PX_PLACEMENT_NEW(mImpl, ReadWriteLockImpl);
mImpl->readerCounter = 0;
}
PxReadWriteLock::~PxReadWriteLock()
{
mImpl->~ReadWriteLockImpl();
PX_FREE(mImpl);
}
void PxReadWriteLock::lockReader(bool takeLock)
{
if(takeLock)
mImpl->mutex.lock();
PxAtomicIncrement(&mImpl->readerCounter);
if(takeLock)
mImpl->mutex.unlock();
}
void PxReadWriteLock::lockWriter()
{
mImpl->mutex.lock();
// spin lock until no readers
while(mImpl->readerCounter);
}
void PxReadWriteLock::unlockReader()
{
PxAtomicDecrement(&mImpl->readerCounter);
}
void PxReadWriteLock::unlockWriter()
{
mImpl->mutex.unlock();
}
} // namespace physx

View File

@@ -0,0 +1,40 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxString.h"
#include <stdio.h>
namespace physx
{
// Print 'str' to stdout followed by a newline (same output as puts()).
void PxPrintString(const char* str)
{
	::fputs(str, stdout);
	::fputc('\n', stdout);
}
} // namespace physx

View File

@@ -0,0 +1,152 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAllocator.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxSList.h"
#include "foundation/PxThread.h"
#include <pthread.h>
#if PX_EMSCRIPTEN
#define USE_MUTEX
#endif
namespace physx
{
namespace
{
#if defined(USE_MUTEX)
// Emscripten build: guard the list with a pthread mutex (RAII lock/unlock).
class ScopedMutexLock
{
pthread_mutex_t& mMutex;
public:
PX_INLINE ScopedMutexLock(pthread_mutex_t& mutex) : mMutex(mutex)
{
pthread_mutex_lock(&mMutex);
}
PX_INLINE ~ScopedMutexLock()
{
pthread_mutex_unlock(&mMutex);
}
};
typedef ScopedMutexLock ScopedLock;
#else
// All other unix builds: test-and-set spin lock, acquired in the ctor and
// released in the dtor.
struct ScopedSpinLock
{
PX_FORCE_INLINE ScopedSpinLock(volatile int32_t& lock) : mLock(lock)
{
while(__sync_lock_test_and_set(&mLock, 1))
{
// spinning without atomics is usually
// causing less bus traffic. -> only one
// CPU is modifying the cache line.
while(lock)
PxSpinLockPause();
}
}
PX_FORCE_INLINE ~ScopedSpinLock()
{
__sync_lock_release(&mLock);
}
private:
volatile int32_t& mLock;
};
typedef ScopedSpinLock ScopedLock;
#endif
// Actual storage behind the opaque PxSListImpl: the list head plus the
// platform lock (pthread mutex on emscripten, spin-lock word elsewhere).
struct SListDetail
{
PxSListEntry* head;
#if defined(USE_MUTEX)
pthread_mutex_t lock;
#else
volatile int32_t lock;
#endif
};
// Reinterpret the opaque impl pointer as its real layout.
template <typename T>
SListDetail* getDetail(T* impl)
{
return reinterpret_cast<SListDetail*>(impl);
}
}
// Initialize over caller-provided storage (sized via getSize()):
// empty list, unlocked lock.
PxSListImpl::PxSListImpl()
{
getDetail(this)->head = NULL;
#if defined(USE_MUTEX)
pthread_mutex_init(&getDetail(this)->lock, NULL);
#else
getDetail(this)->lock = 0; // 0 == unlocked
#endif
}
// Destroy the mutex on the emscripten path; the spin-lock word needs no
// teardown. List entries are owned by the caller and are not freed here.
PxSListImpl::~PxSListImpl()
{
#if defined(USE_MUTEX)
pthread_mutex_destroy(&getDetail(this)->lock);
#endif
}
// Push an entry onto the head of the list (LIFO), under the lock.
void PxSListImpl::push(PxSListEntry* entry)
{
ScopedLock lock(getDetail(this)->lock);
entry->mNext = getDetail(this)->head;
getDetail(this)->head = entry;
}
// Pop the most recently pushed entry; returns NULL when the list is empty.
PxSListEntry* PxSListImpl::pop()
{
ScopedLock lock(getDetail(this)->lock);
PxSListEntry* result = getDetail(this)->head;
if(result != NULL)
getDetail(this)->head = result->mNext;
return result;
}
// Detach and return the entire chain, leaving the list empty.
PxSListEntry* PxSListImpl::flush()
{
ScopedLock lock(getDetail(this)->lock);
PxSListEntry* result = getDetail(this)->head;
getDetail(this)->head = NULL;
return result;
}
// Bytes of storage PxSList must reserve for placement-constructing this impl.
uint32_t PxSListImpl::getSize()
{
return sizeof(SListDetail);
}
} // namespace physx

View File

@@ -0,0 +1,479 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxSocket.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/time.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#define INVALID_SOCKET -1
#ifndef SOMAXCONN
#define SOMAXCONN 5
#endif
namespace physx
{
// Default size of the coalescing buffer used by BufferedSocketImpl.
const uint32_t PxSocket::DEFAULT_BUFFER_SIZE = 32768;
// Unix TCP socket backend for PxSocket: unbuffered blocking or non-blocking
// client/server connection. Holds at most one data socket and one listen
// socket at a time.
class SocketImpl
{
public:
SocketImpl(bool isBlocking);
virtual ~SocketImpl();
// client side: connect to host:port with a millisecond timeout
bool connect(const char* host, uint16_t port, uint32_t timeout);
// server side: bind and listen on 'port'
bool listen(uint16_t port);
// accept one pending connection (optionally blocking until one arrives)
bool accept(bool block);
void disconnect();
void setBlocking(bool blocking);
virtual uint32_t write(const uint8_t* data, uint32_t length);
virtual bool flush();
uint32_t read(uint8_t* data, uint32_t length);
PX_FORCE_INLINE bool isBlocking() const
{
return mIsBlocking;
}
PX_FORCE_INLINE bool isConnected() const
{
return mIsConnected;
}
PX_FORCE_INLINE const char* getHost() const
{
return mHost;
}
PX_FORCE_INLINE uint16_t getPort() const
{
return mPort;
}
protected:
// true when a failed read/write was only a non-blocking "would block"
bool nonBlockingTimeout() const;
int32_t mSocket; // data socket fd, or INVALID_SOCKET
int32_t mListenSocket; // listen socket fd, or INVALID_SOCKET
const char* mHost; // host string passed to connect() (not copied)
uint16_t mPort;
bool mIsConnected;
bool mIsBlocking; // user-requested blocking mode for the data socket
bool mListenMode; // true after a successful listen()
};
void socketSetBlockingInternal(int32_t socket, bool blocking);
// Construct a disconnected socket wrapper; no OS resources are acquired yet.
SocketImpl::SocketImpl(bool isBlocking)
: mSocket(INVALID_SOCKET)
, mListenSocket(INVALID_SOCKET)
, mHost(NULL)
, mPort(0)
, mIsConnected(false)
, mIsBlocking(isBlocking)
, mListenMode(false)
{
}
// NOTE(review): the destructor does not call disconnect(); presumably the
// owning PxSocket does -- confirm, otherwise descriptors can leak.
SocketImpl::~SocketImpl()
{
}
// Establish a TCP connection to host:port, enforcing 'timeout' (milliseconds).
// The connect itself always runs in non-blocking mode so the timeout can be
// implemented with poll(); on success the socket is switched back to the
// user's requested blocking mode. Returns true on success; on any failure
// the socket is torn down via disconnect() and false is returned.
// NOTE(review): 'host' is stored as-is in mHost (no copy) -- the caller's
// string must outlive the connection.
bool SocketImpl::connect(const char* host, uint16_t port, uint32_t timeout)
{
sockaddr_in socketAddress;
intrinsics::memSet(&socketAddress, 0, sizeof(sockaddr_in));
socketAddress.sin_family = AF_INET;
socketAddress.sin_port = htons(port);
// get host: resolve by name first, then fall back to a dotted-quad address
hostent* hp = gethostbyname(host);
if(!hp)
{
in_addr a;
a.s_addr = inet_addr(host);
hp = gethostbyaddr(reinterpret_cast<const char*>(&a), sizeof(in_addr), AF_INET);
if(!hp)
return false;
}
intrinsics::memCopy(&socketAddress.sin_addr, hp->h_addr_list[0], hp->h_length);
// connect
mSocket = socket(AF_INET, SOCK_STREAM, 0);
if(mSocket == INVALID_SOCKET)
return false;
// temporarily non-blocking so the connect can be timed out with poll()
socketSetBlockingInternal(mSocket, false);
int connectRet = ::connect(mSocket, reinterpret_cast<sockaddr*>(&socketAddress), sizeof(socketAddress));
if(connectRet < 0)
{
// anything other than "connect in progress" is a hard failure
if(errno != EINPROGRESS)
{
disconnect();
return false;
}
// Setup poll function call to monitor the connect call.
// By querying for POLLOUT we're checking if the socket is
// ready for writing.
pollfd pfd;
pfd.fd = mSocket;
pfd.events = POLLOUT;
const int pollResult = ::poll(&pfd, 1, timeout /*milliseconds*/);
const bool pollTimeout = (pollResult == 0);
const bool pollError = (pollResult < 0); // an error inside poll happened. Can check error with `errno` variable.
if(pollTimeout || pollError)
{
disconnect();
return false;
}
else
{
PX_ASSERT(pollResult == 1);
// check that event was precisely POLLOUT and not anything else (e.g., errors, hang-up)
bool test = (pfd.revents & POLLOUT) && !(pfd.revents & (~POLLOUT));
if(!test)
{
disconnect();
return false;
}
}
// check if we are really connected, above code seems to return
// true if host is a unix machine even if the connection was
// not accepted.
char buffer;
if(recv(mSocket, &buffer, 0, 0) < 0)
{
if(errno != EWOULDBLOCK)
{
disconnect();
return false;
}
}
}
// restore the user's requested blocking mode
socketSetBlockingInternal(mSocket, mIsBlocking);
#if PX_APPLE_FAMILY
// avoid SIGPIPE on writes to a closed peer (Apple has no MSG_NOSIGNAL)
int noSigPipe = 1;
setsockopt(mSocket, SOL_SOCKET, SO_NOSIGPIPE, &noSigPipe, sizeof(int));
#endif
mIsConnected = true;
mPort = port;
mHost = host;
return true;
}
// Create a TCP listen socket bound to INADDR_ANY:port; returns true when
// bind + listen both succeed.
// NOTE(review): on setsockopt/bind/listen failure the descriptor is not
// closed here; cleanup is deferred to disconnect() -- confirm callers always
// call it after a failed listen.
bool SocketImpl::listen(uint16_t port)
{
mListenSocket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
if(mListenSocket == INVALID_SOCKET)
return false;
// enable address reuse: "Address already in use" error message
int yes = 1;
if(setsockopt(mListenSocket, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1)
return false;
mListenMode = true;
sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = INADDR_ANY;
intrinsics::memSet(addr.sin_zero, '\0', sizeof addr.sin_zero);
return bind(mListenSocket, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != -1 &&
::listen(mListenSocket, SOMAXCONN) != -1;
}
// Accept one pending connection on the listen socket. 'block' decides whether
// this call waits for a connection to arrive; the accepted data socket is then
// switched to the user's configured blocking mode. Returns true on success.
bool SocketImpl::accept(bool block)
{
if(mIsConnected || !mListenMode)
return false;
// match the listen socket's blocking mode to the requested accept behavior
socketSetBlockingInternal(mListenSocket, block);
int32_t clientSocket = ::accept(mListenSocket, 0, 0);
if(clientSocket == INVALID_SOCKET)
return false;
mSocket = clientSocket;
mIsConnected = true;
socketSetBlockingInternal(mSocket, mIsBlocking); // force the mode to whatever the user set
return mIsConnected;
}
// Tear down the data socket and the listen socket (whichever are open) and
// reset all connection state. Safe to call repeatedly.
void SocketImpl::disconnect()
{
	if(mSocket != INVALID_SOCKET)
	{
		if(mIsConnected)
		{
			// a graceful shutdown wants a blocking socket
			socketSetBlockingInternal(mSocket, true);
			shutdown(mSocket, SHUT_RDWR);
		}
		close(mSocket);
		mSocket = INVALID_SOCKET;
	}

	if(mListenSocket != INVALID_SOCKET)
	{
		close(mListenSocket);
		mListenSocket = INVALID_SOCKET;
	}

	// reset connection bookkeeping
	mIsConnected = false;
	mListenMode = false;
	mPort = 0;
	mHost = NULL;
}
// True when the last socket operation failed only because this socket is
// non-blocking and the call would have had to wait (errno == EWOULDBLOCK).
bool SocketImpl::nonBlockingTimeout() const
{
	return !mIsBlocking && errno == EWOULDBLOCK;
}
// Set or clear O_NONBLOCK on `socket` while preserving all other file status
// flags. Does nothing if the flags cannot be queried (the original code OR-ed
// the -1 error return of fcntl into the new flag word).
void socketSetBlockingInternal(int32_t socket, bool blocking)
{
	int mode = fcntl(socket, F_GETFL, 0);
	if(mode == -1)
		return; // invalid descriptor: nothing we can safely do
	if(!blocking)
		mode |= O_NONBLOCK;
	else
		mode &= ~O_NONBLOCK;
	fcntl(socket, F_SETFL, mode);
}
// should be cross-platform from here down
// Record the requested blocking mode and, if a connection is live, apply it
// to the underlying socket immediately.
void SocketImpl::setBlocking(bool blocking)
{
	if(blocking == mIsBlocking)
		return; // already in the requested mode

	mIsBlocking = blocking;
	if(isConnected())
		socketSetBlockingInternal(mSocket, blocking);
}
// Unbuffered sockets write straight through, so there is nothing to flush.
bool SocketImpl::flush()
{
	return true;
}
// Send `length` bytes; returns the number actually sent (0 on failure or on
// a non-blocking would-block). Any other failure drops the connection.
uint32_t SocketImpl::write(const uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	const int sent = send(mSocket, reinterpret_cast<const char*>(data), int32_t(length), 0);
	if(sent > 0)
		return uint32_t(sent);

	// zero/negative result: treat as a dead connection unless this is just a
	// non-blocking "would block" condition
	if(!nonBlockingTimeout())
		disconnect();
	return 0;
}
// Receive up to `length` bytes; returns the number actually read (0 on
// failure or on a non-blocking would-block). Any other failure drops the
// connection.
uint32_t SocketImpl::read(uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;

	const int32_t received = recv(mSocket, reinterpret_cast<char*>(data), int32_t(length), 0);
	if(received > 0)
		return uint32_t(received);

	// zero means orderly shutdown, negative means error; both end the
	// connection unless we merely hit a non-blocking timeout
	if(!nonBlockingTimeout())
		disconnect();
	return 0;
}
// Socket variant that coalesces small writes into a fixed-size staging buffer
// and only hits the underlying socket when the buffer fills up or on an
// explicit flush().
class BufferedSocketImpl : public SocketImpl
{
  public:
	BufferedSocketImpl(bool isBlocking) : SocketImpl(isBlocking), mBufferPos(0)
	{
	}
	virtual ~BufferedSocketImpl()
	{
	}
	bool flush();
	uint32_t write(const uint8_t* data, uint32_t length);

  private:
	uint32_t mBufferPos; // number of valid bytes currently staged in mBuffer
	uint8_t mBuffer[PxSocket::DEFAULT_BUFFER_SIZE];
};
bool BufferedSocketImpl::flush()
{
uint32_t totalBytesWritten = 0;
while(totalBytesWritten < mBufferPos && mIsConnected)
totalBytesWritten += int32_t(SocketImpl::write(mBuffer + totalBytesWritten, mBufferPos - totalBytesWritten));
bool ret = (totalBytesWritten == mBufferPos);
mBufferPos = 0;
return ret;
}
// Stage `data` into the internal buffer, draining a full buffer to the socket
// whenever the incoming bytes would overflow it. Returns the number of bytes
// consumed from `data`; on a non-blocking socket this may be less than
// `length` if the socket backs up.
uint32_t BufferedSocketImpl::write(const uint8_t* data, uint32_t length)
{
	uint32_t bytesWritten = 0;
	// fill-and-drain loop: runs while the remaining input would fill the buffer
	while(mBufferPos + length >= PxSocket::DEFAULT_BUFFER_SIZE)
	{
		uint32_t currentChunk = PxSocket::DEFAULT_BUFFER_SIZE - mBufferPos;
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, currentChunk);
		bytesWritten += uint32_t(currentChunk); // for the user, this is consumed even if we fail to shove it down a
		                                        // non-blocking socket
		uint32_t sent = SocketImpl::write(mBuffer, PxSocket::DEFAULT_BUFFER_SIZE);
		// whatever did not go out stays staged for the next attempt
		mBufferPos = PxSocket::DEFAULT_BUFFER_SIZE - sent;
		if(sent < PxSocket::DEFAULT_BUFFER_SIZE) // non-blocking or error
		{
			// slide the unsent tail back to the front of the buffer
			if(sent) // we can reasonably hope this is rare
				intrinsics::memMove(mBuffer, mBuffer + sent, mBufferPos);
			return bytesWritten;
		}
		length -= currentChunk;
	}
	// remainder fits in the buffer without triggering a send
	if(length > 0)
	{
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, length);
		bytesWritten += length;
		mBufferPos += length;
	}
	return bytesWritten;
}
// Build the implementation object in place: a buffering or a plain socket,
// allocated through the foundation allocator and constructed with placement
// new (destroyed the same way in ~PxSocket).
PxSocket::PxSocket(bool inIsBuffering, bool isBlocking)
{
	if(inIsBuffering)
	{
		void* storage = PX_ALLOC(sizeof(BufferedSocketImpl), "BufferedSocketImpl");
		mImpl = PX_PLACEMENT_NEW(storage, BufferedSocketImpl)(isBlocking);
	}
	else
	{
		void* storage = PX_ALLOC(sizeof(SocketImpl), "SocketImpl");
		mImpl = PX_PLACEMENT_NEW(storage, SocketImpl)(isBlocking);
	}
}
// Flush any buffered data, drop the connection, then destroy the impl in
// place (it was constructed with placement new over PX_ALLOC'd storage).
PxSocket::~PxSocket()
{
	mImpl->flush();
	mImpl->disconnect();
	// virtual dtor through the base type also destroys BufferedSocketImpl
	mImpl->~SocketImpl();
	PX_FREE(mImpl);
}
// The PxSocket public API below forwards directly to the implementation.

bool PxSocket::connect(const char* host, uint16_t port, uint32_t timeout)
{
	return mImpl->connect(host, port, timeout);
}

bool PxSocket::listen(uint16_t port)
{
	return mImpl->listen(port);
}

bool PxSocket::accept(bool block)
{
	return mImpl->accept(block);
}

void PxSocket::disconnect()
{
	mImpl->disconnect();
}

bool PxSocket::isConnected() const
{
	return mImpl->isConnected();
}

const char* PxSocket::getHost() const
{
	return mImpl->getHost();
}

uint16_t PxSocket::getPort() const
{
	return mImpl->getPort();
}
// Flush buffered data; a disconnected socket reports failure.
bool PxSocket::flush()
{
	return mImpl->isConnected() ? mImpl->flush() : false;
}
// Write bytes to the socket; a disconnected socket consumes nothing.
uint32_t PxSocket::write(const uint8_t* data, uint32_t length)
{
	return mImpl->isConnected() ? mImpl->write(data, length) : 0;
}
// Read bytes from the socket; a disconnected socket yields nothing.
uint32_t PxSocket::read(uint8_t* data, uint32_t length)
{
	return mImpl->isConnected() ? mImpl->read(data, length) : 0;
}
// Forward the blocking-mode accessors to the implementation.

void PxSocket::setBlocking(bool blocking)
{
	mImpl->setBlocking(blocking);
}

bool PxSocket::isBlocking() const
{
	return mImpl->isBlocking();
}
} // namespace physx

View File

@@ -0,0 +1,159 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSync.h"
#include <errno.h>
#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
namespace physx
{
namespace
{
// Shared state behind the opaque PxSyncImpl blob: a mutex/condition pair plus
// the event flag. setCounter increments on every successful set() so a waiter
// can detect a set()/reset() pair that happened entirely while it was waking.
class SyncImpl
{
  public:
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	// NOTE(review): `volatile` does not provide thread synchronization by
	// itself; these fields appear to be accessed under `mutex`, which is what
	// actually makes them safe — confirm no unlocked readers exist.
	volatile int setCounter; // generation count: bumped each time set() fires
	volatile bool is_set;    // current event state
};

// PxSyncImpl is an opaque buffer of getSize() bytes; reinterpret as SyncImpl.
SyncImpl* getSync(PxSyncImpl* impl)
{
	return reinterpret_cast<SyncImpl*>(impl);
}
}
// Number of bytes the caller must reserve to host a PxSyncImpl instance.
uint32_t PxSyncImpl::getSize()
{
	return sizeof(SyncImpl);
}
// RAII helper: holds the given pthread mutex for the lifetime of the object.
struct PxUnixScopeLock
{
	PxUnixScopeLock(pthread_mutex_t& m) : mMutex(m)
	{
		pthread_mutex_lock(&mMutex);
	}

	~PxUnixScopeLock()
	{
		pthread_mutex_unlock(&mMutex);
	}

  private:
	pthread_mutex_t& mMutex;
};
// Initialize the mutex/condition pair and start in the "not set" state.
PxSyncImpl::PxSyncImpl()
{
	SyncImpl* sync = getSync(this);

	int status = pthread_mutex_init(&sync->mutex, 0);
	PX_ASSERT(!status);
	status = pthread_cond_init(&sync->cond, 0);
	PX_ASSERT(!status);
	PX_UNUSED(status);

	sync->is_set = false;
	sync->setCounter = 0;
}
PxSyncImpl::~PxSyncImpl()
{
	// destroy in reverse order of construction
	pthread_cond_destroy(&getSync(this)->cond);
	pthread_mutex_destroy(&getSync(this)->mutex);
}

// Clear the event flag; subsequent wait() calls block until the next set().
void PxSyncImpl::reset()
{
	PxUnixScopeLock lock(getSync(this)->mutex);
	getSync(this)->is_set = false;
}
void PxSyncImpl::set()
{
PxUnixScopeLock lock(getSync(this)->mutex);
if(!getSync(this)->is_set)
{
getSync(this)->is_set = true;
getSync(this)->setCounter++;
pthread_cond_broadcast(&getSync(this)->cond);
}
}
// Block until the event is set or `ms` milliseconds elapse; ms == uint32_t(-1)
// waits forever. Returns true if the event is set, or if a set()/reset() pair
// occurred while we were waiting (detected via the generation counter).
bool PxSyncImpl::wait(uint32_t ms)
{
	PxUnixScopeLock lock(getSync(this)->mutex);
	// remember the generation so a set() followed by a racing reset() still
	// counts as "signaled" for this waiter
	int lastSetCounter = getSync(this)->setCounter;
	if(!getSync(this)->is_set)
	{
		if(ms == uint32_t(-1))
		{
			// have to loop here and check is_set since pthread_cond_wait can return successfully
			// even if it was not signaled by pthread_cond_broadcast (OS efficiency design decision)
			int status = 0;
			while(!status && !getSync(this)->is_set && (lastSetCounter == getSync(this)->setCounter))
				status = pthread_cond_wait(&getSync(this)->cond, &getSync(this)->mutex);
			PX_ASSERT((!status && getSync(this)->is_set) || (lastSetCounter != getSync(this)->setCounter));
		}
		else
		{
			// convert the relative timeout into the absolute deadline that
			// pthread_cond_timedwait expects
			timespec ts;
			timeval tp;
			gettimeofday(&tp, NULL);
			uint32_t sec = ms / 1000;
			uint32_t usec = (ms - 1000 * sec) * 1000;

			// sschirm: taking into account that us might accumulate to a second
			// otherwise the pthread_cond_timedwait complains on osx.
			usec = tp.tv_usec + usec;
			uint32_t div_sec = usec / 1000000;
			uint32_t rem_usec = usec - div_sec * 1000000;

			ts.tv_sec = tp.tv_sec + sec + div_sec;
			ts.tv_nsec = rem_usec * 1000;

			// have to loop here and check is_set since pthread_cond_timedwait can return successfully
			// even if it was not signaled by pthread_cond_broadcast (OS efficiency design decision)
			int status = 0;
			while(!status && !getSync(this)->is_set && (lastSetCounter == getSync(this)->setCounter))
				status = pthread_cond_timedwait(&getSync(this)->cond, &getSync(this)->mutex, &ts);
			PX_ASSERT((!status && getSync(this)->is_set) || (status == ETIMEDOUT) ||
			          (lastSetCounter != getSync(this)->setCounter));
		}
	}
	return getSync(this)->is_set || (lastSetCounter != getSync(this)->setCounter);
}
} // namespace physx

View File

@@ -0,0 +1,466 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxThread.h"
#include <math.h>
#if !PX_APPLE_FAMILY && !defined(__CYGWIN__) && !PX_EMSCRIPTEN
#include <bits/local_lim.h> // PTHREAD_STACK_MIN
#endif
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>
#if !PX_APPLE_FAMILY && !PX_EMSCRIPTEN
#include <asm/unistd.h>
#include <sys/resource.h>
#endif
#if PX_APPLE_FAMILY
#include <sys/types.h>
#include <sys/sysctl.h>
#include <TargetConditionals.h>
#include <pthread.h>
#endif
#define PxSpinLockPause() asm("nop")
namespace physx
{
namespace
{
// Lifecycle states of a thread managed by PxThreadImpl.
typedef enum
{
	ePxThreadNotStarted,
	ePxThreadStarted,
	ePxThreadStopped
} PxThreadState;

// Backing state for the opaque PxThreadImpl blob.
class ThreadImpl
{
  public:
	PxThreadImpl::ExecuteFn fn;     // entry function (NULL when running a PxRunnable)
	void* arg;                      // argument for fn, or the PxRunnable*
	volatile int32_t quitNow;       // incremented by signalQuit()
	volatile int32_t threadStarted; // flips to 1 once the child published its TID
	volatile int32_t state;         // a PxThreadState value
	pthread_t thread;
	pid_t tid;                      // kernel thread id, written by the child thread
	uint32_t affinityMask;          // requested affinity, applied once running
	const char* name;
};

// PxThreadImpl is an opaque buffer of getSize() bytes; reinterpret in place.
ThreadImpl* getThread(PxThreadImpl* impl)
{
	return reinterpret_cast<ThreadImpl*>(impl);
}

// Runs in the child thread: publish the kernel TID, then unblock the parent
// spinning in start().
static void setTid(ThreadImpl& threadImpl)
{
	// query TID
	// AM: TODO: neither of the below are implemented
#if PX_APPLE_FAMILY
	threadImpl.tid = syscall(SYS_gettid);
#elif PX_EMSCRIPTEN
	threadImpl.tid = pthread_self();
#else
	threadImpl.tid = syscall(__NR_gettid);
#endif

	// notify/unblock parent thread
	PxAtomicCompareExchange(&(threadImpl.threadStarted), 1, 0);
}

// pthread entry point shared by all PxThreadImpl threads.
void* PxThreadStart(void* arg)
{
	ThreadImpl* impl = getThread(reinterpret_cast<PxThreadImpl*>(arg));
	impl->state = ePxThreadStarted;

	// run setTid in thread's context
	setTid(*impl);

	// then run either the passed in function or execute from the derived class (Runnable).
	if(impl->fn)
		(*impl->fn)(impl->arg);
	else if(impl->arg)
		(reinterpret_cast<PxRunnable*>(impl->arg))->execute();
	return 0;
}
}
// Number of bytes the caller must reserve to host a PxThreadImpl instance.
uint32_t PxThreadImpl::getSize()
{
	return sizeof(ThreadImpl);
}

// Identifier of the calling thread.
PxThreadImpl::Id PxThreadImpl::getId()
{
	return Id(pthread_self());
}
// Construct an idle thread object; call start() later to launch it.
PxThreadImpl::PxThreadImpl()
{
	ThreadImpl* t = getThread(this);
	t->thread = 0;
	t->tid = 0;
	t->state = ePxThreadNotStarted;
	t->quitNow = 0;
	t->threadStarted = 0;
	t->fn = NULL;
	t->arg = NULL;
	t->affinityMask = 0;
	t->name = "set my name before starting me";
}
// Construct and immediately launch a thread executing fn(arg), labeled `name`.
PxThreadImpl::PxThreadImpl(PxThreadImpl::ExecuteFn fn, void* arg, const char* name)
{
	ThreadImpl* t = getThread(this);
	t->thread = 0;
	t->tid = 0;
	t->state = ePxThreadNotStarted;
	t->quitNow = 0;
	t->threadStarted = 0;
	t->fn = fn;
	t->arg = arg;
	t->affinityMask = 0;
	t->name = name;

	// auto-start with the default stack size and no runnable
	start(0, NULL);
}
// A thread still running at destruction time is forcibly cancelled.
PxThreadImpl::~PxThreadImpl()
{
	if(getThread(this)->state == ePxThreadStarted)
		kill();
}
// Launch the thread. `stackSize` of 0 selects getDefaultStackSize(); a
// PxRunnable may be supplied instead of the ExecuteFn passed at construction.
// Blocks until the child has published its TID so that TID-dependent calls
// (e.g. setAffinityMask) work immediately after return.
void PxThreadImpl::start(uint32_t stackSize, PxRunnable* runnable)
{
	if(getThread(this)->state != ePxThreadNotStarted)
		return;

	if(stackSize == 0)
		stackSize = getDefaultStackSize();

#if defined(PTHREAD_STACK_MIN)
	// pthread_attr_setstacksize rejects values below the platform minimum
	if(stackSize < PTHREAD_STACK_MIN)
	{
		PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__,
		                        "PxThreadImpl::start(): stack size was set below PTHREAD_STACK_MIN");
		stackSize = PTHREAD_STACK_MIN;
	}
#endif

	// only adopt the runnable if neither a function nor an argument was set
	if(runnable && !getThread(this)->arg && !getThread(this)->fn)
		getThread(this)->arg = runnable;

	pthread_attr_t attr;
	int status = pthread_attr_init(&attr);
	PX_ASSERT(!status);
	PX_UNUSED(status);

	status = pthread_attr_setstacksize(&attr, stackSize);
	PX_ASSERT(!status);
	status = pthread_create(&getThread(this)->thread, &attr, PxThreadStart, this);
	PX_ASSERT(!status);

	// wait for thread to startup and write out TID
	// otherwise TID dependent calls like setAffinity will fail.
	while(PxAtomicCompareExchange(&(getThread(this)->threadStarted), 1, 1) == 0)
		yield();

	// here we are sure that getThread(this)->state >= ePxThreadStarted

	status = pthread_attr_destroy(&attr);
	PX_ASSERT(!status);

	// apply stored affinity mask
	if(getThread(this)->affinityMask)
		setAffinityMask(getThread(this)->affinityMask);

	if (getThread(this)->name)
		setName(getThread(this)->name);
}
// Ask the thread to quit; the thread itself must poll quitIsSignalled().
void PxThreadImpl::signalQuit()
{
	PxAtomicIncrement(&(getThread(this)->quitNow));
}

// Join the thread. Returns false if it was never started.
bool PxThreadImpl::waitForQuit()
{
	if(getThread(this)->state == ePxThreadNotStarted)
		return false;

	// works also with a stopped/exited thread if the handle is still valid
	pthread_join(getThread(this)->thread, NULL);
	getThread(this)->state = ePxThreadStopped;
	return true;
}

// True once signalQuit() has been called at least once.
bool PxThreadImpl::quitIsSignalled()
{
	// compare-exchange with identical values is used as an atomic read
	return PxAtomicCompareExchange(&(getThread(this)->quitNow), 0, 0) != 0;
}
#if defined(PX_GCC_FAMILY)
__attribute__((noreturn))
#endif
// Terminate the calling thread (must be called from the thread itself);
// never returns.
void PxThreadImpl::quit()
{
	getThread(this)->state = ePxThreadStopped;
	pthread_exit(0);
}

// Forcibly cancel the thread from outside. Prefer signalQuit()+waitForQuit();
// cancellation gives the thread no chance to clean up.
void PxThreadImpl::kill()
{
	if(getThread(this)->state == ePxThreadStarted)
		pthread_cancel(getThread(this)->thread);
	getThread(this)->state = ePxThreadStopped;
}
// Sleep the calling thread for at least `ms` milliseconds, restarting
// nanosleep with the remaining time whenever it is interrupted by a signal.
void PxThreadImpl::sleep(uint32_t ms)
{
	timespec sleepTime;
	uint32_t remainder = ms % 1000;
	// tv_sec holds whole seconds; the previous code stored milliseconds here,
	// making e.g. sleep(1500) wait 1000 seconds instead of 1.5.
	sleepTime.tv_sec = (ms - remainder) / 1000;
	sleepTime.tv_nsec = remainder * 1000000L;

	while(nanosleep(&sleepTime, &sleepTime) == -1)
		continue;
}
// Give up the remainder of the calling thread's time slice.
void PxThreadImpl::yield()
{
	sched_yield();
}

// Spin-wait hint to the CPU (does not involve the scheduler).
void PxThreadImpl::yieldProcessor()
{
#if (PX_ARM || PX_A64)
	__asm__ __volatile__("yield");
#else
	__asm__ __volatile__("pause");
#endif
}
// Request a CPU affinity mask for this thread. A zero mask is ignored. The
// mask is always stored so start() can re-apply it if the thread is not
// running yet. Returns the previous mask, or 0 on failure / unsupported
// platforms (Apple, Emscripten).
uint32_t PxThreadImpl::setAffinityMask(uint32_t mask)
{
	// Same as windows impl if mask is zero
	if(!mask)
		return 0;

	getThread(this)->affinityMask = mask;

	uint64_t prevMask = 0;

	if(getThread(this)->state == ePxThreadStarted)
	{
#if PX_EMSCRIPTEN
		// not supported
#elif !PX_APPLE_FAMILY // Apple doesn't support syscall with getaffinity and setaffinity
		int32_t errGet = syscall(__NR_sched_getaffinity, getThread(this)->tid, sizeof(prevMask), &prevMask);
		if(errGet < 0)
			return 0;

		int32_t errSet = syscall(__NR_sched_setaffinity, getThread(this)->tid, sizeof(mask), &mask);
		if(errSet != 0)
			return 0;
#endif
	}

	return uint32_t(prevMask);
}
// Store the thread's name. Naming an already-running thread is intentionally
// not forwarded to the OS (see comment below).
void PxThreadImpl::setName(const char* name)
{
	getThread(this)->name = name;

	if (getThread(this)->state == ePxThreadStarted)
	{
		// not implemented because most unix APIs expect setName()
		// to be called from the thread's context. Example see next comment:

		// this works only with the current thread and can rename
		// the main process if used in the wrong context:
		// prctl(PR_SET_NAME, reinterpret_cast<unsigned long>(name) ,0,0,0);
		PX_UNUSED(name);
	}
}
#if !PX_APPLE_FAMILY
// Map a native scheduler priority (for `policy`) onto the PxThreadPriority
// scale. Relies on eHIGH == 0 and priorities growing toward eLOW.
static PxThreadPriority::Enum convertPriorityFromLinux(uint32_t inPrio, int policy)
{
	PX_COMPILE_TIME_ASSERT(PxThreadPriority::eLOW > PxThreadPriority::eHIGH);
	PX_COMPILE_TIME_ASSERT(PxThreadPriority::eHIGH == 0);

	int maxL = sched_get_priority_max(policy);
	int minL = sched_get_priority_min(policy);
	int rangeL = maxL - minL;
	int rangeNv = PxThreadPriority::eLOW - PxThreadPriority::eHIGH;

	// case for default scheduler policy
	if(rangeL == 0)
		return PxThreadPriority::eNORMAL;

	// linear rescale; note the axis flip (higher native prio -> lower enum)
	float floatPrio = (float(maxL - inPrio) * float(rangeNv)) / float(rangeL);

	return PxThreadPriority::Enum(int(roundf(floatPrio)));
}

// Inverse mapping: PxThreadPriority onto the native range for `policy`.
static int convertPriorityToLinux(PxThreadPriority::Enum inPrio, int policy)
{
	int maxL = sched_get_priority_max(policy);
	int minL = sched_get_priority_min(policy);
	int rangeL = maxL - minL;
	int rangeNv = PxThreadPriority::eLOW - PxThreadPriority::eHIGH;

	// case for default scheduler policy
	if(rangeL == 0)
		return 0;

	float floatPrio = (float(PxThreadPriority::eLOW - inPrio) * float(rangeL)) / float(rangeNv);

	return minL + int(roundf(floatPrio));
}
#endif
// Apply a PxThreadPriority to the thread via pthread scheduling parameters.
// No-op on Apple platforms.
void PxThreadImpl::setPriority(PxThreadPriority::Enum val)
{
	PX_UNUSED(val);
#if !PX_APPLE_FAMILY
	int policy;
	sched_param s_param;
	// keep the current policy; only translate and replace the priority
	pthread_getschedparam(getThread(this)->thread, &policy, &s_param);
	s_param.sched_priority = convertPriorityToLinux(val, policy);
	pthread_setschedparam(getThread(this)->thread, policy, &s_param);
#endif
}
// Read the scheduling priority of an arbitrary thread, converted to the
// PxThreadPriority scale. Falls back to eNORMAL on failure or on Apple.
PxThreadPriority::Enum PxThreadImpl::getPriority(Id pthread)
{
	PX_UNUSED(pthread);
#if !PX_APPLE_FAMILY
	int policy;
	sched_param s_param;
	int ret = pthread_getschedparam(pthread_t(pthread), &policy, &s_param);
	if(ret == 0)
		return convertPriorityFromLinux(s_param.sched_priority, policy);
	else
		return PxThreadPriority::eNORMAL;
#else
	return PxThreadPriority::eNORMAL;
#endif
}
// Number of physical CPU cores; 0 if it cannot be determined.
// NOTE(review): on Linux, /sys/devices/system/cpu/possible enumerates
// possible *logical* CPUs, so with hyper-threading this may over-count
// physical cores — confirm whether callers actually want the logical count.
uint32_t PxThreadImpl::getNbPhysicalCores()
{
#if PX_APPLE_FAMILY
	int count;
	size_t size = sizeof(count);
	return sysctlbyname("hw.physicalcpu", &count, &size, NULL, 0) ? 0 : count;
#else
	// Linux exposes CPU topology using /sys/devices/system/cpu
	// https://www.kernel.org/doc/Documentation/cputopology.txt
	if(FILE* f = fopen("/sys/devices/system/cpu/possible", "r"))
	{
		int minIndex, maxIndex;
		// the file is either a "min-max" range or a single index
		int n = fscanf(f, "%d-%d", &minIndex, &maxIndex);
		fclose(f);

		if(n == 2)
			return (maxIndex - minIndex) + 1;
		else if(n == 1)
			return minIndex + 1;
	}

	// For non-Linux kernels this fallback is possibly the best we can do
	// but will report logical (hyper-threaded) counts
	int n = sysconf(_SC_NPROCESSORS_CONF);
	if(n < 0)
		return 0;
	else
		return n;
#endif
}
// Thread-local-storage helpers built on pthread keys; the PxU32 index is the
// pthread_key_t cast to an integer.

// Allocate a TLS slot (no destructor). Asserts on failure.
PxU32 PxTlsAlloc()
{
	pthread_key_t key;
	int status = pthread_key_create(&key, NULL);
	PX_ASSERT(!status);
	PX_UNUSED(status);
	return PxU32(key);
}

// Release a TLS slot previously returned by PxTlsAlloc.
void PxTlsFree(PxU32 index)
{
	int status = pthread_key_delete(pthread_key_t(index));
	PX_ASSERT(!status);
	PX_UNUSED(status);
}

// Read the calling thread's pointer for the slot (NULL if never set).
void* PxTlsGet(PxU32 index)
{
	return reinterpret_cast<void*>(pthread_getspecific(pthread_key_t(index)));
}

// Same as PxTlsGet, but reinterprets the stored pointer as an integer.
size_t PxTlsGetValue(PxU32 index)
{
	return reinterpret_cast<size_t>(pthread_getspecific(pthread_key_t(index)));
}

// Store a pointer in the slot for the calling thread; returns 1 on success.
PxU32 PxTlsSet(PxU32 index, void* value)
{
	int status = pthread_setspecific(pthread_key_t(index), value);
	PX_ASSERT(!status);
	return !status;
}

// Store an integer (as a pointer) in the slot; returns 1 on success.
PxU32 PxTlsSetValue(PxU32 index, size_t value)
{
	int status = pthread_setspecific(pthread_key_t(index), reinterpret_cast<void*>(value));
	PX_ASSERT(!status);
	return !status;
}
// DM: On Linux x86-32, without implementation-specific restrictions
// the default stack size for a new thread should be 2 megabytes (kernel.org).
// NOTE: take care of this value on other architectures!
PxU32 PxThreadImpl::getDefaultStackSize()
{
	return 1 << 21; // 2 MB
}
} // namespace physx

View File

@@ -0,0 +1,115 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxTime.h"
#include <time.h>
#include <sys/time.h>
#if PX_APPLE_FAMILY
#include <mach/mach_time.h>
#endif
// Use real-time high-precision timer.
#if !PX_APPLE_FAMILY
#define CLOCKID CLOCK_REALTIME
#endif
namespace physx
{
// Counter frequency sampled once during static initialization.
static const PxCounterFrequencyToTensOfNanos gCounterFreq = PxTime::getCounterFrequency();

// The counter frequency as captured at process startup.
const PxCounterFrequencyToTensOfNanos& PxTime::getBootCounterFrequency()
{
	return gCounterFreq;
}
// Current wall-clock time in seconds with microsecond resolution.
// The timeval used to be function-static, which made concurrent callers race
// on shared storage; a stack local is just as cheap and thread-safe.
static PxTime::Second getTimeSeconds()
{
	struct timeval _tv;
	gettimeofday(&_tv, NULL);
	return double(_tv.tv_sec) + double(_tv.tv_usec) * 0.000001;
}
// Start the elapsed-time interval at construction time.
PxTime::PxTime()
{
	mLastTime = getTimeSeconds();
}
// Seconds since the previous call (or construction), restarting the interval.
PxTime::Second PxTime::getElapsedSeconds()
{
	const PxTime::Second now = getTimeSeconds();
	const PxTime::Second elapsed = now - mLastTime;
	mLastTime = now; // restart the interval
	return elapsed;
}
// Seconds since the previous getElapsedSeconds() call, WITHOUT restarting
// the interval.
PxTime::Second PxTime::peekElapsedSeconds()
{
	return getTimeSeconds() - mLastTime;
}
// Timestamp (seconds) at which the current interval started.
PxTime::Second PxTime::getLastTime() const
{
	return mLastTime;
}
#if PX_APPLE_FAMILY
// On Apple the raw counter is mach_absolute_time; its tick period is
// numer/denom nanoseconds, expressed here in tens of nanoseconds.
PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency()
{
	mach_timebase_info_data_t info;
	mach_timebase_info(&info);
	// mach_absolute_time * (info.numer/info.denom) is in units of nano seconds
	return PxCounterFrequencyToTensOfNanos(info.numer, info.denom * 10);
}

uint64_t PxTime::getCurrentCounterValue()
{
	return mach_absolute_time();
}
#else
// Elsewhere the counter is clock_gettime in nanoseconds, so the ratio to
// tens of nanoseconds is a fixed 1/10.
PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency()
{
	return PxCounterFrequencyToTensOfNanos(1, 10);
}

uint64_t PxTime::getCurrentCounterValue()
{
	struct timespec mCurrTimeInt;
	clock_gettime(CLOCKID, &mCurrTimeInt);
	// Convert to nanos as this doesn't cause a large divide here
	return (static_cast<uint64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<uint64_t>(mCurrTimeInt.tv_nsec));
}
#endif
} // namespace physx

View File

@@ -0,0 +1,172 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxAtomic.h"
namespace physx
{
// Thin wrappers mapping the PxAtomic* API onto the Win32 Interlocked
// intrinsics (full-barrier atomics).

// Atomically store val2 into *val; returns the previous value.
PxI32 PxAtomicExchange(volatile PxI32* val, PxI32 val2)
{
	return (PxI32)InterlockedExchange((volatile LONG*)val, (LONG)val2);
}

PxI64 PxAtomicExchange(volatile PxI64* val, PxI64 val2)
{
	return (PxI64)InterlockedExchange64((volatile LONG64*)val, (LONG64)val2);
}

// Atomically store exch into *dest only if *dest == comp; returns the value
// observed in *dest (equal to comp on success).
PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp)
{
	return (PxI32)InterlockedCompareExchange((volatile LONG*)dest, exch, comp);
}

PxI64 PxAtomicCompareExchange(volatile PxI64* dest, PxI64 exch, PxI64 comp)
{
	return (PxI64)InterlockedCompareExchange64((volatile LONG64*)dest, exch, comp);
}

// Pointer-width compare-exchange; same semantics as above.
void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp)
{
	return InterlockedCompareExchangePointer((volatile PVOID*)dest, exch, comp);
}

// Atomically add 1 to *val; returns the NEW value.
PxI32 PxAtomicIncrement(volatile PxI32* val)
{
	return (PxI32)InterlockedIncrement((volatile LONG*)val);
}

PxI64 PxAtomicIncrement(volatile PxI64* val)
{
	return (PxI64)InterlockedIncrement64((volatile LONG64*)val);
}

// Atomically subtract 1 from *val; returns the NEW value.
PxI32 PxAtomicDecrement(volatile PxI32* val)
{
	return (PxI32)InterlockedDecrement((volatile LONG*)val);
}

PxI64 PxAtomicDecrement(volatile PxI64* val)
{
	return (PxI64)InterlockedDecrement64((volatile LONG64*)val);
}
// Atomically add `delta` to *val; returns the NEW value.
PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta)
{
	// InterlockedAdd returns the resulting sum directly. The previous
	// hand-rolled compare-exchange fallback sat in an `else` branch behind a
	// constant `if(1)` and could never execute, so it has been removed.
	return (PxI32)InterlockedAdd((volatile LONG*)val, delta);
}
// Atomically add `delta` to *val; returns the NEW value.
PxI64 PxAtomicAdd(volatile PxI64* val, PxI64 delta)
{
	// InterlockedAdd64 returns the resulting sum directly. The previous
	// hand-rolled compare-exchange fallback sat in an `else` branch behind a
	// constant `if(1)` and could never execute, so it has been removed.
	return (PxI64)InterlockedAdd64((volatile LONG64*)val, delta);
}
// Atomically set *val = max(*val, val2) via a compare-exchange retry loop;
// returns the resulting value.
PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2)
{
	// Could do this more efficiently in asm...

	LONG newValue, oldValue;

	do
	{
		oldValue = *val;

		newValue = val2 > oldValue ? val2 : oldValue;

	} while(InterlockedCompareExchange((volatile LONG*)val, newValue, oldValue) != oldValue);

	return newValue;
}

PxI64 PxAtomicMax(volatile PxI64* val, PxI64 val2)
{
	// Could do this more efficiently in asm...

	LONG64 newValue, oldValue;

	do
	{
		oldValue = *val;

		newValue = val2 > oldValue ? val2 : oldValue;

	} while(InterlockedCompareExchange64((volatile LONG64*)val, newValue, oldValue) != oldValue);

	return newValue;
}
// Atomically OR `mask` into *val; returns the PREVIOUS value
// (InterlockedOr/And return the original operand).
PxI32 PxAtomicOr(volatile PxI32* val, PxI32 mask)
{
	return (PxI32)InterlockedOr((volatile LONG*)val, mask);
}

PxI64 PxAtomicOr(volatile PxI64* val, PxI64 mask)
{
	return (PxI64)InterlockedOr64((volatile LONG64*)val, mask);
}

// Atomically AND `mask` into *val; returns the PREVIOUS value.
PxI32 PxAtomicAnd(volatile PxI32* val, PxI32 mask)
{
	return (PxI32)InterlockedAnd((volatile LONG*)val, mask);
}

PxI64 PxAtomicAnd(volatile PxI64* val, PxI64 mask)
{
	return (PxI64)InterlockedAnd64((volatile LONG64*)val, mask);
}
} // namespace physx

View File

@@ -0,0 +1,88 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxFPU.h"
#include "float.h"
#include "foundation/PxIntrinsics.h"
#if PX_X64 || PX_ARM || PX_A64
#define _MCW_ALL _MCW_DN | _MCW_EM | _MCW_RC
#else
#define _MCW_ALL _MCW_DN | _MCW_EM | _MCW_IC | _MCW_RC | _MCW_PC
#endif
// Save the current FPU control state into mControlWords, then switch to the
// default control word plus flush-to-zero/denormals-are-zero for the guard's
// scope; the destructor restores the saved state.
physx::PxFPUGuard::PxFPUGuard()
{
	// default plus FTZ and DAZ
#if PX_X64 || PX_ARM || PX_A64
	// query current control word state
	_controlfp_s(mControlWords, 0, 0);

	// set both x87 and sse units to default + DAZ
	unsigned int cw;
	_controlfp_s(&cw, _CW_DEFAULT | _DN_FLUSH, _MCW_ALL);
#else
	// query current control word state
	__control87_2(0, 0, mControlWords, mControlWords + 1);

	// set both x87 and sse units to default + DAZ
	unsigned int x87, sse;
	__control87_2(_CW_DEFAULT | _DN_FLUSH, _MCW_ALL, &x87, &sse);
#endif
}
// Restore the FPU control state captured by the constructor, clearing any
// pending floating-point exceptions first.
physx::PxFPUGuard::~PxFPUGuard()
{
	_clearfp();

#if PX_X64 || PX_ARM || PX_A64
	// reset FP state
	unsigned int cw;
	_controlfp_s(&cw, *mControlWords, _MCW_ALL);
#else
	// reset FP state: word[0] drives the x87 unit, word[1] the SSE unit
	unsigned int x87, sse;
	__control87_2(mControlWords[0], _MCW_ALL, &x87, 0);
	__control87_2(mControlWords[1], _MCW_ALL, 0, &sse);
#endif
}
// Unmask floating-point exceptions so FP errors fault immediately; inexact
// and underflow stay masked (a set _EM_* bit in the control word means the
// exception is suppressed).
void physx::PxEnableFPExceptions()
{
	// clear any pending exceptions
	_clearfp();

	// enable all fp exceptions except inexact and underflow (common, benign)
	_controlfp_s(NULL, uint32_t(~_MCW_EM) | _EM_INEXACT | _EM_UNDERFLOW, _MCW_EM);
}
// Masks all floating-point exceptions again (the CRT default behavior).
void physx::PxDisableFPExceptions()
{
	_controlfp_s(NULL, _MCW_EM, _MCW_EM);
}

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxMutex.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxThread.h"
namespace physx
{
namespace
{
// Storage layout behind PxMutexImpl: a Win32 critical section plus, for debug
// builds, the id of the thread currently holding the lock.
struct MutexWinImpl
{
	CRITICAL_SECTION mLock;
	PxThread::Id mOwner; // only kept up to date under PX_DEBUG (see lock/unlock)
};
}
// PxMutexImpl is a size-only facade; reinterpret its storage as the real impl.
static PX_FORCE_INLINE MutexWinImpl* getMutex(PxMutexImpl* impl)
{
	return reinterpret_cast<MutexWinImpl*>(impl);
}
// Initializes the critical section in place; PxMutexImpl storage is provided
// by the caller (see getSize()).
PxMutexImpl::PxMutexImpl()
{
	InitializeCriticalSection(&getMutex(this)->mLock);
	getMutex(this)->mOwner = 0;
}
PxMutexImpl::~PxMutexImpl()
{
	DeleteCriticalSection(&getMutex(this)->mLock);
}
// Blocking acquire; critical sections are recursive per Win32 semantics.
void PxMutexImpl::lock()
{
	EnterCriticalSection(&getMutex(this)->mLock);
#if PX_DEBUG
	getMutex(this)->mOwner = PxThread::getId();
#endif
}
// Non-blocking acquire; returns true if the lock was taken.
bool PxMutexImpl::trylock()
{
	bool success = TryEnterCriticalSection(&getMutex(this)->mLock) != 0;
#if PX_DEBUG
	if(success)
		getMutex(this)->mOwner = PxThread::getId();
#endif
	return success;
}
void PxMutexImpl::unlock()
{
#if PX_DEBUG
	// ensure we are already holding the lock
	if(getMutex(this)->mOwner != PxThread::getId())
	{
		PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Mutex must be unlocked only by thread that has already acquired lock");
		return;
	}
#endif
	LeaveCriticalSection(&getMutex(this)->mLock);
}
// Bytes of storage a PxMutexImpl instance must be given by its allocator.
uint32_t PxMutexImpl::getSize()
{
	return sizeof(MutexWinImpl);
}
class ReadWriteLockImpl
{
PX_NOCOPY(ReadWriteLockImpl)
public:
ReadWriteLockImpl()
{
}
PxMutex mutex;
volatile LONG readerCount; // handle recursive writer locking
};
// Allocates and placement-constructs the implementation; readerCount starts
// at zero (no readers).
PxReadWriteLock::PxReadWriteLock()
{
	mImpl = reinterpret_cast<ReadWriteLockImpl*>(PX_ALLOC(sizeof(ReadWriteLockImpl), "ReadWriteLockImpl"));
	PX_PLACEMENT_NEW(mImpl, ReadWriteLockImpl);
	mImpl->readerCount = 0;
}
// Manual destroy + free mirrors the placement-new in the constructor.
PxReadWriteLock::~PxReadWriteLock()
{
	mImpl->~ReadWriteLockImpl();
	PX_FREE(mImpl);
}
// Registers a reader. Taking the mutex around the increment guarantees no
// writer currently holds the lock; takeLock=false lets a thread that already
// holds the writer mutex add itself as a reader (recursive use).
void PxReadWriteLock::lockReader(bool takeLock)
{
	if(takeLock)
		mImpl->mutex.lock();
	InterlockedIncrement(&mImpl->readerCount);
	if(takeLock)
		mImpl->mutex.unlock();
}
// Acquires exclusive access: take the mutex (blocks new readers/writers) and
// then wait for in-flight readers to drain.
// NOTE(review): this is a pure busy-spin with no yield; acceptable only if
// reader critical sections are very short - confirm against usage.
void PxReadWriteLock::lockWriter()
{
	mImpl->mutex.lock();
	// spin lock until no readers
	while(mImpl->readerCount);
}
// Deregisters a reader; a writer spinning in lockWriter() proceeds once the
// count reaches zero.
void PxReadWriteLock::unlockReader()
{
	InterlockedDecrement(&mImpl->readerCount);
}
// Releases exclusive access taken by lockWriter().
void PxReadWriteLock::unlockWriter()
{
	mImpl->mutex.unlock();
}
} // namespace physx

View File

@@ -0,0 +1,42 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxString.h"
#include <stdio.h>
#include "foundation/windows/PxWindowsInclude.h"
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
// Writes a line to stdout and mirrors it to the debugger output window.
// puts() appends a newline on stdout; OutputDebugStringA does not, hence the
// explicit "\n" for the debugger channel.
void physx::PxPrintString(const char* str)
{
	puts(str); // do not use printf here, since str can contain multiple % signs that will not be printed
	OutputDebugStringA(str);
	OutputDebugStringA("\n");
}

View File

@@ -0,0 +1,68 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxSList.h"
using namespace physx;
// PxSListImpl is a size-only facade over the Win32 lock-free singly linked
// list (SLIST_HEADER); this helper reinterprets its caller-provided storage.
// NOTE(review): SLIST_HEADER has alignment requirements on x64 - assumed to be
// satisfied by the allocator of the PxSListImpl storage; confirm at call sites.
template <typename T>
static PX_FORCE_INLINE SLIST_HEADER* getDetail(T* impl)
{
	return reinterpret_cast<SLIST_HEADER*>(impl);
}
PxSListImpl::PxSListImpl()
{
	InitializeSListHead(getDetail(this));
}
// No teardown required: SLIST_HEADER owns no resources; entries belong to callers.
PxSListImpl::~PxSListImpl()
{
}
// Atomically pushes an entry onto the front of the list.
void PxSListImpl::push(PxSListEntry* entry)
{
	InterlockedPushEntrySList(getDetail(this), reinterpret_cast<SLIST_ENTRY*>(entry));
}
// Atomically pops the front entry; returns NULL when the list is empty.
PxSListEntry* PxSListImpl::pop()
{
	return reinterpret_cast<PxSListEntry*>(InterlockedPopEntrySList(getDetail(this)));
}
// Atomically detaches the whole chain and returns its head (NULL if empty).
PxSListEntry* PxSListImpl::flush()
{
	return reinterpret_cast<PxSListEntry*>(InterlockedFlushSList(getDetail(this)));
}
// Bytes of storage a PxSListImpl instance must be given by its allocator.
uint32_t PxSListImpl::getSize()
{
	return sizeof(SLIST_HEADER);
}

View File

@@ -0,0 +1,438 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMathIntrinsics.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxSocket.h"
#include "foundation/PxThread.h"
#include "foundation/PxArray.h"
#include <Winsock2.h>
#pragma comment(lib, "Ws2_32")
namespace physx
{
const uint32_t PxSocket::DEFAULT_BUFFER_SIZE = 32768;
// Unbuffered TCP socket implementation on Winsock2. Serves both client
// (connect) and server (listen/accept) roles; BufferedSocketImpl derives from
// it to add write buffering.
class SocketImpl
{
  public:
	SocketImpl(bool isBlocking);
	virtual ~SocketImpl();
	// Connects to host:port with a millisecond timeout; returns success.
	bool connect(const char* host, uint16_t port, uint32_t timeout);
	// Binds and listens on all interfaces at the given port.
	bool listen(uint16_t port);
	// Accepts one pending connection; optionally blocks until one arrives.
	bool accept(bool block);
	void disconnect();
	void setBlocking(bool blocking);
	// Returns bytes actually sent (0 on failure or would-block).
	virtual uint32_t write(const uint8_t* data, uint32_t length);
	virtual bool flush();
	// Returns bytes actually received (0 on failure or would-block).
	uint32_t read(uint8_t* data, uint32_t length);
	PX_FORCE_INLINE bool isBlocking() const
	{
		return mIsBlocking;
	}
	PX_FORCE_INLINE bool isConnected() const
	{
		return mIsConnected;
	}
	PX_FORCE_INLINE const char* getHost() const
	{
		return mHost;
	}
	PX_FORCE_INLINE uint16_t getPort() const
	{
		return mPort;
	}
  protected:
	// True when the last failure was only a would-block on a non-blocking socket.
	bool nonBlockingTimeout() const;
	void setBlockingInternal(SOCKET socket, bool blocking);
	mutable SOCKET mSocket;
	SOCKET mListenSocket;
	const char* mHost; // non-owning; caller's string from connect() - not copied
	uint16_t mPort;
	mutable bool mIsConnected;
	bool mIsBlocking;
	bool mListenMode;
	bool mSocketLayerIntialized; // (sic) tracks WSAStartup success
};
// Initializes all members and brings up Winsock 2.2; every public operation
// bails out early if WSAStartup failed.
SocketImpl::SocketImpl(bool isBlocking)
: mSocket(INVALID_SOCKET)
, mListenSocket(INVALID_SOCKET)
, mPort(0)
, mHost(NULL)
, mIsConnected(false)
, mIsBlocking(isBlocking)
, mListenMode(false)
, mSocketLayerIntialized(false)
{
	WORD vreq;
	WSADATA wsaData;
	vreq = MAKEWORD(2, 2);
	mSocketLayerIntialized = (WSAStartup(vreq, &wsaData) == 0);
}
// Balances the WSAStartup from the constructor. Open sockets are closed by
// the owning PxSocket calling disconnect() before destruction.
SocketImpl::~SocketImpl()
{
	if(mSocketLayerIntialized)
		WSACleanup();
}
// Switches the given socket between blocking and non-blocking mode.
// Fix: ioctlsocket takes a u_long* - use a u_long directly instead of
// C-style-casting a uint32_t's address (type-punning through an unrelated
// pointer type).
void SocketImpl::setBlockingInternal(SOCKET socket, bool blocking)
{
	// FIONBIO: a non-zero argument enables non-blocking mode
	u_long mode = blocking ? 0 : 1;
	ioctlsocket(socket, FIONBIO, &mode);
}
// Establishes a TCP connection to host:port, honoring `timeout` milliseconds
// even for a blocking socket by issuing the connect in non-blocking mode and
// polling for writability. On success, restores the user-selected blocking
// mode and records host/port. `host` may be a name or a dotted-quad address.
// NOTE(review): gethostbyname/inet_addr are the legacy IPv4-only API - works,
// but confirm IPv6 is out of scope before modernizing to getaddrinfo.
bool SocketImpl::connect(const char* host, uint16_t port, uint32_t timeout)
{
	if(!mSocketLayerIntialized)
		return false;
	sockaddr_in socketAddress;
	hostent* hp;
	intrinsics::memSet(&socketAddress, 0, sizeof(sockaddr_in));
	socketAddress.sin_family = AF_INET;
	socketAddress.sin_port = htons(port);
	// get host: try name resolution first, then fall back to a literal address
	hp = gethostbyname(host);
	if(!hp)
	{
		in_addr a;
		a.s_addr = inet_addr(host);
		hp = gethostbyaddr((const char*)&a, sizeof(in_addr), AF_INET);
		if(!hp)
			return false;
	}
	intrinsics::memCopy(&socketAddress.sin_addr, hp->h_addr_list[0], (uint32_t)hp->h_length);
	// connect: start non-blocking so we can bound the wait ourselves
	mSocket = socket(PF_INET, SOCK_STREAM, 0);
	if(mSocket == INVALID_SOCKET)
		return false;
	setBlockingInternal(mSocket, false);
	::connect(mSocket, (sockaddr*)&socketAddress, sizeof(socketAddress));
	// Setup poll function call to monitor the connect call.
	// By querying for POLLOUT we're checking if the socket is
	// ready for writing.
	WSAPOLLFD pfd;
	pfd.fd = mSocket;
	pfd.events = POLLOUT;
	const int pollResult = WSAPoll(&pfd, 1, timeout /*milliseconds*/);
	const bool pollTimeout = (pollResult == 0);
	const bool pollError = (pollResult == SOCKET_ERROR); // an error inside poll happened. Can check error with `WSAGetLastError`.
	if(pollTimeout || pollError)
	{
		disconnect();
		return false;
	}
	else
	{
		PX_ASSERT(pollResult == 1);
		// check that event was precisely POLLOUT and not anything else (e.g., errors, hang-up)
		bool test = (pfd.revents & POLLOUT) && !(pfd.revents & (~POLLOUT));
		if(!test)
		{
			disconnect();
			return false;
		}
	}
	setBlockingInternal(mSocket, mIsBlocking);
	mIsConnected = true;
	mPort = port;
	mHost = host; // non-owning pointer: caller must keep `host` alive while connected
	return true;
}
// Creates a listening TCP socket bound to all interfaces on `port`.
// Returns false if Winsock is unavailable or bind/listen fails.
bool SocketImpl::listen(uint16_t port)
{
	if(!mSocketLayerIntialized)
		return false;
	mListenSocket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	if(mListenSocket == INVALID_SOCKET)
		return false;
	mListenMode = true;
	sockaddr_in addr = { 0 };
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	// SOMAXCONN lets the provider pick a reasonable backlog
	return bind(mListenSocket, (sockaddr*)&addr, sizeof(addr)) == 0 && ::listen(mListenSocket, SOMAXCONN) == 0;
}
// Accepts one pending connection on the listen socket. `block` controls
// whether this call waits for a connection; the accepted data socket is then
// switched to the mode the user originally requested. Only valid after
// listen() and while no connection is active.
bool SocketImpl::accept(bool block)
{
	if(mIsConnected || !mListenMode)
		return false;
	// set the listen socket's blocking mode according to `block`
	setBlockingInternal(mListenSocket, block);
	SOCKET clientSocket = ::accept(mListenSocket, 0, 0);
	if(clientSocket == INVALID_SOCKET)
		return false;
	mSocket = clientSocket;
	mIsConnected = true;
	setBlockingInternal(mSocket, mIsBlocking); // force the mode to whatever the user set
	return mIsConnected;
}
// Closes both the data and listen sockets (if open) and resets all connection
// state. Safe to call repeatedly or when nothing is connected.
void SocketImpl::disconnect()
{
	if(mListenSocket != INVALID_SOCKET)
	{
		closesocket(mListenSocket);
		mListenSocket = INVALID_SOCKET;
	}
	if(mSocket != INVALID_SOCKET)
	{
		// polite shutdown of the send side before closing
		WSASendDisconnect(mSocket, NULL);
		closesocket(mSocket);
		mSocket = INVALID_SOCKET;
	}
	mIsConnected = false;
	mListenMode = false;
	mPort = 0;
	mHost = NULL;
}
// Distinguishes a benign would-block on a non-blocking socket from a real
// send/recv failure.
bool SocketImpl::nonBlockingTimeout() const
{
	if(mIsBlocking)
		return false; // a blocking socket never reports a would-block "timeout"
	return WSAGetLastError() == WSAEWOULDBLOCK;
}
// should be cross-platform from here down
// Records the requested blocking mode and, if a connection is live, applies it
// to the underlying socket immediately. No-op when the mode is unchanged.
void SocketImpl::setBlocking(bool blocking)
{
	if(blocking == mIsBlocking)
		return;
	mIsBlocking = blocking;
	if(isConnected())
		setBlockingInternal(mSocket, blocking);
}
// Unbuffered sockets have nothing pending; flush trivially succeeds.
// (BufferedSocketImpl overrides this to drain its buffer.)
bool SocketImpl::flush()
{
	return true;
}
// Sends up to `length` bytes; returns the number actually sent (possibly less
// on a non-blocking socket). Any failure other than a benign would-block tears
// the connection down.
uint32_t SocketImpl::write(const uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;
	int sent = send(mSocket, (const char*)data, (int32_t)length, 0);
	if(sent <= 0 && !nonBlockingTimeout())
		disconnect();
	return uint32_t(sent > 0 ? sent : 0);
}
// Receives up to `length` bytes; returns the number actually read. A zero
// return from recv (peer closed) or an error also disconnects.
uint32_t SocketImpl::read(uint8_t* data, uint32_t length)
{
	if(length == 0)
		return 0;
	int32_t received = recv(mSocket, (char*)data, (int32_t)length, 0);
	if(received <= 0 && !nonBlockingTimeout())
		disconnect();
	return uint32_t(received > 0 ? received : 0);
}
// Socket variant that coalesces writes into a fixed in-memory buffer and only
// hits the wire when the buffer fills or flush() is called. Reads are not
// buffered (inherited behavior).
class BufferedSocketImpl : public SocketImpl
{
  public:
	BufferedSocketImpl(bool isBlocking) : SocketImpl(isBlocking), mBufferPos(0)
	{
	}
	virtual ~BufferedSocketImpl()
	{
	}
	bool flush();
	uint32_t write(const uint8_t* data, uint32_t length);
  private:
	uint32_t mBufferPos; // number of buffered-but-unsent bytes
	uint8_t mBuffer[PxSocket::DEFAULT_BUFFER_SIZE];
};
// Drains the internal buffer to the socket. Returns true if everything went
// out; the buffer is emptied either way (unsent bytes are dropped on failure).
// The loop terminates on failure because SocketImpl::write() disconnects,
// clearing mIsConnected.
bool BufferedSocketImpl::flush()
{
	uint32_t totalBytesWritten = 0;
	while(totalBytesWritten < mBufferPos && mIsConnected)
		totalBytesWritten += (int32_t)SocketImpl::write(mBuffer + totalBytesWritten, mBufferPos - totalBytesWritten);
	bool ret = (totalBytesWritten == mBufferPos);
	mBufferPos = 0;
	return ret;
}
// Appends `data` to the buffer, sending full buffer-sized chunks as the buffer
// fills. Returns the number of bytes consumed from the caller's view - bytes
// copied into the buffer count as written even if the socket later stalls.
uint32_t BufferedSocketImpl::write(const uint8_t* data, uint32_t length)
{
	uint32_t bytesWritten = 0;
	// while the incoming data would overflow the buffer, top it up and send it
	while(mBufferPos + length >= PxSocket::DEFAULT_BUFFER_SIZE)
	{
		uint32_t currentChunk = PxSocket::DEFAULT_BUFFER_SIZE - mBufferPos;
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, currentChunk);
		bytesWritten += (uint32_t)currentChunk; // for the user, this is consumed even if we fail to shove it down a
		// non-blocking socket
		uint32_t sent = SocketImpl::write(mBuffer, PxSocket::DEFAULT_BUFFER_SIZE);
		mBufferPos = PxSocket::DEFAULT_BUFFER_SIZE - sent;
		if(sent < PxSocket::DEFAULT_BUFFER_SIZE) // non-blocking or error
		{
			if(sent) // we can reasonably hope this is rare
				// compact the unsent tail to the front of the buffer
				intrinsics::memMove(mBuffer, mBuffer + sent, mBufferPos);
			return bytesWritten;
		}
		length -= currentChunk;
	}
	// remainder fits in the buffer: just stage it
	if(length > 0)
	{
		intrinsics::memCopy(mBuffer + mBufferPos, data + bytesWritten, length);
		bytesWritten += length;
		mBufferPos += length;
	}
	return bytesWritten;
}
// Public facade: picks the buffered or unbuffered implementation at
// construction time and placement-constructs it in PX_ALLOC'd storage.
PxSocket::PxSocket(bool inIsBuffering, bool isBlocking)
{
	if(inIsBuffering)
	{
		void* mem = PX_ALLOC(sizeof(BufferedSocketImpl), "BufferedSocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, BufferedSocketImpl)(isBlocking);
	}
	else
	{
		void* mem = PX_ALLOC(sizeof(SocketImpl), "SocketImpl");
		mImpl = PX_PLACEMENT_NEW(mem, SocketImpl)(isBlocking);
	}
}
// Flush pending buffered data, close any connection, then destroy in place.
// ~SocketImpl is virtual, so the explicit destructor call is correct for the
// buffered variant too.
PxSocket::~PxSocket()
{
	mImpl->flush();
	mImpl->disconnect();
	mImpl->~SocketImpl();
	PX_FREE(mImpl);
}
// Thin forwarders from the public PxSocket API to the chosen implementation.
// flush/write/read additionally guard against use while disconnected.
bool PxSocket::connect(const char* host, uint16_t port, uint32_t timeout)
{
	return mImpl->connect(host, port, timeout);
}
bool PxSocket::listen(uint16_t port)
{
	return mImpl->listen(port);
}
bool PxSocket::accept(bool block)
{
	return mImpl->accept(block);
}
void PxSocket::disconnect()
{
	mImpl->disconnect();
}
bool PxSocket::isConnected() const
{
	return mImpl->isConnected();
}
const char* PxSocket::getHost() const
{
	return mImpl->getHost();
}
uint16_t PxSocket::getPort() const
{
	return mImpl->getPort();
}
bool PxSocket::flush()
{
	if(!mImpl->isConnected())
		return false;
	return mImpl->flush();
}
uint32_t PxSocket::write(const uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->write(data, length);
}
uint32_t PxSocket::read(uint8_t* data, uint32_t length)
{
	if(!mImpl->isConnected())
		return 0;
	return mImpl->read(data, length);
}
void PxSocket::setBlocking(bool blocking)
{
	mImpl->setBlocking(blocking);
}
bool PxSocket::isBlocking() const
{
	return mImpl->isBlocking();
}
} // namespace physx

View File

@@ -0,0 +1,72 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSync.h"
using namespace physx;
// PxSyncImpl is a size-only facade over a Win32 event handle stored in
// caller-provided memory; this helper accesses that handle.
static PX_FORCE_INLINE HANDLE& getSync(PxSyncImpl* impl)
{
	return *reinterpret_cast<HANDLE*>(impl);
}
// Bytes of storage a PxSyncImpl instance must be given by its allocator.
uint32_t PxSyncImpl::getSize()
{
	return sizeof(HANDLE);
}
// Creates a manual-reset event, initially non-signaled.
PxSyncImpl::PxSyncImpl()
{
	getSync(this) = CreateEvent(0, true, false, 0);
}
PxSyncImpl::~PxSyncImpl()
{
	CloseHandle(getSync(this));
}
// Returns the event to the non-signaled state (waiters will block again).
void PxSyncImpl::reset()
{
	ResetEvent(getSync(this));
}
// Signals the event; since it is manual-reset, ALL current and future waiters
// are released until reset() is called.
void PxSyncImpl::set()
{
	SetEvent(getSync(this));
}
// Waits for the event to be signaled, up to `milliseconds`; pass uint32_t(-1)
// for an unbounded wait. Returns true if the event was signaled, false on
// timeout. Fix: compare against uint32_t(-1) explicitly instead of the signed
// literal -1 to avoid a signed/unsigned comparison (same value, 0xFFFFFFFF,
// which also happens to equal INFINITE).
bool PxSyncImpl::wait(uint32_t milliseconds)
{
	if(milliseconds == uint32_t(-1))
		milliseconds = INFINITE;
	return WaitForSingleObject(getSync(this), milliseconds) == WAIT_OBJECT_0;
}

View File

@@ -0,0 +1,421 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/windows/PxWindowsInclude.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxThread.h"
#include "foundation/PxAlloca.h"
// an exception for setting the thread name in Microsoft debuggers
#define NS_MS_VC_EXCEPTION 0x406D1388
namespace physx
{
namespace
{
#if PX_VC
#pragma warning(disable : 4061) // enumerator 'identifier' in switch of enum 'enumeration' is not handled
#pragma warning(disable : 4191) //'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
#endif
// struct for naming a thread in the debugger
// (layout mandated by the MSVC debugger protocol; see setName())
#pragma pack(push, 8)
typedef struct tagTHREADNAME_INFO
{
	DWORD dwType; // Must be 0x1000.
	LPCSTR szName; // Pointer to name (in user addr space).
	DWORD dwThreadID; // Thread ID (-1=caller thread).
	DWORD dwFlags; // Reserved for future use, must be zero.
} THREADNAME_INFO;
#pragma pack(pop)
// Real per-thread state behind the size-only PxThreadImpl facade.
class ThreadImpl
{
  public:
	enum State
	{
		NotStarted,
		Started,
		Stopped
	};
	HANDLE thread;
	LONG quitNow; // Should be 32bit aligned on SMP systems.
	State state;
	DWORD threadID;
	PxThreadImpl::ExecuteFn fn;   // free-function entry point, or...
	void* arg;                    // ...a PxRunnable* when fn is NULL
	uint32_t affinityMask;
	const char* name; // non-owning; must outlive the thread
};
// Reinterprets the caller-provided PxThreadImpl storage as the real impl.
static PX_FORCE_INLINE ThreadImpl* getThread(PxThreadImpl* impl)
{
	return reinterpret_cast<ThreadImpl*>(impl);
}
// OS-level thread entry: dispatches to either the ExecuteFn or the Runnable.
static DWORD WINAPI PxThreadStart(LPVOID arg)
{
	ThreadImpl* impl = getThread((PxThreadImpl*)arg);
	// run either the passed in function or execute from the derived class (Runnable).
	if(impl->fn)
		(*impl->fn)(impl->arg);
	else if(impl->arg)
		((PxRunnable*)impl->arg)->execute();
	return 0;
}
// cache physical thread count
static uint32_t gPhysicalCoreCount = 0;
}
// Bytes of storage a PxThreadImpl instance must be given by its allocator.
uint32_t PxThreadImpl::getSize()
{
	return sizeof(ThreadImpl);
}
// Returns the calling thread's OS thread id.
PxThreadImpl::Id PxThreadImpl::getId()
{
	return static_cast<Id>(GetCurrentThreadId());
}
// fwd GetLogicalProcessorInformation()
typedef BOOL(WINAPI* LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
// Counts physical (not logical/hyperthreaded) cores via
// GetLogicalProcessorInformation, caching the result in gPhysicalCoreCount.
// Returns 0 if the query is unsupported or fails.
// NOTE(review): the lazy cache init is not synchronized - benign if two
// threads race (both compute the same value), but confirm callers agree.
uint32_t PxThreadImpl::getNbPhysicalCores()
{
	if(!gPhysicalCoreCount)
	{
		// modified example code from: http://msdn.microsoft.com/en-us/library/ms683194
		LPFN_GLPI glpi;
		PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
		PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
		DWORD returnLength = 0;
		DWORD processorCoreCount = 0;
		DWORD byteOffset = 0;
		glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")), "GetLogicalProcessorInformation");
		if(NULL == glpi)
		{
			// GetLogicalProcessorInformation not supported on OS < XP Service Pack 3
			return 0;
		}
		// first call with NULL buffer is expected to fail and report the size needed
		DWORD rc = (DWORD)glpi(NULL, &returnLength);
		PX_ASSERT(rc == FALSE);
		PX_UNUSED(rc);
		// first query reports required buffer space
		if(GetLastError() == ERROR_INSUFFICIENT_BUFFER)
		{
			// NOTE(review): `buffer` is allocated in this block but read after it
			// closes - fine if PxAlloca has function lifetime (_alloca); confirm it
			// is not implemented with block-scoped storage.
			buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)PxAlloca(returnLength);
		}
		else
		{
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Error querying buffer size for number of physical processors");
			return 0;
		}
		// retrieve data
		rc = (DWORD)glpi(buffer, &returnLength);
		if(rc != TRUE)
		{
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Error querying number of physical processors");
			return 0;
		}
		// walk the records, counting one per physical core relationship
		ptr = buffer;
		while(byteOffset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= returnLength)
		{
			switch(ptr->Relationship)
			{
			case RelationProcessorCore:
				processorCoreCount++;
				break;
			default:
				break;
			}
			byteOffset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
			ptr++;
		}
		gPhysicalCoreCount = processorCoreCount;
	}
	return gPhysicalCoreCount;
}
// Default constructor: zeroes all state; the thread is created later by start().
PxThreadImpl::PxThreadImpl()
{
	getThread(this)->thread = NULL;
	getThread(this)->state = ThreadImpl::NotStarted;
	getThread(this)->quitNow = 0;
	getThread(this)->fn = NULL;
	getThread(this)->arg = NULL;
	getThread(this)->affinityMask = 0;
	getThread(this)->name = NULL;
}
// Convenience constructor: records the entry function/argument/name and starts
// the thread immediately with the default stack size.
PxThreadImpl::PxThreadImpl(ExecuteFn fn, void* arg, const char* name)
{
	getThread(this)->thread = NULL;
	getThread(this)->state = ThreadImpl::NotStarted;
	getThread(this)->quitNow = 0;
	getThread(this)->fn = fn;
	getThread(this)->arg = arg;
	getThread(this)->affinityMask = 0;
	getThread(this)->name = name;
	start(0, NULL);
}
// Forcibly terminates the thread if it is still running, then releases the
// handle. Prefer signalQuit()/waitForQuit() before destruction: kill() uses
// TerminateThread, which does not run cleanup on the target thread.
PxThreadImpl::~PxThreadImpl()
{
	if(getThread(this)->state == ThreadImpl::Started)
		kill();
	CloseHandle(getThread(this)->thread);
}
// Starts the thread (no-op if already started). The thread is created
// suspended so affinity and name can be applied before it runs, then resumed.
// `runnable` is used only when no ExecuteFn/arg was supplied at construction.
void PxThreadImpl::start(uint32_t stackSize, PxRunnable* runnable)
{
	if(getThread(this)->state != ThreadImpl::NotStarted)
		return;
	getThread(this)->state = ThreadImpl::Started;
	if(runnable && !getThread(this)->arg && !getThread(this)->fn)
		getThread(this)->arg = runnable;
	getThread(this)->thread =
	CreateThread(NULL, stackSize, PxThreadStart, (LPVOID) this, CREATE_SUSPENDED, &getThread(this)->threadID);
	if(!getThread(this)->thread)
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::start: Failed to create thread.");
		getThread(this)->state = ThreadImpl::NotStarted;
		return;
	}
	// set affinity, set name and resume
	if(getThread(this)->affinityMask)
		setAffinityMask(getThread(this)->affinityMask);
	if (getThread(this)->name)
		setName(getThread(this)->name);
	DWORD rc = ResumeThread(getThread(this)->thread);
	if(rc == DWORD(-1))
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::start: Failed to resume thread.");
		getThread(this)->state = ThreadImpl::NotStarted;
		return;
	}
}
// Cooperative-shutdown flag: the worker polls quitIsSignalled() and exits.
void PxThreadImpl::signalQuit()
{
	InterlockedIncrement(&(getThread(this)->quitNow));
}
// Blocks until the thread function returns; false if it was never started.
bool PxThreadImpl::waitForQuit()
{
	if(getThread(this)->state == ThreadImpl::NotStarted)
		return false;
	WaitForSingleObject(getThread(this)->thread, INFINITE);
	getThread(this)->state = ThreadImpl::Stopped;
	return true;
}
// Atomically reads quitNow (the no-op compare-exchange is just an atomic load).
bool PxThreadImpl::quitIsSignalled()
{
	return InterlockedCompareExchange(&(getThread(this)->quitNow), 0, 0) != 0;
}
// Called from within the worker thread to terminate itself.
void PxThreadImpl::quit()
{
	getThread(this)->state = ThreadImpl::Stopped;
	ExitThread(0);
}
// Hard-terminates the thread; no destructors/cleanup run on the target.
void PxThreadImpl::kill()
{
	if(getThread(this)->state == ThreadImpl::Started)
		TerminateThread(getThread(this)->thread, 0);
	getThread(this)->state = ThreadImpl::Stopped;
}
void PxThreadImpl::sleep(uint32_t ms)
{
	Sleep(ms);
}
// Yields the remainder of the time slice to another ready thread, if any.
void PxThreadImpl::yield()
{
	SwitchToThread();
}
// CPU-level pause hint for spin-wait loops (does not yield the time slice).
void PxThreadImpl::yieldProcessor()
{
	YieldProcessor();
}
// Stores the requested affinity mask and applies it immediately if the thread
// is already running (otherwise start() applies it). Returns the previous
// mask from SetThreadAffinityMask, or 0 when deferred / mask is 0.
// NOTE(review): the DWORD_PTR result is truncated to uint32_t - only the
// first 32 processors are representable here.
uint32_t PxThreadImpl::setAffinityMask(uint32_t mask)
{
	if(mask)
	{
		// store affinity
		getThread(this)->affinityMask = mask;
		// if thread already started apply immediately
		if(getThread(this)->state == ThreadImpl::Started)
		{
			uint32_t err = uint32_t(SetThreadAffinityMask(getThread(this)->thread, mask));
			return err;
		}
	}
	return 0;
}
// Records the thread name and, if the thread is running, publishes it to an
// attached Microsoft debugger via the well-known 0x406D1388 SEH exception.
// `name` is stored as a raw pointer and must outlive the thread.
void PxThreadImpl::setName(const char* name)
{
	getThread(this)->name = name;
	if (getThread(this)->state == ThreadImpl::Started)
	{
		THREADNAME_INFO info;
		info.dwType = 0x1000;
		info.szName = name;
		info.dwThreadID = getThread(this)->threadID;
		info.dwFlags = 0;
		// C++ Exceptions are disabled for this project, but SEH is not (and cannot be)
		// http://stackoverflow.com/questions/943087/what-exactly-will-happen-if-i-disable-c-exceptions-in-a-project
		__try
		{
			RaiseException(NS_MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
		}
		__except (EXCEPTION_EXECUTE_HANDLER)
		{
			// this runs if not attached to a debugger (thus not really naming the thread)
		}
	}
}
// Maps the PhysX priority enum onto Win32 thread priorities. An unmapped enum
// value leaves rc false and is reported as an internal error.
void PxThreadImpl::setPriority(PxThreadPriority::Enum prio)
{
	BOOL rc = false;
	switch(prio)
	{
	case PxThreadPriority::eHIGH:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_HIGHEST);
		break;
	case PxThreadPriority::eABOVE_NORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_ABOVE_NORMAL);
		break;
	case PxThreadPriority::eNORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_NORMAL);
		break;
	case PxThreadPriority::eBELOW_NORMAL:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_BELOW_NORMAL);
		break;
	case PxThreadPriority::eLOW:
		rc = SetThreadPriority(getThread(this)->thread, THREAD_PRIORITY_LOWEST);
		break;
	default:
		break;
	}
	if(!rc)
	{
		PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "FdWindowsThread::setPriority: Failed to set thread priority.");
	}
}
// Maps a thread's Win32 priority back onto the PhysX enum by descending
// threshold comparison; anything below BELOW_NORMAL (including a failed query)
// reports eLOW.
PxThreadPriority::Enum PxThreadImpl::getPriority(Id threadId)
{
	PxThreadPriority::Enum retval = PxThreadPriority::eLOW;
	int priority = GetThreadPriority((HANDLE)threadId);
	// the cascade below relies on Win32 priority constants being ordered
	PX_COMPILE_TIME_ASSERT(THREAD_PRIORITY_HIGHEST > THREAD_PRIORITY_ABOVE_NORMAL);
	if(priority >= THREAD_PRIORITY_HIGHEST)
		retval = PxThreadPriority::eHIGH;
	else if(priority >= THREAD_PRIORITY_ABOVE_NORMAL)
		retval = PxThreadPriority::eABOVE_NORMAL;
	else if(priority >= THREAD_PRIORITY_NORMAL)
		retval = PxThreadPriority::eNORMAL;
	else if(priority >= THREAD_PRIORITY_BELOW_NORMAL)
		retval = PxThreadPriority::eBELOW_NORMAL;
	return retval;
}
// Thin wrappers over Win32 thread-local-storage slots. Two parallel accessor
// pairs exist: Get/Set work with void*, GetValue/SetValue with size_t.
PxU32 PxTlsAlloc()
{
	DWORD rv = ::TlsAlloc();
	PX_ASSERT(rv != TLS_OUT_OF_INDEXES);
	return (PxU32)rv;
}
void PxTlsFree(PxU32 index)
{
	::TlsFree(index);
}
void* PxTlsGet(PxU32 index)
{
	return ::TlsGetValue(index);
}
size_t PxTlsGetValue(PxU32 index)
{
	return size_t(::TlsGetValue(index));
}
// Returns non-zero on success (BOOL from TlsSetValue).
PxU32 PxTlsSet(PxU32 index, void* value)
{
	return PxU32(::TlsSetValue(index, value));
}
PxU32 PxTlsSetValue(PxU32 index, size_t value)
{
	return PxU32(::TlsSetValue(index, reinterpret_cast<void*>(value)));
}
// Returns the default stack size (1 MiB) used when start() is called with
// stackSize == 0. Fix: removed the stray semicolon after the function body.
PxU32 PxThreadImpl::getDefaultStackSize()
{
	return 1048576;
}
} // namespace physx

View File

@@ -0,0 +1,92 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxTime.h"
#include "foundation/windows/PxWindowsInclude.h"
using namespace physx;
static int64_t getTimeTicks()
{
LARGE_INTEGER a;
QueryPerformanceCounter(&a);
return a.QuadPart;
}
static double getTickDuration()
{
LARGE_INTEGER a;
QueryPerformanceFrequency(&a);
return 1.0f / double(a.QuadPart);
}
// Seconds-per-tick, computed once at static-initialization time (the QPC
// frequency does not change after boot).
static double sTickDuration = getTickDuration();
// Counter frequency in tens-of-nanoseconds units, also cached once at startup
// and handed out by PxTime::getBootCounterFrequency().
static const PxCounterFrequencyToTensOfNanos gCounterFreq = PxTime::getCounterFrequency();
// Returns the frequency captured once during static initialization, so every
// caller observes the same value for the entire run.
const PxCounterFrequencyToTensOfNanos& PxTime::getBootCounterFrequency()
{
return gCounterFreq;
}
// Queries the performance-counter rate and pairs it with the fixed
// tens-of-nanoseconds-per-second numerator expected by the conversion helper.
PxCounterFrequencyToTensOfNanos PxTime::getCounterFrequency()
{
	LARGE_INTEGER ticksPerSecond;
	QueryPerformanceFrequency(&ticksPerSecond);
	const uint64_t denominator = uint64_t(ticksPerSecond.QuadPart);
	return PxCounterFrequencyToTensOfNanos(PxTime::sNumTensOfNanoSecondsInASecond, denominator);
}
uint64_t PxTime::getCurrentCounterValue()
{
LARGE_INTEGER ticks;
QueryPerformanceCounter(&ticks);
return (uint64_t)ticks.QuadPart;
}
// Primes mTickCount with the current counter value (via a throwaway
// getElapsedSeconds() call) so the first real elapsed-time query measures
// from construction rather than from tick zero.
PxTime::PxTime() : mTickCount(0)
{
getElapsedSeconds();
}
// Seconds elapsed since the previous call (or since construction); advances
// the stored tick snapshot as a side effect.
PxTime::Second PxTime::getElapsedSeconds()
{
	const int64_t previousTicks = mTickCount;
	mTickCount = getTimeTicks();
	return (mTickCount - previousTicks) * sTickDuration;
}
// Like getElapsedSeconds(), but read-only: does not advance the snapshot.
PxTime::Second PxTime::peekElapsedSeconds()
{
	const int64_t nowTicks = getTimeTicks();
	return (nowTicks - mTickCount) * sTickDuration;
}
// Absolute time (in seconds, counter epoch) of the last recorded snapshot.
PxTime::Second PxTime::getLastTime() const
{
	const int64_t snapshotTicks = mTickCount;
	return snapshotTicks * sTickDuration;
}

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_ACTOR_SHAPE_MAP_H
#define GU_ACTOR_SHAPE_MAP_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxHashMap.h"
namespace physx
{
namespace Gu
{
typedef PxU64 ActorShapeData;
#define PX_INVALID_INDEX 0xffffffff
// Maps (actor, shape) pointer pairs to an opaque 64-bit payload
// (ActorShapeData). Lookups go through a small per-actor-index cache backed
// by a hash map; implementations live in the PhysXCommon DLL (declaration
// only here, hence the PX_PHYSX_COMMON_API exports).
class ActorShapeMap
{
public:
PX_PHYSX_COMMON_API ActorShapeMap();
PX_PHYSX_COMMON_API ~ActorShapeMap();
// Returns whether the entry was newly inserted; 'actorIndex' also keys the internal cache.
PX_PHYSX_COMMON_API bool add(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData actorShapeData);
// On success writes the removed payload to '*removed' (if non-null — TODO confirm against the .cpp).
PX_PHYSX_COMMON_API bool remove(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData* removed);
PX_PHYSX_COMMON_API ActorShapeData find(PxU32 actorIndex, const void* actor, const void* shape) const;
// Hash key: identity of the (actor, shape) pointer pair.
struct ActorShape
{
PX_FORCE_INLINE ActorShape() {}
PX_FORCE_INLINE ActorShape(const void* actor, const void* shape) : mActor(actor), mShape(shape) {}
const void* mActor;
const void* mShape;
PX_FORCE_INLINE bool operator==(const ActorShape& p) const
{
return mActor == p.mActor && mShape == p.mShape;
}
};
private:
PxHashMap<ActorShape, ActorShapeData> mDatabase;
// One cache entry per actor index, presumably holding that actor's most
// recently accessed shape/payload; mActor is redundant with the index and
// was commented out — verify against the .cpp before relying on this.
struct Cache
{
// const void* mActor;
const void* mShape;
ActorShapeData mData;
};
PxU32 mCacheSize;
Cache* mCache;
// Grows mCache so 'index' is addressable.
void resizeCache(PxU32 index);
};
}
}
#endif

View File

@@ -0,0 +1,203 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BOUNDS_H
#define GU_BOUNDS_H
#include "foundation/PxBounds3.h"
#include "foundation/PxFlags.h"
#include "foundation/PxVecMath.h"
#include "geometry/PxGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include <stddef.h>
#include "GuBox.h"
#include "GuCenterExtents.h"
#include "GuSphere.h"
#include "GuCapsule.h"
// PT: the PX_MAX_BOUNDS_EXTENTS value is too large and produces INF floats when the box values are squared in
// some collision routines. Thus, for the SQ subsystem we use this alternative (smaller) value to mark empty bounds.
// See PX-954 for details.
#define GU_EMPTY_BOUNDS_EXTENTS PxSqrt(0.25f * 1e33f)
namespace physx
{
class PxMeshScale;
namespace Gu
{
// World-space AABB of a capsule. The capsule axis is the pose's X basis
// vector; per world axis the half-extent is the projected half-height plus
// radius and contactOffset, the whole thing scaled by 'inflation'.
// ('contactOffset' and 'inflation' should not be used together: pass either
// contactOffset==0.0f or inflation==1.0f.)
PX_FORCE_INLINE void computeCapsuleBounds(PxBounds3& bounds, const PxCapsuleGeometry& capsuleGeom, const PxTransform& pose, float contactOffset=0.0f, float inflation=1.0f)
{
	const PxVec3 axis = pose.q.getBasisVector0();
	const float pad = capsuleGeom.radius + contactOffset;
	PxVec3 halfSize;
	halfSize.x = (PxAbs(axis.x) * capsuleGeom.halfHeight + pad) * inflation;
	halfSize.y = (PxAbs(axis.y) * capsuleGeom.halfHeight + pad) * inflation;
	halfSize.z = (PxAbs(axis.z) * capsuleGeom.halfHeight + pad) * inflation;
	bounds.minimum = pose.p - halfSize;
	bounds.maximum = pose.p + halfSize;
}
//'contactOffset' and 'inflation' should not be used at the same time, i.e. either contactOffset==0.0f, or inflation==1.0f
PX_PHYSX_COMMON_API void computeBounds(PxBounds3& bounds, const PxGeometry& geometry, const PxTransform& transform, float contactOffset, float inflation); //AABB in world space.
// Convenience overload: world-space AABB with no contact offset (0.0f) and
// no inflation (1.0f).
PX_FORCE_INLINE PxBounds3 computeBounds(const PxGeometry& geometry, const PxTransform& pose)
{
	PxBounds3 result;
	computeBounds(result, geometry, pose, 0.0f, 1.0f);
	return result;
}
void computeGlobalBox(PxBounds3& bounds, PxU32 nbPrims, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT primitives);
PX_PHYSX_COMMON_API void computeBoundsAroundVertices(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* PX_RESTRICT verts);
PX_PHYSX_COMMON_API void computeTightBounds(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* PX_RESTRICT verts, const PxTransform& pose, const PxMeshScale& scale, float contactOffset, float inflation);
PX_PHYSX_COMMON_API void computeLocalBoundsAndGeomEpsilon(const PxVec3* vertices, PxU32 nbVerties, PxBounds3& localBounds, PxReal& geomEpsilon);
#define StoreBounds(bounds, minV, maxV) \
V4StoreU(minV, &bounds.minimum.x); \
PX_ALIGN(16, PxVec4) max4; \
V4StoreA(maxV, &max4.x); \
bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
// PT: TODO: - refactor with "inflateBounds" in GuBounds.cpp if possible
// Writes to 'dst' a copy of 'src' grown symmetrically: each side of every
// axis moves outward by 0.5 * enlargement * (max - min). The SIMD path does
// unaligned 4-float loads that read one float past each PxVec3, so callers
// must use useSIMD=false for the last bounds of an array (see comment below).
template<const bool useSIMD>
PX_FORCE_INLINE void inflateBounds(PxBounds3& dst, const PxBounds3& src, float enlargement)
{
const float coeff = 0.5f * enlargement;
if(useSIMD)
{
using namespace physx::aos;
Vec4V minV = V4LoadU(&src.minimum.x);
Vec4V maxV = V4LoadU(&src.maximum.x);
const Vec4V eV = V4Scale(V4Sub(maxV, minV), FLoad(coeff));
minV = V4Sub(minV, eV);
maxV = V4Add(maxV, eV);
StoreBounds(dst, minV, maxV);
}
else
{
// PT: this clumsy but necessary second codepath is used to read the last bound of the array
// (making sure we don't V4LoadU invalid memory). Implementation must stay in sync with the
// main codepath above. No, this is not very nice.
const PxVec3& minV = src.minimum;
const PxVec3& maxV = src.maximum;
const PxVec3 eV = (maxV - minV) * coeff;
dst.minimum = minV - eV;
dst.maximum = maxV + eV;
}
}
// Precomputed per-shape data for scene queries: pruner-level bounds/OBB plus
// narrow-phase Gu shapes (capsule/sphere) in a union. The layout is SIMD- and
// cache-sensitive (see member comments and the 128-byte size assert below) —
// do not reorder members casually.
class ShapeData
{
public:
PX_PHYSX_COMMON_API ShapeData(const PxGeometry& g, const PxTransform& t, PxReal inflation);
// PT: used by overlaps (box, capsule, convex)
PX_FORCE_INLINE const PxVec3& getPrunerBoxGeomExtentsInflated() const { return mPrunerBoxGeomExtents; }
// PT: used by overlaps (box, capsule, convex)
PX_FORCE_INLINE const PxVec3& getPrunerWorldPos() const { return mGuBox.center; }
PX_FORCE_INLINE const PxBounds3& getPrunerInflatedWorldAABB() const { return mPrunerInflatedAABB; }
// PT: used by overlaps (box, capsule, convex)
PX_FORCE_INLINE const PxMat33& getPrunerWorldRot33() const { return mGuBox.rot; }
// PT: this one only used by overlaps so far (for sphere shape, pruner level)
// Asserts the stored type matches; the union below holds the raw bytes.
PX_FORCE_INLINE const Gu::Sphere& getGuSphere() const
{
PX_ASSERT(mType == PxGeometryType::eSPHERE);
return reinterpret_cast<const Gu::Sphere&>(mGuSphere);
}
// PT: this one only used by sweeps so far (for box shape, NP level)
PX_FORCE_INLINE const Gu::Box& getGuBox() const
{
PX_ASSERT(mType == PxGeometryType::eBOX);
return mGuBox;
}
// PT: this one used by sweeps (NP level) and overlaps (pruner level) - for capsule shape
PX_FORCE_INLINE const Gu::Capsule& getGuCapsule() const
{
PX_ASSERT(mType == PxGeometryType::eCAPSULE);
return reinterpret_cast<const Gu::Capsule&>(mGuCapsule);
}
// For capsules the box extents' x component doubles as the half-height
// (see the mGuBox member comment below).
PX_FORCE_INLINE float getCapsuleHalfHeight() const
{
PX_ASSERT(mType == PxGeometryType::eCAPSULE);
return mGuBox.extents.x;
}
PX_FORCE_INLINE PxU32 isOBB() const { return PxU32(mIsOBB); }
PX_FORCE_INLINE PxGeometryType::Enum getType() const { return PxGeometryType::Enum(mType); }
PX_NOCOPY(ShapeData)
private:
// PT: box: pre-inflated box extents
// capsule: pre-inflated extents of box-around-capsule
// convex: pre-inflated extents of box-around-convex
// sphere: not used
PxVec3 mPrunerBoxGeomExtents; // used for pruners. This volume encloses but can differ from the original shape
// PT:
//
// box center = unchanged copy of initial shape's position, except for convex (position of box around convex)
// SIMD code will load it as a V4 (safe because member is not last of Gu structure)
//
// box rot = precomputed PxMat33 version of initial shape's rotation, except for convex (rotation of box around convex)
// SIMD code will load it as V4s (safe because member is not last of Gu structure)
//
// box extents = non-inflated initial box extents for box shape, half-height for capsule, otherwise not used
Gu::Box mGuBox;
PxBounds3 mPrunerInflatedAABB; // precomputed AABB for the pruner shape
PxU16 mIsOBB; // true for OBB, false for AABB. Also used as padding for mPrunerInflatedAABB, don't move.
PxU16 mType; // shape's type
// these union Gu shapes are only precomputed for narrow phase (not pruners), can be different from mPrunerVolume
// so need separate storage
union
{
PxU8 mGuCapsule[sizeof(Gu::Capsule)]; // 28
PxU8 mGuSphere[sizeof(Gu::Sphere)]; // 16
};
};
// PT: please make sure it fits in "one" cache line
PX_COMPILE_TIME_ASSERT(sizeof(ShapeData)==128);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,219 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BOX_H
#define GU_BOX_H
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
class Capsule;
PX_PHYSX_COMMON_API void computeOBBPoints(PxVec3* PX_RESTRICT pts, const PxVec3& center, const PxVec3& extents, const PxVec3& base0, const PxVec3& base1, const PxVec3& base2);
/**
\brief Represents an oriented bounding box.
As a center point, extents(radii) and a rotation. i.e. the center of the box is at the center point,
the box is rotated around this point with the rotation and it is 2*extents in width, height and depth.
*/
/**
Box geometry
The rot member describes the world space orientation of the box.
The center member gives the world space position of the box.
The extents give the local space coordinates of the box corner in the positive octant.
Dimensions of the box are: 2*extent.
Transformation to world space is: worldPoint = rot * localPoint + center
Transformation to local space is: localPoint = T(rot) * (worldPoint - center)
Where T(M) denotes the transpose of M.
*/
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
class PX_PHYSX_COMMON_API Box
{
public:
/**
\brief Constructor
Leaves all members uninitialized (intentional, for performance).
*/
PX_FORCE_INLINE Box()
{
}
/**
\brief Constructor
\param origin Center of the OBB
\param extent Extents/radii of the obb.
\param base rotation to apply to the obb.
*/
//! Construct from center, extent and rotation
PX_FORCE_INLINE Box(const PxVec3& origin, const PxVec3& extent, const PxMat33& base) : rot(base), center(origin), extents(extent)
{}
//! Copy constructor
PX_FORCE_INLINE Box(const Box& other) : rot(other.rot), center(other.center), extents(other.extents)
{}
/**
\brief Destructor
*/
PX_FORCE_INLINE ~Box()
{
}
//! Assignment operator
PX_FORCE_INLINE const Box& operator=(const Box& other)
{
rot = other.rot;
center = other.center;
extents = other.extents;
return *this;
}
/**
\brief Setups an empty box.
Empty is encoded as negative extents (see isValid); rotation resets to identity.
*/
PX_INLINE void setEmpty()
{
center = PxVec3(0);
extents = PxVec3(-PX_MAX_REAL, -PX_MAX_REAL, -PX_MAX_REAL);
rot = PxMat33(PxIdentity);
}
/**
\brief Checks the box is valid.
\return true if the box is valid
*/
PX_INLINE bool isValid() const
{
// Consistency condition for (Center, Extents) boxes: Extents >= 0.0f
if(extents.x < 0.0f) return false;
if(extents.y < 0.0f) return false;
if(extents.z < 0.0f) return false;
return true;
}
/////////////
//! Sets the three columns of the rotation matrix. Callers are expected to
//! pass an orthonormal basis; this is not checked here.
PX_FORCE_INLINE void setAxes(const PxVec3& axis0, const PxVec3& axis1, const PxVec3& axis2)
{
rot.column0 = axis0;
rot.column1 = axis1;
rot.column2 = axis2;
}
//! Rotates a vector from box-local space to world space (no translation).
PX_FORCE_INLINE PxVec3 rotate(const PxVec3& src) const
{
return rot * src;
}
//! Rotates a vector from world space to box-local space (transpose == inverse for a rotation).
PX_FORCE_INLINE PxVec3 rotateInv(const PxVec3& src) const
{
return rot.transformTranspose(src);
}
//! Transforms a point from box-local space to world space: rot * src + center.
PX_FORCE_INLINE PxVec3 transform(const PxVec3& src) const
{
return rot * src + center;
}
//! Returns the box pose as a PxTransform (quaternion built from the rotation matrix).
PX_FORCE_INLINE PxTransform getTransform() const
{
return PxTransform(center, PxQuat(rot));
}
//! Extents of the world-axis-aligned box enclosing this OBB
//! (sum of the absolute rotation-matrix entries weighted by the extents).
PX_INLINE PxVec3 computeAABBExtent() const
{
const PxReal a00 = PxAbs(rot[0][0]);
const PxReal a01 = PxAbs(rot[0][1]);
const PxReal a02 = PxAbs(rot[0][2]);
const PxReal a10 = PxAbs(rot[1][0]);
const PxReal a11 = PxAbs(rot[1][1]);
const PxReal a12 = PxAbs(rot[1][2]);
const PxReal a20 = PxAbs(rot[2][0]);
const PxReal a21 = PxAbs(rot[2][1]);
const PxReal a22 = PxAbs(rot[2][2]);
const PxReal ex = extents.x;
const PxReal ey = extents.y;
const PxReal ez = extents.z;
return PxVec3( a00 * ex + a10 * ey + a20 * ez,
a01 * ex + a11 * ey + a21 * ez,
a02 * ex + a12 * ey + a22 * ez);
}
/**
Computes the obb points.
\param pts [out] 8 box points
*/
PX_FORCE_INLINE void computeBoxPoints(PxVec3* PX_RESTRICT pts) const
{
Gu::computeOBBPoints(pts, center, extents, rot.column0, rot.column1, rot.column2);
}
//! Initializes this box as a bounding volume around the given capsule (defined in GuBox.cpp).
void create(const Gu::Capsule& capsule);
// Member order and packing matter: sizeof(Gu::Box) is asserted to be 60
// below, and SIMD code elsewhere loads these members as V4s. Do not reorder.
PxMat33 rot;
PxVec3 center;
PxVec3 extents;
};
PX_COMPILE_TIME_ASSERT(sizeof(Gu::Box) == 60);
//! A padded version of Gu::Box, to safely load its data using SIMD
//! A padded version of Gu::Box, to safely load its data using SIMD
//! (the extra PxU32 brings the size to 64 bytes so unaligned V4 loads of the
//! trailing members never read past the object).
class BoxPadded : public Box
{
public:
PX_FORCE_INLINE BoxPadded() {}
PX_FORCE_INLINE ~BoxPadded() {}
PxU32 padding;
};
PX_COMPILE_TIME_ASSERT(sizeof(Gu::BoxPadded) == 64);
#if PX_VC
#pragma warning(pop)
#endif
}
}
#endif

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CACHED_FUNCS_H
#define GU_CACHED_FUNCS_H
#include "GuRaycastTests.h"
#include "GuSweepTests.h"
#include "GuOverlapTests.h"
namespace physx
{
namespace Gu
{
// Snapshots the geometry-type dispatch tables for raycast/sweep/overlap
// queries at construction, so query code fetches each table once instead of
// calling the accessor per query. Members are references/a pointer into
// tables owned elsewhere; this struct owns nothing (hence PX_NOCOPY).
struct CachedFuncs
{
CachedFuncs() :
mCachedRaycastFuncs (Gu::getRaycastFuncTable()),
mCachedSweepFuncs (Gu::getSweepFuncTable()),
mCachedOverlapFuncs (Gu::getOverlapFuncTable())
{
}
const Gu::GeomRaycastTable& mCachedRaycastFuncs;
const Gu::GeomSweepFuncs& mCachedSweepFuncs;
// Overlap table is held by pointer (that is what its accessor returns),
// unlike the two references above.
const Gu::GeomOverlapTable* mCachedOverlapFuncs;
PX_NOCOPY(CachedFuncs)
};
}
}
#endif

View File

@@ -0,0 +1,87 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CAPSULE_H
#define GU_CAPSULE_H
#include "GuSegment.h"
namespace physx
{
namespace Gu
{
/**
\brief Represents a capsule.
*/
/**
\brief Represents a capsule.

A capsule is a segment (inherited p0/p1 endpoints) swept by a sphere of the
given radius. Default construction leaves all members uninitialized; the
radius is not validated (callers are expected to pass radius >= 0).
*/
class Capsule : public Segment
{
public:
/**
\brief Constructor
*/
PX_INLINE Capsule()
{
}
/**
\brief Constructor
\param seg Line segment to create capsule from.
\param _radius Radius of the capsule.
*/
PX_INLINE Capsule(const Segment& seg, PxF32 _radius) : Segment(seg), radius(_radius)
{
}
/**
\brief Constructor
\param _p0 First segment point
\param _p1 Second segment point
\param _radius Radius of the capsule.
*/
PX_INLINE Capsule(const PxVec3& _p0, const PxVec3& _p1, PxF32 _radius) : Segment(_p0, _p1), radius(_radius)
{
}
/**
\brief Destructor
*/
PX_INLINE ~Capsule()
{
}
PxF32 radius;
};
}
}
#endif

View File

@@ -0,0 +1,123 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CENTER_EXTENTS_H
#define GU_CENTER_EXTENTS_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBounds3.h"
namespace physx
{
namespace Gu
{
// Axis-aligned bounds stored as center + half-extents (rather than min/max
// like PxBounds3). Empty bounds are encoded as extents == -PX_MAX_BOUNDS_EXTENTS.
// Layout (mCenter then mExtents, 6 floats) is relied upon by the padded
// subclass and its size assert below — do not reorder.
class CenterExtents : public physx::PxUserAllocated
{
public:
PX_FORCE_INLINE CenterExtents() {}
PX_FORCE_INLINE CenterExtents(const PxBounds3& b) { mCenter = b.getCenter(); mExtents = b.getExtents(); }
PX_FORCE_INLINE ~CenterExtents() {}
PX_FORCE_INLINE void getMin(PxVec3& min) const { min = mCenter - mExtents; }
PX_FORCE_INLINE void getMax(PxVec3& max) const { max = mCenter + mExtents; }
PX_FORCE_INLINE float getMin(PxU32 axis) const { return mCenter[axis] - mExtents[axis]; }
PX_FORCE_INLINE float getMax(PxU32 axis) const { return mCenter[axis] + mExtents[axis]; }
PX_FORCE_INLINE PxVec3 getMin() const { return mCenter - mExtents; }
PX_FORCE_INLINE PxVec3 getMax() const { return mCenter + mExtents; }
PX_FORCE_INLINE void setMinMax(const PxVec3& min, const PxVec3& max)
{
mCenter = (max + min)*0.5f;
mExtents = (max - min)*0.5f;
}
// Returns 1 when this box is fully contained inside 'box', else 0.
// Written as negated early-out comparisons; note the behavior differs from a
// single combined '<=' expression when NaNs are present.
PX_FORCE_INLINE PxU32 isInside(const CenterExtents& box) const
{
if(box.getMin(0)>getMin(0)) return 0;
if(box.getMin(1)>getMin(1)) return 0;
if(box.getMin(2)>getMin(2)) return 0;
if(box.getMax(0)<getMax(0)) return 0;
if(box.getMax(1)<getMax(1)) return 0;
if(box.getMax(2)<getMax(2)) return 0;
return 1;
}
// Marks the bounds empty via the negative-extents sentinel; mCenter is left untouched.
PX_FORCE_INLINE void setEmpty()
{
mExtents = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
}
PX_FORCE_INLINE bool isEmpty() const
{
PX_ASSERT(isValid());
return mExtents.x<0.0f;
}
PX_FORCE_INLINE bool isFinite() const
{
return mCenter.isFinite() && mExtents.isFinite();
}
// Valid means finite and either all extents non-negative, or exactly the
// empty-bounds sentinel in all three components.
PX_FORCE_INLINE bool isValid() const
{
const PxVec3& c = mCenter;
const PxVec3& e = mExtents;
return (c.isFinite() && e.isFinite() && (((e.x >= 0.0f) && (e.y >= 0.0f) && (e.z >= 0.0f)) ||
((e.x == -PX_MAX_BOUNDS_EXTENTS) &&
(e.y == -PX_MAX_BOUNDS_EXTENTS) &&
(e.z == -PX_MAX_BOUNDS_EXTENTS))))
;
}
PX_FORCE_INLINE PxBounds3 transformFast(const PxMat33& matrix) const
{
PX_ASSERT(isValid());
return PxBounds3::basisExtent(matrix * mCenter, matrix, mExtents);
}
PxVec3 mCenter;
PxVec3 mExtents;
};
//! A padded version of CenterExtents, to safely load its data using SIMD
//! A padded version of CenterExtents, to safely load its data using SIMD
//! (the extra PxU32 makes the object 7*4 bytes, so an unaligned V4 load of
//! mExtents never reads past the object).
class CenterExtentsPadded : public CenterExtents
{
public:
PX_FORCE_INLINE CenterExtentsPadded() {}
PX_FORCE_INLINE ~CenterExtentsPadded() {}
PxU32 padding;
};
PX_COMPILE_TIME_ASSERT(sizeof(CenterExtentsPadded) == 7*4);
}
}
#endif

View File

@@ -0,0 +1,56 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CONVEX_GEOMETRY_H
#define GU_CONVEX_GEOMETRY_H
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxBounds3.h"
namespace physx
{
class PxConvexCoreGeometry;
class PxBounds3;
class PxRenderOutput;
namespace Gu
{
struct ConvexShape;
PX_PHYSX_COMMON_API bool makeConvexShape(const PxGeometry& geom, const PxTransform& pose, ConvexShape& convex);
PX_PHYSX_COMMON_API bool isGPUCompatible(const PxConvexCoreGeometry& convex);
PX_PHYSX_COMMON_API void computeMassInfo(const PxConvexCoreGeometry& convex, PxReal& density1Mass, PxMat33& inertiaTensor, PxVec3& centerOfMass);
PX_PHYSX_COMMON_API void visualize(const PxConvexCoreGeometry& convex, const PxTransform& pose, bool drawCore, const PxBounds3& cullbox, PxRenderOutput& out);
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,158 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_H
#define GU_COOKING_H
// PT: TODO: the SDK always had this questionable design decision that all APIs can include all high-level public headers,
// regardless of where they fit in the header hierarchy. For example PhysXCommon can include headers from the higher-level
// PhysX DLL. We take advantage of that here by including PxCooking from PhysXCommon. That way we can reuse the same code
// as before without decoupling it from high-level classes like PxConvexMeshDesc/etc. A cleaner solution would be to decouple
// the two and only use PxConvexMeshDesc/etc in the higher level cooking DLL. The lower-level Gu functions below would then
// operate either on Gu-level types (see e.g. PxBVH / GuBVH which was done this way), or on basic types like float and ints
// to pass vertex & triangle data around. We could also split the kitchen-sink PxCookingParams structure into separate classes
// for convex / triangle mesh / etc. Overall there might be some more refactoring to do here, and that's why these functions
// have been put in the "semi public" Gu API for now, instead of the Px API (which is more strict in terms of backward
// compatibility and how we deal with deprecated functions).
#include "cooking/PxCooking.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxMemory.h"
namespace physx
{
// Forward declarations of the high-level types the immediate-cooking API operates on.
class PxInsertionCallback;
class PxOutputStream;
class PxBVHDesc;
class PxBVH;
class PxHeightField;
struct PxCookingParams;
// "Immediate" cooking entry points. By convention below, cook*() serializes the cooked
// data to a PxOutputStream, while create*() builds the runtime object directly through
// a PxInsertionCallback.
namespace immediateCooking
{
// Gathers nbElem elements of elemSize bytes each from a strided source buffer and
// packs them contiguously into dst. 'stride' is the byte distance between two
// consecutive source elements.
PX_FORCE_INLINE static void gatherStrided(const void* src, void* dst, PxU32 nbElem, PxU32 elemSize, PxU32 stride)
{
	const PxU8* srcBytes = reinterpret_cast<const PxU8*>(src);
	PxU8* dstBytes = reinterpret_cast<PxU8*>(dst);
	for(PxU32 i=0; i<nbElem; i++)
	{
		PxMemCopy(dstBytes, srcBytes, elemSize);
		dstBytes += elemSize;
		srcBytes += stride;
	}
}
// Returns true when the platform performing the cooking is not little-endian.
// Cooked files must be little-endian - we don't have big-endian platforms anymore.
PX_INLINE static bool platformMismatch()
{
	// PxLittleEndian() returns 1 on little-endian platforms.
	return PxLittleEndian() != 1;
}
// Default insertion callback used by the convenience create*() overloads below.
PX_C_EXPORT PX_PHYSX_COMMON_API PxInsertionCallback* getInsertionCallback(); // PT: should be a reference but using a pointer for C
// BVH
// Serializes a cooked BVH to 'stream', or builds a PxBVH directly through 'insertionCallback'.
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookBVH(const PxBVHDesc& desc, PxOutputStream& stream);
PX_C_EXPORT PX_PHYSX_COMMON_API PxBVH* createBVH(const PxBVHDesc& desc, PxInsertionCallback& insertionCallback);
// Convenience overload: builds the BVH through the default insertion callback.
PX_FORCE_INLINE PxBVH* createBVH(const PxBVHDesc& desc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createBVH(desc, *defaultCallback);
}
// Heightfield
// Serializes a cooked heightfield to 'stream', or builds a PxHeightField directly through 'insertionCallback'.
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookHeightField(const PxHeightFieldDesc& desc, PxOutputStream& stream);
PX_C_EXPORT PX_PHYSX_COMMON_API PxHeightField* createHeightField(const PxHeightFieldDesc& desc, PxInsertionCallback& insertionCallback);
// Convenience overload: builds the heightfield through the default insertion callback.
PX_FORCE_INLINE PxHeightField* createHeightField(const PxHeightFieldDesc& desc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createHeightField(desc, *defaultCallback);
}
// Convex meshes
// Serializes a cooked convex mesh to 'stream', or builds a PxConvexMesh directly through 'insertionCallback'.
// 'condition' optionally receives the detailed cooking result code.
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc, PxOutputStream& stream, PxConvexMeshCookingResult::Enum* condition=NULL);
PX_C_EXPORT PX_PHYSX_COMMON_API PxConvexMesh* createConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc, PxInsertionCallback& insertionCallback, PxConvexMeshCookingResult::Enum* condition=NULL);
// Convenience overload: builds the convex mesh through the default insertion callback.
// The detailed result code is not reported.
PX_FORCE_INLINE PxConvexMesh* createConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createConvexMesh(params, desc, *defaultCallback);
}
// Checks a convex mesh descriptor for cookability without producing any output.
PX_C_EXPORT PX_PHYSX_COMMON_API bool validateConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc);
// Computes hull polygon data from a triangle soup. NOTE(review): output arrays appear to be
// allocated through 'inCallback' - confirm the deallocation contract in the implementation.
PX_C_EXPORT PX_PHYSX_COMMON_API bool computeHullPolygons(const PxCookingParams& params, const PxSimpleTriangleMesh& mesh, PxAllocatorCallback& inCallback, PxU32& nbVerts, PxVec3*& vertices,
PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& hullPolygons);
// Triangle meshes
// Same pattern as above: validate / create via insertion callback / cook to stream.
PX_C_EXPORT PX_PHYSX_COMMON_API bool validateTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc);
PX_C_EXPORT PX_PHYSX_COMMON_API PxTriangleMesh* createTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxInsertionCallback& insertionCallback, PxTriangleMeshCookingResult::Enum* condition=NULL);
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition=NULL);
// Convenience overload: builds the triangle mesh through the default insertion callback.
// The detailed result code is not reported.
PX_FORCE_INLINE PxTriangleMesh* createTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createTriangleMesh(params, desc, *defaultCallback);
}
// Tetrahedron & deformable volume meshes
// Serializes a cooked tetrahedron mesh to 'stream', or builds a PxTetrahedronMesh directly through 'insertionCallback'.
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc, PxOutputStream& stream);
PX_C_EXPORT PX_PHYSX_COMMON_API PxTetrahedronMesh* createTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc, PxInsertionCallback& insertionCallback);
// Convenience overload: builds the tetrahedron mesh through the default insertion callback.
PX_FORCE_INLINE PxTetrahedronMesh* createTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createTetrahedronMesh(params, meshDesc, *defaultCallback);
}
// Cooks or creates a deformable volume mesh from separate simulation and collision
// tetrahedron meshes plus the softbody simulation data descriptor.
PX_C_EXPORT PX_PHYSX_COMMON_API bool cookDeformableVolumeMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
const PxDeformableVolumeSimulationDataDesc& softbodyDataDesc, PxOutputStream& stream);
PX_C_EXPORT PX_PHYSX_COMMON_API PxDeformableVolumeMesh* createDeformableVolumeMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
const PxDeformableVolumeSimulationDataDesc& softbodyDataDesc, PxInsertionCallback& insertionCallback);
// Convenience overload: builds the deformable volume mesh through the default insertion callback.
PX_FORCE_INLINE PxDeformableVolumeMesh* createDeformableVolumeMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
	const PxDeformableVolumeSimulationDataDesc& deformableVolumeDataDesc)
{
	PxInsertionCallback* defaultCallback = getInsertionCallback();
	return createDeformableVolumeMesh(params, simulationMeshDesc, collisionMeshDesc, deformableVolumeDataDesc, *defaultCallback);
}
// Lower-level building blocks for deformable volume cooking: compute the individual
// pieces (mapping / collision / simulation data), then assemble them into a mesh.
// NOTE(review): returned objects are presumably owned by the caller - confirm in the .cpp.
PX_C_EXPORT PX_PHYSX_COMMON_API PxCollisionMeshMappingData* computeModelsMapping(const PxCookingParams& params,
PxTetrahedronMeshData& simulationMesh, const PxTetrahedronMeshData& collisionMesh,
const PxDeformableVolumeCollisionData& collisionData, const PxBoundedData* vertexToTet = NULL);
PX_C_EXPORT PX_PHYSX_COMMON_API PxCollisionTetrahedronMeshData* computeCollisionData(const PxCookingParams& params, const PxTetrahedronMeshDesc& collisionMeshDesc);
PX_C_EXPORT PX_PHYSX_COMMON_API PxSimulationTetrahedronMeshData* computeSimulationData(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc);
PX_C_EXPORT PX_PHYSX_COMMON_API PxDeformableVolumeMesh* assembleDeformableVolumeMesh(PxTetrahedronMeshData& simulationMesh,
PxDeformableVolumeSimulationData& simulationData, PxTetrahedronMeshData& collisionMesh, PxDeformableVolumeCollisionData& collisionData,
PxCollisionMeshMappingData& mappingData, PxInsertionCallback& insertionCallback);
}
}
#endif

View File

@@ -0,0 +1,136 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_POINT_TETRAHEDRON_H
#define GU_DISTANCE_POINT_TETRAHEDRON_H
#include "foundation/PxVec3.h"
#include "foundation/PxVec4.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuDistancePointTriangle.h"
#include "foundation/PxMathUtils.h"
namespace physx
{
namespace Gu
{
// Classifies point p against the four face planes of tetrahedron (_a, _b, _c, _d).
// Each component of the returned vector is the product of two signed dot products:
// the side of p and the side of the opposite vertex w.r.t. one face plane.
// A component <= 0 therefore indicates p is outside (or on) that face.
PX_INLINE PX_CUDA_CALLABLE PxVec4 PointOutsideOfPlane4(const PxVec3& p, const PxVec3& _a, const PxVec3& _b,
	const PxVec3& _c, const PxVec3& _d)
{
	const PxVec3 toP = p - _a;
	const PxVec3 edgeAB = _b - _a;
	const PxVec3 edgeAC = _c - _a;
	const PxVec3 edgeAD = _d - _a;

	// Face (a, b, c) against opposite vertex d
	const PxVec3 nABC = edgeAB.cross(edgeAC);
	const float sideP0 = nABC.dot(toP);
	const float sideD0 = nABC.dot(edgeAD);

	// Face (a, c, d) against opposite vertex b
	const PxVec3 nACD = edgeAC.cross(edgeAD);
	const float sideP1 = nACD.dot(toP);
	const float sideB1 = nACD.dot(edgeAB);

	// Face (a, d, b) against opposite vertex c
	const PxVec3 nADB = edgeAD.cross(edgeAB);
	const float sideP2 = nADB.dot(toP);
	const float sideC2 = nADB.dot(edgeAC);

	// Face (b, d, c) against opposite vertex a
	const PxVec3 edgeBD = _d - _b;
	const PxVec3 edgeBC = _c - _b;
	const PxVec3 nBDC = edgeBD.cross(edgeBC);
	const float sideP3 = nBDC.dot(p - _b);
	const float sideA3 = nBDC.dot(_a - _b);

	// If a combined sign is at most zero, p is outside of that face.
	return PxVec4(sideP0 * sideD0, sideP1 * sideB1, sideP2 * sideC2, sideP3 * sideA3);
}
// Variant taking the precomputed PointOutsideOfPlane4 result. NOTE(review): presumably
// used to skip the face tests p lies inside of - confirm against the implementation.
PX_PHYSX_COMMON_API PxVec3 closestPtPointTetrahedron(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec4& result);
// Returns the closest point on tetrahedron (a, b, c, d) to p by testing all four
// triangular faces and keeping the closest candidate. Note this always returns a
// point on the surface, even when p is inside the tetrahedron.
// Comments such as "0, 2, 3" give the vertex indices (a=0, b=1, c=2, d=3) of the face tested.
PX_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointTetrahedron(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d)
{
// Precomputed edges, reused by the per-face closest-point calls below.
const PxVec3 ab = b - a;
const PxVec3 ac = c - a;
const PxVec3 ad = d - a;
const PxVec3 bc = c - b;
const PxVec3 bd = d - b;
//point to face 0, 1, 2
PxVec3 bestClosestPt = closestPtPointTriangle2(p, a, b, c, ab, ac);
PxVec3 diff = bestClosestPt - p;
PxReal bestSqDist = diff.dot(diff);
// 0, 2, 3
PxVec3 closestPt = closestPtPointTriangle2(p, a, c, d, ac, ad);
diff = closestPt - p;
PxReal sqDist = diff.dot(diff);
if (sqDist < bestSqDist)
{
bestClosestPt = closestPt;
bestSqDist = sqDist;
}
// 0, 3, 1
closestPt = closestPtPointTriangle2(p, a, d, b, ad, ab);
diff = closestPt - p;
sqDist = diff.dot(diff);
if (sqDist < bestSqDist)
{
bestClosestPt = closestPt;
bestSqDist = sqDist;
}
// 1, 3, 2
closestPt = closestPtPointTriangle2(p, b, d, c, bd, bc);
diff = closestPt - p;
sqDist = diff.dot(diff);
if (sqDist < bestSqDist)
{
bestClosestPt = closestPt;
bestSqDist = sqDist;
}
return bestClosestPt;
}
// Returns p itself when p lies inside (or within eps of) tetrahedron (a, b, c, d),
// judged by its barycentric coordinates; otherwise falls back to the closest point
// on the tetrahedron surface.
PX_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointTetrahedronWithInsideCheck(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxReal eps = 0)
{
	PxVec4 bary;
	PxComputeBarycentric(a, b, c, d, p, bary);

	const PxReal lo = -eps;
	const PxReal hi = 1.f + eps;
	const bool inside = bary.x >= lo && bary.x <= hi &&
						bary.y >= lo && bary.y <= hi &&
						bary.z >= lo && bary.z <= hi &&
						bary.w >= lo && bary.w <= hi;

	return inside ? p : closestPtPointTetrahedron(p, a, b, c, d);
}
}
}
#endif

View File

@@ -0,0 +1,215 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_POINT_TRIANGLE_H
#define GU_DISTANCE_POINT_TRIANGLE_H
#include "foundation/PxVec3.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Gu
{
// PT: special version:
// - inlined
// - doesn't compute (s,t) output params
// - expects precomputed edges in input
// Closest point on triangle (a, b, c) to point p, computed by classifying p into one
// of the triangle's Voronoi regions (vertex, edge, or face). 'ab' and 'ac' must be the
// precomputed edges b - a and c - a.
PX_FORCE_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointTriangle2(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& ab, const PxVec3& ac)
{
// Check if P in vertex region outside A
//const PxVec3 ab = b - a;
//const PxVec3 ac = c - a;
const PxVec3 ap = p - a;
const float d1 = ab.dot(ap);
const float d2 = ac.dot(ap);
if(d1<=0.0f && d2<=0.0f)
return a; // Barycentric coords 1,0,0
// Check if P in vertex region outside B
const PxVec3 bp = p - b;
const float d3 = ab.dot(bp);
const float d4 = ac.dot(bp);
if(d3>=0.0f && d4<=d3)
return b; // Barycentric coords 0,1,0
// Check if P in edge region of AB, if so return projection of P onto AB
const float vc = d1*d4 - d3*d2;
if(vc<=0.0f && d1>=0.0f && d3<=0.0f)
{
const float v = d1 / (d1 - d3);
return a + v * ab; // barycentric coords (1-v, v, 0)
}
// Check if P in vertex region outside C
const PxVec3 cp = p - c;
const float d5 = ab.dot(cp);
const float d6 = ac.dot(cp);
if(d6>=0.0f && d5<=d6)
return c; // Barycentric coords 0,0,1
// Check if P in edge region of AC, if so return projection of P onto AC
const float vb = d5*d2 - d1*d6;
if(vb<=0.0f && d2>=0.0f && d6<=0.0f)
{
const float w = d2 / (d2 - d6);
return a + w * ac; // barycentric coords (1-w, 0, w)
}
// Check if P in edge region of BC, if so return projection of P onto BC
const float va = d3*d6 - d5*d4;
if(va<=0.0f && (d4-d3)>=0.0f && (d5-d6)>=0.0f)
{
const float w = (d4-d3) / ((d4 - d3) + (d5-d6));
return b + w * (c-b); // barycentric coords (0, 1-w, w)
}
// P inside face region. Compute Q through its barycentric coords (u,v,w)
const float denom = 1.0f / (va + vb + vc);
const float v = vb * denom;
const float w = vc * denom;
return a + ab*v + ac*w;
}
//Scales and translates the triangle and query point into the unit box before computing
//the closest point, which makes the calculation less prone to numerical cancellation.
//The returned point is mapped back, so it is in the same space as the input points.
PX_FORCE_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointTriangle2UnitBox(const PxVec3& queryPoint, const PxVec3& triA, const PxVec3& triB, const PxVec3& triC)
{
	const PxVec3 lower = queryPoint.minimum(triA.minimum(triB.minimum(triC)));
	const PxVec3 upper = queryPoint.maximum(triA.maximum(triB.maximum(triC)));
	const PxVec3 extent = upper - lower;

	// Uniform scale derived from the largest extent, clamped by 1e-12 to avoid a division by zero.
	const PxReal invScaling = PxMax(PxMax(extent.x, extent.y), PxMax(1e-12f, extent.z));
	const PxReal scaling = 1.0f / invScaling;

	const PxVec3 localP = (queryPoint - lower) * scaling;
	const PxVec3 localA = (triA - lower) * scaling;
	const PxVec3 localB = (triB - lower) * scaling;
	const PxVec3 localC = (triC - lower) * scaling;

	const PxVec3 localClosest = closestPtPointTriangle2(localP, localA, localB, localC, localB - localA, localC - localA);
	return localClosest * invScaling + lower;
}
// Given the point `c`, return the closest point on the triangle (1, 0, 0), (0, 1, 0), (0, 0, 1).
// This function is a specialization of `Gu::closestPtPointTriangle2` for this specific triangle.
PX_FORCE_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointBaryTriangle(PxVec3 c)
{
const PxReal third = 1.0f / 3.0f; // constexpr
// Project c onto the triangle's plane x + y + z = 1 by subtracting the same
// amount from each coordinate.
c -= PxVec3(third * (c.x + c.y + c.z - 1.0f));
// two negative: return positive vertex
if (c.y < 0.0f && c.z < 0.0f)
return PxVec3(1.0f, 0.0f, 0.0f);
if (c.x < 0.0f && c.z < 0.0f)
return PxVec3(0.0f, 1.0f, 0.0f);
if (c.x < 0.0f && c.y < 0.0f)
return PxVec3(0.0f, 0.0f, 1.0f);
// one negative: return projection onto line if it is on the edge, or the largest vertex otherwise
// (the negative coordinate is zeroed and split evenly between the other two)
if (c.x < 0.0f)
{
const PxReal d = c.x * 0.5f;
const PxReal y = c.y + d;
const PxReal z = c.z + d;
if (y > 1.0f)
return PxVec3(0.0f, 1.0f, 0.0f);
if (z > 1.0f)
return PxVec3(0.0f, 0.0f, 1.0f);
return PxVec3(0.0f, y, z);
}
if (c.y < 0.0f)
{
const PxReal d = c.y * 0.5f;
const PxReal x = c.x + d;
const PxReal z = c.z + d;
if (x > 1.0f)
return PxVec3(1.0f, 0.0f, 0.0f);
if (z > 1.0f)
return PxVec3(0.0f, 0.0f, 1.0f);
return PxVec3(x, 0.0f, z);
}
if (c.z < 0.0f)
{
const PxReal d = c.z * 0.5f;
const PxReal x = c.x + d;
const PxReal y = c.y + d;
if (x > 1.0f)
return PxVec3(1.0f, 0.0f, 0.0f);
if (y > 1.0f)
return PxVec3(0.0f, 1.0f, 0.0f);
return PxVec3(x, y, 0.0f);
}
// all coordinates non-negative: the projected point lies inside the triangle
return c;
}
// Closest point on triangle (a, b, c) to p. NOTE(review): s and t presumably receive the
// barycentric/edge parameters of the closest point - confirm in GuDistancePointTriangle.cpp.
PX_PHYSX_COMMON_API PxVec3 closestPtPointTriangle(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, float& s, float& t);
// Squared distance from 'point' to the triangle given as an origin plus two edges.
// 'param0'/'param1' optionally receive the parameters reported by closestPtPointTriangle.
PX_FORCE_INLINE PxReal distancePointTriangleSquared(const PxVec3& point,
	const PxVec3& triangleOrigin, const PxVec3& triangleEdge0, const PxVec3& triangleEdge1,
	PxReal* param0=NULL, PxReal* param1=NULL)
{
	// Reconstruct the two remaining triangle vertices from the edges.
	const PxVec3 vertex1 = triangleOrigin + triangleEdge0;
	const PxVec3 vertex2 = triangleOrigin + triangleEdge1;

	float u, v;
	const PxVec3 closest = closestPtPointTriangle(point, triangleOrigin, vertex1, vertex2, u, v);

	if(param0)
		*param0 = u;
	if(param1)
		*param1 = v;

	return (closest - point).magnitudeSquared();
}
// SIMD (aos) variant: squared distance from 'point' to triangle (a, b, c);
// outputs the closest point and its (u, v) parameters.
PX_PHYSX_COMMON_API aos::FloatV distancePointTriangleSquared(	const aos::Vec3VArg point,
const aos::Vec3VArg a,
const aos::Vec3VArg b,
const aos::Vec3VArg c,
aos::FloatV& u,
aos::FloatV& v,
aos::Vec3V& closestP);
//Scales and translates triangle and query points to fit into the unit box to make calculations less prone to numerical cancellation.
//The returned point and squared distance will still be in the same space as the input points.
PX_PHYSX_COMMON_API aos::FloatV distancePointTriangleSquared2UnitBox(
const aos::Vec3VArg point,
const aos::Vec3VArg a,
const aos::Vec3VArg b,
const aos::Vec3VArg c,
aos::FloatV& u,
aos::FloatV& v,
aos::Vec3V& closestP);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,56 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_SEGMENT_BOX_H
#define GU_DISTANCE_SEGMENT_BOX_H
#include "foundation/PxMat33.h"
#include "GuSegment.h"
#include "GuBox.h"
namespace physx
{
namespace Gu
{
//! Compute the smallest squared distance from the (finite) line segment to the box.
//! Optionally returns the parameter along the segment and the closest point on the box.
PX_PHYSX_COMMON_API PxReal distanceSegmentBoxSquared(	const PxVec3& segmentPoint0, const PxVec3& segmentPoint1,
const PxVec3& boxOrigin, const PxVec3& boxExtent, const PxMat33& boxBase,
PxReal* segmentParam = NULL,
PxVec3* boxParam = NULL);
//! Convenience overload operating on Gu::Segment / Gu::Box, forwarding to the raw-data version.
PX_FORCE_INLINE PxReal distanceSegmentBoxSquared(const Gu::Segment& segment, const Gu::Box& box, PxReal* t = NULL, PxVec3* p = NULL)
{
	return distanceSegmentBoxSquared(segment.p0, segment.p1,
									 box.center, box.extents, box.rot,
									 t, p);
}
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,76 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_SEGMENT_SEGMENT_H
#define GU_DISTANCE_SEGMENT_SEGMENT_H
#include "common/PxPhysXCommonConfig.h"
#include "GuSegment.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Gu
{
// This version fixes accuracy issues (e.g. TTP 4617), but needs to do 2 square roots in order
// to find the normalized direction and length of the segments, and then
// a division in order to renormalize the output.
// Segments are given as origin + normalized direction + half-extent; s/t optionally
// receive the closest-point parameters.
PX_PHYSX_COMMON_API PxReal distanceSegmentSegmentSquared(	const PxVec3& origin0, const PxVec3& dir0, PxReal extent0,
const PxVec3& origin1, const PxVec3& dir1, PxReal extent1,
PxReal* s=NULL, PxReal* t=NULL);
// Variant taking each segment as origin + (unnormalized) extent vector.
PX_PHYSX_COMMON_API PxReal distanceSegmentSegmentSquared(	const PxVec3& origin0, const PxVec3& extent0,
const PxVec3& origin1, const PxVec3& extent1,
PxReal* s=NULL, PxReal* t=NULL);
// Convenience overload operating on Gu::Segment objects; forwards each segment as
// origin + direction vector to the raw-data version.
PX_FORCE_INLINE PxReal distanceSegmentSegmentSquared(	const Gu::Segment& segment0,
						const Gu::Segment& segment1,
						PxReal* s=NULL, PxReal* t=NULL)
{
	const PxVec3 dir0 = segment0.computeDirection();
	const PxVec3 dir1 = segment1.computeDirection();
	return distanceSegmentSegmentSquared(segment0.p0, dir0, segment1.p0, dir1, s, t);
}
// SIMD (aos) variant: squared distance between two segments given as origin + direction;
// param0/param1 receive the closest-point parameters.
PX_PHYSX_COMMON_API aos::FloatV distanceSegmentSegmentSquared(	const aos::Vec3VArg p1, const aos::Vec3VArg d1, const aos::Vec3VArg p2, const aos::Vec3VArg d2,
aos::FloatV& param0,
aos::FloatV& param1);
// This function performs four segment-segment closest-point tests in one go:
// segment (p, d) against each of the four segments (p02, d02) ... (p32, d32).
aos::Vec4V distanceSegmentSegmentSquared4(	const aos::Vec3VArg p, const aos::Vec3VArg d,
const aos::Vec3VArg p02, const aos::Vec3VArg d02,
const aos::Vec3VArg p12, const aos::Vec3VArg d12,
const aos::Vec3VArg p22, const aos::Vec3VArg d22,
const aos::Vec3VArg p32, const aos::Vec3VArg d32,
aos::Vec4V& s, aos::Vec4V& t);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,48 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_FACTORY_H
#define GU_FACTORY_H
#include "foundation/PxSimpleTypes.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
namespace physx
{
namespace Gu
{
class Pruner;
// Factory functions for the available pruner implementations. NOTE(review): the
// returned pruner is presumably owned by the caller - confirm at the call sites.
PX_C_EXPORT PX_PHYSX_COMMON_API Gu::Pruner* createBucketPruner(PxU64 contextID);
// 'dynamic' selects the dynamic-object flavor; build strategy and objects-per-node tune the AABB tree.
PX_C_EXPORT PX_PHYSX_COMMON_API Gu::Pruner* createAABBPruner(PxU64 contextID, bool dynamic, Gu::CompanionPrunerType type, Gu::BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode);
PX_C_EXPORT PX_PHYSX_COMMON_API Gu::Pruner* createIncrementalPruner(PxU64 contextID);
}
}
#endif

View File

@@ -0,0 +1,53 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_BOX_BOX_H
#define GU_INTERSECTION_BOX_BOX_H
#include "foundation/PxMat33.h"
#include "foundation/PxBounds3.h"
#include "GuBox.h"
namespace physx
{
namespace Gu
{
// OBB-vs-OBB overlap test. Each box is given as extents (e), center (c) and rotation (r).
// NOTE(review): 'full_test' presumably enables the complete set of separating axes - confirm in the .cpp.
PX_PHYSX_COMMON_API bool intersectOBBOBB(const PxVec3& e0, const PxVec3& c0, const PxMat33& r0, const PxVec3& e1, const PxVec3& c1, const PxMat33& r1, bool full_test);
// OBB-vs-AABB overlap: treats the AABB as an identity-oriented OBB and defers to intersectOBBOBB.
PX_FORCE_INLINE bool intersectOBBAABB(const Gu::Box& obb, const PxBounds3& aabb)
{
	const PxVec3 aabbCenter = aabb.getCenter();
	const PxVec3 aabbExtents = aabb.getExtents();
	return intersectOBBOBB(obb.extents, obb.center, obb.rot, aabbExtents, aabbCenter, PxMat33(PxIdentity), true);
}
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,59 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_TETRAHEDRON_BOX_H
#define GU_INTERSECTION_TETRAHEDRON_BOX_H
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
class Box;
class BoxPadded;
/**
Tests if a tetrahedron overlaps a box (AABB).
\param a [in] tetrahedron's first point
\param b [in] tetrahedron's second point
\param c [in] tetrahedron's third point
\param d [in] tetrahedron's fourth point
\param box [in] The axis-aligned box to check for overlap
\return true if tetrahedron overlaps box
*/
PX_PHYSX_COMMON_API bool intersectTetrahedronBox(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxBounds3& box);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,173 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_TETRAHEDRON_TETRAHEDRON_H
#define GU_INTERSECTION_TETRAHEDRON_TETRAHEDRON_H
#include "foundation/PxPlane.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
// Tetrahedron in SAT-friendly form: the four vertices plus the four precomputed
// face planes and the centroid. Filled in by constructTetrahedron() below, which
// orients each plane so the opposite vertex lies on its non-positive side.
struct Tetrahedron
{
	PxVec3 verts[4];	// corner points a, b, c, d
	PxPlane planes[4];	// face planes (abc, abd, acd, bcd) — see constructTetrahedron()
	PxVec3 centroid;	// centroid value — see constructTetrahedron() for how it is computed
};
// Builds the plane through pa/pb/pc, oriented so that the fourth tetrahedron
// vertex pd lies on the non-positive side of the plane (the plane is flipped
// when pd has a positive signed distance).
PX_INLINE PX_CUDA_CALLABLE PxPlane createPlane(const PxVec3& pa, const PxVec3& pb, const PxVec3& pc, const PxVec3& pd)
{
	PxPlane result(pa, pb, pc);
	const bool fourthPointInFront = result.distance(pd) > 0.f;
	if (fourthPointInFront)
	{
		result.n = -result.n;
		result.d = -result.d;
	}
	return result;
}
// Fills a Tetrahedron from its four corner points: stores the vertices, builds
// the four face planes (each oriented away from the opposite vertex via
// createPlane) and computes the centroid.
PX_INLINE PX_CUDA_CALLABLE void constructTetrahedron(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d,
	Tetrahedron& tet)
{
	tet.verts[0] = a; tet.verts[1] = b; tet.verts[2] = c; tet.verts[3] = d;
	tet.planes[0] = createPlane(a, b, c, d);
	tet.planes[1] = createPlane(a, b, d, c);
	tet.planes[2] = createPlane(a, c, d, b);
	tet.planes[3] = createPlane(b, c, d, a);
	// Fix: the centroid of a tetrahedron is the average of all FOUR vertices.
	// The previous code averaged only a, b and c, which is the centroid of
	// face abc, not of the tetrahedron.
	tet.centroid = (a + b + c + d) * 0.25f;
}
// Signed distance from the plane to the closest tetrahedron vertex.
// Equivalent to taking the minimum of plane.distance() over the four vertices,
// but adds the plane offset d once, after the min of the raw dot products.
PX_INLINE PX_CUDA_CALLABLE PxReal minProject(const PxPlane& plane, const Tetrahedron& tet)
{
	PxReal minDot = plane.n.dot(tet.verts[0]);
	minDot = PxMin(minDot, plane.n.dot(tet.verts[1]));
	minDot = PxMin(minDot, plane.n.dot(tet.verts[2]));
	minDot = PxMin(minDot, plane.n.dot(tet.verts[3]));
	return minDot + plane.d;
}
// Projects both tetrahedra onto the given axis and returns the separation
// between the two resulting intervals (negative when the intervals overlap).
PX_INLINE PX_CUDA_CALLABLE PxReal testSeparatingAxis(const PxVec3& axis, const Tetrahedron& tet0, const Tetrahedron& tet1)
{
	PxReal lo0 = tet0.verts[0].dot(axis);
	PxReal hi0 = lo0;
	PxReal lo1 = tet1.verts[0].dot(axis);
	PxReal hi1 = lo1;
	for (PxU32 v = 1; v < 4; ++v)
	{
		const PxReal d0 = tet0.verts[v].dot(axis);
		const PxReal d1 = tet1.verts[v].dot(axis);
		lo0 = PxMin(lo0, d0);
		hi0 = PxMax(hi0, d0);
		lo1 = PxMin(lo1, d1);
		hi1 = PxMax(hi1, d1);
	}
	return PxMax(lo1 - hi0, lo0 - hi1);
}
/**
SAT overlap test between two tetrahedra.
Tests the face planes of both tetrahedra first, then (when TDoCross is set)
the 6x6 edge-edge cross-product axes. Returns a separation value: anything
above 'tolerance' proves the tetrahedra are disjoint, and the function
early-outs as soon as an axis establishes that.
*/
template <bool TDoCross = true>
PX_INLINE PX_CUDA_CALLABLE PxReal satIntersect(const Tetrahedron& tet0, const Tetrahedron& tet1, const PxReal tolerance)
{
	// Face planes of tet0 against tet1, then face planes of tet1 against tet0.
	PxReal sep = minProject(tet0.planes[0], tet1);
	for (PxU32 k = 1; k < 4; ++k)
	{
		if (sep > tolerance)
			return sep;
		sep = PxMax(sep, minProject(tet0.planes[k], tet1));
	}
	for (PxU32 k = 0; k < 4; ++k)
	{
		if (sep > tolerance)
			return sep;
		sep = PxMax(sep, minProject(tet1.planes[k], tet0));
	}
	if (sep > tolerance)
		return sep;

	if (TDoCross)
	{
		// The 6 edges of a tetrahedron as (start, end) vertex index pairs:
		// (0,1) (0,2) (0,3) (1,2) (1,3) (2,3)
		const PxU32 eStart[6] = { 0, 0, 0, 1, 1, 2 };
		const PxU32 eEnd[6] = { 1, 2, 3, 2, 3, 3 };
		for (PxU32 i = 0; i < 6; ++i)
		{
			const PxVec3 edge0 = tet0.verts[eEnd[i]] - tet0.verts[eStart[i]];
			for (PxU32 j = 0; j < 6; ++j)
			{
				const PxVec3 edge1 = tet1.verts[eEnd[j]] - tet1.verts[eStart[j]];
				PxVec3 sepAxis = edge0.cross(edge1);
				const PxReal magSq = sepAxis.magnitudeSquared();
				// Skip near-degenerate axes produced by (almost) parallel edges.
				if (magSq > 1e-5f)
				{
					sepAxis = sepAxis * (1.f / PxSqrt(magSq));
					sep = PxMax(sep, testSeparatingAxis(sepAxis, tet0, tet1));
					if (sep > tolerance)
						return sep;
				}
			}
		}
	}
	return sep;
}
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,89 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_TRIANGLE_BOX_H
#define GU_INTERSECTION_TRIANGLE_BOX_H
#include "foundation/PxMat33.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
class Box;
class BoxPadded;
/**
Tests if a triangle overlaps a box (AABB). This is the reference non-SIMD code.
\param center [in] the box center
\param extents [in] the box extents
\param p0 [in] triangle's first point
\param p1 [in] triangle's second point
\param p2 [in] triangle's third point
\return true if triangle overlaps box
*/
PX_PHYSX_COMMON_API PxIntBool intersectTriangleBox_ReferenceCode(const PxVec3& center, const PxVec3& extents, const PxVec3& p0, const PxVec3& p1, const PxVec3& p2);
/**
Tests if a triangle overlaps a box (AABB). This is the optimized SIMD code.
WARNING: the function has various SIMD requirements, left to the calling code:
- function will load 4 bytes after 'center'. Make sure it's safe to load from there.
- function will load 4 bytes after 'extents'. Make sure it's safe to load from there.
- function will load 4 bytes after 'p0'. Make sure it's safe to load from there.
- function will load 4 bytes after 'p1'. Make sure it's safe to load from there.
- function will load 4 bytes after 'p2'. Make sure it's safe to load from there.
If you can't guarantee these requirements, please use the non-SIMD reference code instead.
\param center [in] the box center.
\param extents [in] the box extents
\param p0 [in] triangle's first point
\param p1 [in] triangle's second point
\param p2 [in] triangle's third point
\return true if triangle overlaps box
*/
PX_PHYSX_COMMON_API PxIntBool intersectTriangleBox_Unsafe(const PxVec3& center, const PxVec3& extents, const PxVec3& p0, const PxVec3& p1, const PxVec3& p2);
/**
Tests if a triangle overlaps a box (OBB).
There are currently no SIMD-related requirements for p0, p1, p2.
\param box [in] the box
\param p0 [in] triangle's first point
\param p1 [in] triangle's second point
\param p2 [in] triangle's third point
\return true if triangle overlaps box
*/
PX_PHYSX_COMMON_API PxIntBool intersectTriangleBox(const BoxPadded& box, const PxVec3& p0, const PxVec3& p1, const PxVec3& p2);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,263 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_TRIANGLE_BOX_REF_H
#define GU_INTERSECTION_TRIANGLE_BOX_REF_H
#include "foundation/PxVec3.h"
/********************************************************/
/* AABB-triangle overlap test code */
/* by Tomas Akenine-Möller */
/* Function: int triBoxOverlap(float boxcenter[3], */
/* float boxhalfsize[3],float triverts[3][3]); */
/* History: */
/* 2001-03-05: released the code in its first version */
/* 2001-06-18: changed the order of the tests, faster */
/* */
/* Acknowledgement: Many thanks to Pierre Terdiman for */
/* suggestions and discussions on how to optimize code. */
/* Thanks to David Hunt for finding a ">="-bug! */
/********************************************************/
namespace physx
{
#define CROSS(dest,v1,v2) \
dest.x=v1.y*v2.z-v1.z*v2.y; \
dest.y=v1.z*v2.x-v1.x*v2.z; \
dest.z=v1.x*v2.y-v1.y*v2.x;
#define DOT(v1,v2) (v1.x*v2.x+v1.y*v2.y+v1.z*v2.z)
#define FINDMINMAX(x0, x1, x2, minimum, maximum) \
minimum = physx::intrinsics::selectMin(x0, x1); \
maximum = physx::intrinsics::selectMax(x0, x1); \
minimum = physx::intrinsics::selectMin(minimum, x2); \
maximum = physx::intrinsics::selectMax(maximum, x2);
// Tests whether the plane (normal, d) crosses an axis-aligned box centered at
// the origin with half-extents 'maxbox'. For each axis the box corner furthest
// in the negative (vmin) and positive (vmax) direction of the normal is
// selected; the plane crosses the box iff those two corners straddle it.
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxIntBool planeBoxOverlap(const PxVec3& normal, PxReal d, const PxVec3& maxbox)
{
	PxVec3 vmin, vmax;

	vmin.x = normal.x > 0.0f ? -maxbox.x : maxbox.x;
	vmax.x = normal.x > 0.0f ? maxbox.x : -maxbox.x;

	vmin.y = normal.y > 0.0f ? -maxbox.y : maxbox.y;
	vmax.y = normal.y > 0.0f ? maxbox.y : -maxbox.y;

	vmin.z = normal.z > 0.0f ? -maxbox.z : maxbox.z;
	vmax.z = normal.z > 0.0f ? maxbox.z : -maxbox.z;

	// Box entirely on the positive side of the plane => no overlap.
	if (normal.dot(vmin) + d > 0.0f)
		return PxIntFalse;

	// Plane reaches the far corner => overlap; otherwise the box is fully on the negative side.
	return (normal.dot(vmax) + d >= 0.0f) ? PxIntTrue : PxIntFalse;
}
/*======================== X-tests ========================*/
#define AXISTEST_X01(a, b, fa, fb) \
p0 = a*v0.y - b*v0.z; \
p2 = a*v2.y - b*v2.z; \
minimum = physx::intrinsics::selectMin(p0, p2); \
maximum = physx::intrinsics::selectMax(p0, p2); \
rad = fa * extents.y + fb * extents.z; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
#define AXISTEST_X2(a, b, fa, fb) \
p0 = a*v0.y - b*v0.z; \
p1 = a*v1.y - b*v1.z; \
minimum = physx::intrinsics::selectMin(p0, p1); \
maximum = physx::intrinsics::selectMax(p0, p1); \
rad = fa * extents.y + fb * extents.z; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
/*======================== Y-tests ========================*/
#define AXISTEST_Y02(a, b, fa, fb) \
p0 = -a*v0.x + b*v0.z; \
p2 = -a*v2.x + b*v2.z; \
minimum = physx::intrinsics::selectMin(p0, p2); \
maximum = physx::intrinsics::selectMax(p0, p2); \
rad = fa * extents.x + fb * extents.z; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
#define AXISTEST_Y1(a, b, fa, fb) \
p0 = -a*v0.x + b*v0.z; \
p1 = -a*v1.x + b*v1.z; \
minimum = physx::intrinsics::selectMin(p0, p1); \
maximum = physx::intrinsics::selectMax(p0, p1); \
rad = fa * extents.x + fb * extents.z; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
/*======================== Z-tests ========================*/
#define AXISTEST_Z12(a, b, fa, fb) \
p1 = a*v1.x - b*v1.y; \
p2 = a*v2.x - b*v2.y; \
minimum = physx::intrinsics::selectMin(p1, p2); \
maximum = physx::intrinsics::selectMax(p1, p2); \
rad = fa * extents.x + fb * extents.y; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
#define AXISTEST_Z0(a, b, fa, fb) \
p0 = a*v0.x - b*v0.y; \
p1 = a*v1.x - b*v1.y; \
minimum = physx::intrinsics::selectMin(p0, p1); \
maximum = physx::intrinsics::selectMax(p0, p1); \
rad = fa * extents.x + fb * extents.y; \
if(minimum>rad || maximum<-rad) return PxIntFalse;
namespace Gu
{
/**
Reference (non-SIMD) triangle-vs-AABB overlap test based on Tomas Akenine-Moller's
separating-axis algorithm (see the banner at the top of this file). The box is
given in center/half-extents form.
\tparam bDoVertexChecks when true, first reports an overlap if any triangle vertex lies inside the box
\param boxcenter [in] the box center
\param extents [in] the box half-extents
\param tp0 [in] triangle's first point
\param tp1 [in] triangle's second point
\param tp2 [in] triangle's third point
\return PxIntTrue if the triangle overlaps the box, PxIntFalse otherwise
*/
template <const bool bDoVertexChecks = false>
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxIntBool intersectTriangleBox_RefImpl(const PxVec3& boxcenter, const PxVec3& extents, const PxVec3& tp0, const PxVec3& tp1, const PxVec3& tp2)
{
	/* use separating axis theorem to test overlap between triangle and box */
	/* need to test for overlap in these directions: */
	/* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */
	/* we do not even need to test these) */
	/* 2) normal of the triangle */
	/* 3) crossproduct(edge from tri, {x,y,z}-direction) */
	/* this gives 3x3=9 more tests */

	// This is the fastest branch on Sun - move everything so that the boxcenter is in (0,0,0)
	const PxVec3 v0 = tp0 - boxcenter;
	const PxVec3 v1 = tp1 - boxcenter;
	const PxVec3 v2 = tp2 - boxcenter;

	if (bDoVertexChecks)
	{
		// Early accept: a triangle vertex inside the box proves overlap without any SAT work.
		if (PxAbs(v0.x) <= extents.x && PxAbs(v0.y) <= extents.y && PxAbs(v0.z) <= extents.z)
			return PxIntTrue;
		if (PxAbs(v1.x) <= extents.x && PxAbs(v1.y) <= extents.y && PxAbs(v1.z) <= extents.z)
			return PxIntTrue;
		if (PxAbs(v2.x) <= extents.x && PxAbs(v2.y) <= extents.y && PxAbs(v2.z) <= extents.z)
			return PxIntTrue;
	}

	// compute triangle edges
	const PxVec3 e0 = v1 - v0;	// tri edge 0
	const PxVec3 e1 = v2 - v1;	// tri edge 1
	const PxVec3 e2 = v0 - v2;	// tri edge 2

	float minimum, maximum, rad, p0, p1, p2;

	// Bullet 3: test the 9 tests first (this was faster)
	// Each AXISTEST_* macro (defined above) tests one edge/axis cross product
	// and returns PxIntFalse from this function if it is a separating axis.
	float fex = PxAbs(e0.x);
	float fey = PxAbs(e0.y);
	float fez = PxAbs(e0.z);
	AXISTEST_X01(e0.z, e0.y, fez, fey);
	AXISTEST_Y02(e0.z, e0.x, fez, fex);
	AXISTEST_Z12(e0.y, e0.x, fey, fex);

	fex = PxAbs(e1.x);
	fey = PxAbs(e1.y);
	fez = PxAbs(e1.z);
	AXISTEST_X01(e1.z, e1.y, fez, fey);
	AXISTEST_Y02(e1.z, e1.x, fez, fex);
	AXISTEST_Z0(e1.y, e1.x, fey, fex);

	fex = PxAbs(e2.x);
	fey = PxAbs(e2.y);
	fez = PxAbs(e2.z);
	AXISTEST_X2(e2.z, e2.y, fez, fey);
	AXISTEST_Y1(e2.z, e2.x, fez, fex);
	AXISTEST_Z12(e2.y, e2.x, fey, fex);

	// Bullet 1:
	// first test overlap in the {x,y,z}-directions
	// find minimum, maximum of the triangle in each direction, and test for overlap in
	// that direction -- this is equivalent to testing a minimal AABB around
	// the triangle against the AABB

	// test in X-direction
	FINDMINMAX(v0.x, v1.x, v2.x, minimum, maximum);
	if(minimum>extents.x || maximum<-extents.x)
		return PxIntFalse;

	// test in Y-direction
	FINDMINMAX(v0.y, v1.y, v2.y, minimum, maximum);
	if(minimum>extents.y || maximum<-extents.y)
		return PxIntFalse;

	// test in Z-direction
	FINDMINMAX(v0.z, v1.z, v2.z, minimum, maximum);
	if(minimum>extents.z || maximum<-extents.z)
		return PxIntFalse;

	// Bullet 2:
	// test if the box intersects the plane of the triangle
	// compute plane equation of triangle: normal*x+d=0
	PxVec3 normal;
	CROSS(normal, e0, e1);
	const float d = -DOT(normal, v0);	// plane eq: normal.x+d=0
	if(!planeBoxOverlap(normal, d, extents))
		return PxIntFalse;
	return PxIntTrue;	// box and triangle overlaps
}
}
#undef CROSS
#undef DOT
#undef FINDMINMAX
#undef AXISTEST_X01
#undef AXISTEST_X2
#undef AXISTEST_Y02
#undef AXISTEST_Y1
#undef AXISTEST_Z12
#undef AXISTEST_Z0
}
#endif

View File

@@ -0,0 +1,57 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERSECTION_TRIANGLE_TRIANGLE_H
#define GU_INTERSECTION_TRIANGLE_TRIANGLE_H
#include "GuSegment.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
/**
Tests if two triangles intersect
\param a1 [in] First point of the first triangle
\param b1 [in] Second point of the first triangle
\param c1 [in] Third point of the first triangle
\param a2 [in] First point of the second triangle
\param b2 [in] Second point of the second triangle
\param c2 [in] Third point of the second triangle
\param ignoreCoplanar [in] True to filter out coplanar triangles
\return true if triangles intersect
*/
PX_PHYSX_COMMON_API bool intersectTriangleTriangle( const PxVec3& a1, const PxVec3& b1, const PxVec3& c1,
const PxVec3& a2, const PxVec3& b2, const PxVec3& c2,
bool ignoreCoplanar = false);
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,115 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_OVERLAP_TESTS_H
#define GU_OVERLAP_TESTS_H
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "foundation/PxAssert.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundation.h"
#include "geometry/PxGeometry.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryContext.h"
namespace physx
{
namespace Gu
{
class Capsule;
class Sphere;
// PT: this is just a shadow of what it used to be. We currently don't use TRIGGER_INSIDE anymore, but I leave it for now,
// since I really want to put this back the way it was before.
enum TriggerStatus
{
TRIGGER_DISJOINT,
TRIGGER_INSIDE,
TRIGGER_OVERLAP
};
// PT: currently only used for convex triggers
struct TriggerCache
{
	PxVec3 dir;		// cached direction — NOTE(review): presumably the last separating/contact direction used to warm-start the next query; confirm against users of this cache
	PxU16 state;	// NOTE(review): presumably a TriggerStatus value from the previous query; confirm
	PxU16 gjkState; //gjk succeed or fail
};
#define UNUSED_OVERLAP_THREAD_CONTEXT NULL
// PT: we use a define to be able to quickly change the signature of all overlap functions.
// (this also ensures they all use consistent names for passed parameters).
// \param[in] geom0 first geometry object
// \param[in] pose0 pose of first geometry object
// \param[in] geom1 second geometry object
// \param[in] pose1 pose of second geometry object
// \param[in] cache optional cached data for triggers
// \param[in] threadContext optional per-thread context
#define GU_OVERLAP_FUNC_PARAMS const PxGeometry& geom0, const PxTransform& pose0, \
const PxGeometry& geom1, const PxTransform& pose1, \
Gu::TriggerCache* cache, PxOverlapThreadContext* threadContext
// PT: function pointer for Geom-indexed overlap functions
// See GU_OVERLAP_FUNC_PARAMS for function parameters details.
// \return true if an overlap was found, false otherwise
typedef bool (*GeomOverlapFunc) (GU_OVERLAP_FUNC_PARAMS);
// PT: typedef for a bundle of all overlap functions, i.e. the function table itself (indexed by geom-type).
typedef GeomOverlapFunc GeomOverlapTable[PxGeometryType::eGEOMETRY_COUNT];
// PT: retrieves the overlap function table (for access by external non-Gu modules)
PX_PHYSX_COMMON_API const GeomOverlapTable* getOverlapFuncTable();
// Dispatches an overlap query through the geom-type-indexed function table.
// The table is indexed [smaller type][larger type], so the operands are swapped
// when geom0's type is greater than geom1's before looking up and calling the
// registered function.
PX_FORCE_INLINE bool overlap(	const PxGeometry& geom0, const PxTransform& pose0,
								const PxGeometry& geom1, const PxTransform& pose1,
								const GeomOverlapTable* PX_RESTRICT overlapFuncs, PxOverlapThreadContext* threadContext)
{
	PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "Gu::overlap(): pose0 is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "Gu::overlap(): pose1 is not valid.", false);

	const bool swapped = geom0.getType() > geom1.getType();
	const PxGeometry& firstGeom		= swapped ? geom1 : geom0;
	const PxGeometry& secondGeom	= swapped ? geom0 : geom1;
	const PxTransform& firstPose	= swapped ? pose1 : pose0;
	const PxTransform& secondPose	= swapped ? pose0 : pose1;

	const GeomOverlapFunc overlapFunc = overlapFuncs[firstGeom.getType()][secondGeom.getType()];
	PX_ASSERT(overlapFunc);
	return overlapFunc(firstGeom, firstPose, secondGeom, secondPose, NULL, threadContext);
}
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,227 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_PRUNER_H
#define GU_PRUNER_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxTransform.h"
#include "GuPrunerPayload.h"
#include "GuPrunerTypedef.h"
namespace physx
{
class PxRenderOutput;
class PxBounds3;
namespace Gu
{
class ShapeData;
// Callback interface for Pruner::raycast()/sweep(): invoke() is called per candidate
// primitive, receiving the primitive index plus the payload/transform arrays to index
// into. NOTE(review): 'distance' is passed by non-const reference — presumably so the
// implementation can shrink the query range on a hit, and the bool return presumably
// controls whether traversal continues; confirm against the implementations.
struct PrunerRaycastCallback
{
	PrunerRaycastCallback() {}
	virtual ~PrunerRaycastCallback() {}

	virtual bool invoke(PxReal& distance, PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms) = 0;
};
// Callback interface for Pruner::overlap(): invoke() is called per candidate primitive,
// receiving the primitive index plus the payload/transform arrays to index into.
// NOTE(review): the bool return presumably controls whether traversal continues;
// confirm against the implementations.
struct PrunerOverlapCallback
{
	PrunerOverlapCallback() {}
	virtual ~PrunerOverlapCallback() {}

	virtual bool invoke(PxU32 primIndex, const PrunerPayload* payloads, const PxTransform* transforms) = 0;
};
// Common base for all pruners: adds origin shifting and (optional) debug
// visualization on top of PxUserAllocated.
class BasePruner : public PxUserAllocated
{
public:
	BasePruner() {}
	virtual ~BasePruner() {}

	// shift the origin of the pruner objects
	virtual void shiftOrigin(const PxVec3& shift) = 0;

	// additional 'internal' interface — default implementation draws nothing
	virtual void visualize(PxRenderOutput&, PxU32, PxU32) const {}
};
/**
\brief Abstract pruner interface: a spatial index over (payload, bounds, transform)
triplets, supporting incremental add/remove/update and raycast/overlap/sweep queries.
*/
class Pruner : public BasePruner
{
public:
	Pruner() {}
	virtual ~Pruner() {}

	/**
	\brief Adds objects to the pruner.
	\param[out] results Returned handles for added objects
	\param[in] bounds Bounds of added objects. These bounds are used as-is so they should be pre-inflated if inflation is needed.
	\param[in] data Payloads for added objects.
	\param[in] transforms Transforms of added objects.
	\param[in] count Number of objects in the arrays
	\param[in] hasPruningStructure True if added objects have pruning structure. The structure will be merged later, adding the objects will not invalidate the pruner.
	\return true if success, false if internal allocation failed. The first failing add results in an INVALID_PRUNERHANDLE.
	\see PxPruningStructure
	*/
	virtual bool addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool hasPruningStructure) = 0;

	/**
	\brief Removes objects from the pruner.
	\param[in] handles The objects to remove
	\param[in] count The number of objects to remove
	\param[in] removalCallback Optional callback, called for each removed object (giving access to its payload for keeping external structures in sync)
	*/
	virtual void removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback) = 0;

	/**
	\brief Updates objects with new bounds & transforms.

	There are two ways to use this function:

	1) manual bounds update: you can manually update the bounds via "getPayloadData" calls prior to calling "updateObjects".
	In this case "updateObjects" only notifies the system that the data for these objects has changed. In this mode the
	"inflation", "boundsIndices", "newBounds" and "newTransforms" parameters should remain null.

	2) synchronization mode: in this case the new bounds (and optionally the new transforms) have been computed by an
	external source and "updateObjects" tells the system to update its data from passed buffers. The new bounds are
	always inflated by the "inflation" parameter while being copied. "boundsIndices" is an optional remap table, allowing
	this call to only update a subset of the existing bounds (i.e. the updated bounds don't have to be first copied to a
	separate contiguous buffer).

	\param[in] handles The objects to update
	\param[in] count The number of objects to update
	\param[in] inflation Bounds inflation value
	\param[in] boundsIndices The indices of the bounds in the bounds array (or NULL)
	\param[in] newBounds Updated bounds array (or NULL)
	\param[in] newTransforms Updated transforms array (or NULL)
	*/
	virtual void updateObjects(const PrunerHandle* handles, PxU32 count, float inflation=0.0f, const PxU32* boundsIndices=NULL, const PxBounds3* newBounds=NULL, const PxTransform32* newTransforms=NULL) = 0;

	/**
	\brief Gets rid of internal accel struct.
	*/
	virtual void purge() = 0;

	/**
	\brief Makes the queries consistent with previous changes.
	This function must be called before starting queries on an updated Pruner, and will assert otherwise.
	*/
	virtual void commit() = 0;

	/**
	\brief Merges pruning structure to current pruner, parameters may differ for each pruner implementation.
	\param[in] mergeParams Implementation-dependent merge data
	*/
	virtual void merge(const void* mergeParams) = 0;

	/**
	* Query functions
	*
	* Note: return value may disappear if PrunerCallback contains the necessary information
	* currently it is still used for the dynamic pruner internally (to decide if added objects must be queried)
	*/
	// Raycast against the pruner contents. NOTE(review): inOutDistance is a non-const ref —
	// presumably the maximum distance on input and the hit distance on output; confirm.
	virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const = 0;
	// Overlap query: the callback is invoked for candidate primitives overlapping the query volume.
	virtual bool overlap(const Gu::ShapeData& queryVolume, PrunerOverlapCallback&) const = 0;
	// Sweep query: moves the query volume along unitDir. Same inOutDistance convention as raycast().
	virtual bool sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const = 0;

	/**
	\brief Retrieves the object's payload and data associated with the handle.

	This function returns the payload associated with a given handle. Additionally it can return the
	destination addresses for the object's bounds & transform. The user can then write the new bounds
	and transform there, before eventually calling updateObjects().

	\param[in] handle Object handle (initially returned by addObjects())
	\param[out] data Optional location where to store the internal data associated with the payload.
	\return The payload associated with the given handle.
	*/
	virtual const PrunerPayload& getPayloadData(PrunerHandle handle, PrunerPayloadData* data=NULL) const = 0;

	/**
	\brief Preallocate space
	\param[in] nbEntries The number of entries to preallocate space for
	*/
	virtual void preallocate(PxU32 nbEntries) = 0;

	/**
	\brief Sets object's transform

	\note This is equivalent to retrieving the transform's address with "getPayloadData" and writing
	the transform there.

	\param[in] handle Object handle (initially returned by addObjects())
	\param[in] transform New transform
	\return True if success
	*/
	virtual bool setTransform(PrunerHandle handle, const PxTransform& transform) = 0;

	// PT: from the SQ branch, maybe temporary, unclear if a getType() function would be better etc
	virtual bool isDynamic() const { return false; }

	// Writes the pruner's global bounds. NOTE(review): presumably the union of all contained object bounds; confirm with implementations.
	virtual void getGlobalBounds(PxBounds3&) const = 0;
};
/**
\brief Base class for pruners that build their acceleration structure incrementally over time.
The rebuild is amortized over multiple frames: prepareBuild() starts a new build when needed,
buildStep() advances it, and the rate hint controls how many steps a full rebuild is spread over.
*/
class DynamicPruner : public Pruner
{
public:
/**
\brief Sets the rebuild rate hint used for step-building the acceleration structure.
\param[in] nbStepsForRebuild Target number of buildStep() calls over which a full rebuild is spread.
*/
virtual void setRebuildRateHint(PxU32 nbStepsForRebuild) = 0;
/**
\brief Steps the acceleration structure build.
\param[in] synchronousCall Specifies if initialization can happen. It should not initialize the build
when called from a different thread.
\return True if the build finished.
*/
virtual bool buildStep(bool synchronousCall = true) = 0;
/**
\brief Prepares a new tree build.
\return True if a new tree is needed (and a build was therefore prepared).
*/
virtual bool prepareBuild() = 0;
};
}
}
#endif

View File

@@ -0,0 +1,63 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_PRUNER_MERGE_DATA_H
#define GU_PRUNER_MERGE_DATA_H
#include "foundation/PxSimpleTypes.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
// PT: TODO: refactor with BVHCoreData ?
// Merge data consumed by AABB-tree based pruners (see Pruner::merge()).
// Plain view over an externally owned AABB tree: nodes + object indices.
struct AABBPrunerMergeData
{
	AABBPrunerMergeData()
	{
		// PT: it's important to NOT initialize anything by default (for binary serialization)
	}

	PxU32		mNbNodes;			// Nb nodes in AABB tree
	BVHNode*	mAABBTreeNodes;		// AABB tree runtime nodes
	PxU32		mNbObjects;			// Nb objects in AABB tree
	PxU32*		mAABBTreeIndices;	// AABB tree indices

	// Explicitly (re)initializes every field. With no arguments this resets
	// the structure to an empty-but-defined state.
	void init(PxU32 nbNodes=0, BVHNode* nodes=NULL, PxU32 nbObjects=0, PxU32* indices=NULL)
	{
		mAABBTreeIndices	= indices;
		mNbObjects			= nbObjects;
		mAABBTreeNodes		= nodes;
		mNbNodes			= nbNodes;
	}
};
}
}
#endif

Some files were not shown because too many files have changed in this diff Show More