feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,44 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// Clear all OMNI_PVD_* definition macros.
//
// A PVD object-definition file is included multiple times, each pass with a
// different set of OMNI_PVD_* macro definitions (see
// CmOmniPvdAutoGenCreateRegistrationStruct.h and CmOmniPvdAutoGenRegisterData.h).
// Including this header after a pass removes the current macro set so the next
// pass can provide its own definitions.
//
#undef OMNI_PVD_CLASS_BEGIN
#undef OMNI_PVD_CLASS_DERIVED_BEGIN
#undef OMNI_PVD_CLASS_UNTYPED_BEGIN
#undef OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN
#undef OMNI_PVD_CLASS_END
#undef OMNI_PVD_ENUM_BEGIN
#undef OMNI_PVD_ENUM_END
#undef OMNI_PVD_ATTRIBUTE
#undef OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE
#undef OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE
#undef OMNI_PVD_ATTRIBUTE_STRING
#undef OMNI_PVD_ATTRIBUTE_UNIQUE_LIST
#undef OMNI_PVD_ATTRIBUTE_FLAG
#undef OMNI_PVD_ENUM_VALUE_EXPLICIT
#undef OMNI_PVD_ENUM_VALUE

View File

@@ -0,0 +1,455 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// The macro logic in this header (and the headers CmOmniPvdAutoGenRegisterData.h and
// CmOmniPvdAutoGenSetData.h) is meant as a helper to automatically generate a
// structure that stores all PVD class and attribute handles for a module, handles the
// registration logic and adds methods for object creation, setting attribute
// values etc. At the core of the generation logic is a user defined header file
// that describes the classes and attributes as follows:
//
// OMNI_PVD_CLASS_BEGIN(MyClass1)
// OMNI_PVD_ATTRIBUTE(MyClass1, myAttr1, PxReal, OmniPvdDataType::eFLOAT32)
// OMNI_PVD_ATTRIBUTE(MyClass1, myAttr2, PxReal, OmniPvdDataType::eFLOAT32)
// OMNI_PVD_CLASS_END(MyClass1)
//
// OMNI_PVD_CLASS_UNTYPED_BEGIN(MyClass2)
// OMNI_PVD_ATTRIBUTE(MyClass2, myAttr1, PxU32, OmniPvdDataType::eUINT32)
// OMNI_PVD_CLASS_END(MyClass2)
//
// The structure to create from this description will look somewhat like this:
//
// struct MyModulePvdObjectsDescriptor
// {
//
// struct PvdMyClass1
// {
// typedef MyClass1 ObjectType;
// static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectRef) { return reinterpret_cast<OmniPvdObjectHandle>(&objectRef); }
//
// OmniPvdClassHandle classHandle;
//
// void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const
// {
// writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL);
// }
//
// static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef)
// {
// writer.destroyObject(contextHandle, getObjectHandle(objectRef));
// }
//
// OmniPvdAttributeHandle myAttr1;
// void set_myAttr1_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxReal& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr1, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eFLOAT32>());
// }
//
// OmniPvdAttributeHandle myAttr2;
// void set_myAttr2_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxReal& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr2, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eFLOAT32>());
// }
// };
// PvdMyClass1 pvdMyClass1;
//
//
// struct PvdMyClass2
// {
// typedef OmniPvdObjectHandle ObjectType;
// static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectHandle) { return objectHandle; }
//
// OmniPvdClassHandle classHandle;
//
// void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const
// {
// writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL);
// }
//
// static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef)
// {
// writer.destroyObject(contextHandle, getObjectHandle(objectRef));
// }
//
// OmniPvdAttributeHandle myAttr1;
// void set_myAttr1_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const PxU32& value) const
// {
// writer.setAttribute(contextHandle, getObjectHandle(objectRef), myAttr1, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<OmniPvdDataType::eUINT32>());
// }
// };
// PvdMyClass2 pvdMyClass2;
//
//
// void myRegisterDataMethod(OmniPvdWriter& writer)
// {
// pvdMyClass1.classHandle = writer.registerClass("MyClass1");
// pvdMyClass1.myAttr1 = writer.registerAttribute(pvdMyClass1.classHandle, "myAttr1", OmniPvdDataType::eFLOAT32, 1);
// pvdMyClass1.myAttr2 = writer.registerAttribute(pvdMyClass1.classHandle, "myAttr2", OmniPvdDataType::eFLOAT32, 1);
//
// pvdMyClass2.classHandle = writer.registerClass("MyClass2");
// pvdMyClass2.myAttr1 = writer.registerAttribute(pvdMyClass2.classHandle, "myAttr1", OmniPvdDataType::eUINT32, 1);
// }
//
// };
//
// Assuming the class and attribute definitions are in a file called MyModulePvdObjectDefinitions.h,
// the described structure can be generated like this:
//
// struct MyModulePvdObjectsDescriptor
// {
//
// #include "CmOmniPvdAutoGenCreateRegistrationStruct.h"
// #include "MyModulePvdObjectDefinitions.h"
// #include "CmOmniPvdAutoGenClearDefines.h"
//
// // custom registration data related members that are not auto-generated can go here, for example
//
//
// void myRegisterDataMethod(OmniPvdWriter& writer)
// {
// #define OMNI_PVD_WRITER_VAR writer
//
// #include "CmOmniPvdAutoGenRegisterData.h"
// #include "MyModulePvdObjectDefinitions.h"
// #include "CmOmniPvdAutoGenClearDefines.h"
//
// // custom registration code that is not auto-generated can go here too
//
// #undef OMNI_PVD_WRITER_VAR
// }
// };
//
// As can be seen, CmOmniPvdAutoGenCreateRegistrationStruct.h is responsible for generating the structs,
// members and setter methods. CmOmniPvdAutoGenRegisterData.h is responsible for generating the registration
// code (note that defining OMNI_PVD_WRITER_VAR is important in this context since it is used inside
// CmOmniPvdAutoGenRegisterData.h)
//
// Note that it is the user's responsibility to include the necessary headers before applying these helpers
// (for example, OmniPvdDefines.h etc.).
//
// Last but not least, the helpers in CmOmniPvdAutoGenSetData.h provide a way to use this structure to
// set values of attributes, create class instances etc. An example usage is shown below:
//
// OmniPvdContextHandle contextHandle; // assuming this holds the context the objects belong to
// MyClass1 myClass1Instance;
// PxReal value; // assuming this holds the value to set the attribute to
//
// OMNI_PVD_CREATE(contextHandle, MyClass1, myClass1Instance);
// OMNI_PVD_SET(contextHandle, MyClass1, myAttr1, myClass1Instance, value);
//
// To use these helper macros, the following things need to be defined before including CmOmniPvdAutoGenSetData.h:
//
// #define OMNI_PVD_GET_WRITER(writer)
// OmniPvdWriter* writer = GetPvdWriterForMyModule();
//
// #define OMNI_PVD_GET_REGISTRATION_DATA(regData)
// MyModulePvdObjectsDescriptor* regData = GetPvdObjectsDescForMyModule();
//
// #include "CmOmniPvdAutoGenSetData.h"
//
// GetPvdWriterForMyModule() and GetPvdObjectsDescForMyModule() just stand for the logic the user needs
// to provide to access the OmniPvdWriter object and the generated description structure. In the given example,
// the variables "writer" and "regData" need to be assigned but the code to do so will be user specific.
//
//
//
// Common interior shared by every generated Pvd##classID struct: the registered
// PVD class handle plus create/destroy helpers that forward to the given
// OmniPvdWriter.
//
// Note: the expansion expects the enclosing struct to have already defined
// ObjectType and getObjectHandle() (done by the OMNI_PVD_CLASS_*_BEGIN macros
// below before this macro is expanded).
//
#define OMNI_PVD_CLASS_INTERNALS \
\
OmniPvdClassHandle classHandle; \
\
void createInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) const \
{ \
writer.createObject(contextHandle, classHandle, getObjectHandle(objectRef), NULL); \
} \
\
static void destroyInstance(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef) \
{ \
writer.destroyObject(contextHandle, getObjectHandle(objectRef)); \
}
//
// Define a PVD class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: name of the class to register in PVD (note: has to be an existing C++ class)
//
// The expansion opens a struct scope (Pvd##classID) that is closed by
// OMNI_PVD_CLASS_END; the PVD object handle of an instance is derived from the
// instance's address.
//
#define OMNI_PVD_CLASS_BEGIN(classID) \
\
struct Pvd##classID \
{ \
typedef classID ObjectType; \
\
static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectRef) { return reinterpret_cast<OmniPvdObjectHandle>(&objectRef); } \
\
OMNI_PVD_CLASS_INTERNALS
//
// Define a PVD class that is derived from another class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: see OMNI_PVD_CLASS_BEGIN
// baseClassID: the name of the class to derive from
//
// Note: baseClassID is ignored in this pass; the class hierarchy is recorded
// during registration (see CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_BEGIN(classID)
//
// Define a PVD class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: name of the class to register in PVD (note: the class does not need to match an actually existing
// class but still needs to follow C++ naming conventions)
//
// For untyped classes the OmniPvdObjectHandle itself identifies the object
// (ObjectType is OmniPvdObjectHandle and is passed through unchanged).
//
#define OMNI_PVD_CLASS_UNTYPED_BEGIN(classID) \
\
struct Pvd##classID \
{ \
typedef OmniPvdObjectHandle ObjectType; \
\
static OmniPvdObjectHandle getObjectHandle(const ObjectType& objectHandle) { return objectHandle; } \
\
OMNI_PVD_CLASS_INTERNALS
//
// Define a PVD class that is derived from another class.
//
// Note: has to be paired with OMNI_PVD_CLASS_END
//
// classID: see OMNI_PVD_CLASS_UNTYPED_BEGIN
// baseClassID: the name of the class to derive from
//
// Note: baseClassID is ignored in this pass; the class hierarchy is recorded
// during registration (see CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_UNTYPED_BEGIN(classID)
//
// See OMNI_PVD_CLASS_BEGIN for more info.
//
// Closes the struct opened by a *_BEGIN macro and declares the member instance
// (pvd##classID) that the registration and create/destroy/set helpers refer to.
//
#define OMNI_PVD_CLASS_END(classID) \
\
}; \
Pvd##classID pvd##classID;
//
// Define a PVD enum class.
//
// Note: has to be paired with OMNI_PVD_ENUM_END
//
// enumID: name of the enum class (has to follow C++ naming conventions)
//
// Unlike OMNI_PVD_CLASS_BEGIN, only the class handle member is generated:
// no object creation/destruction helpers are needed for enum classes.
//
#define OMNI_PVD_ENUM_BEGIN(enumID) \
\
struct Pvd##enumID \
{ \
OmniPvdClassHandle classHandle;
//
// See OMNI_PVD_ENUM_BEGIN
//
#define OMNI_PVD_ENUM_END(enumID) OMNI_PVD_CLASS_END(enumID)
//
// Define a simple PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// classID: name of the class to add the attribute to (see OMNI_PVD_CLASS_BEGIN)
// attributeID: name of the attribute (has to follow C++ naming conventions)
// valueType: attribute data type (int, float etc.)
// pvdDataType: PVD attribute data type (see OmniPvdDataType)
//
// Generates the attribute handle member plus a setter method; the assert
// guards that sizeof(valueType) matches the size of the declared PVD data type.
//
#define OMNI_PVD_ATTRIBUTE(classID, attributeID, valueType, pvdDataType) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
PX_ASSERT(sizeof(valueType) == getOmniPvdDataTypeSize<pvdDataType>()); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), getOmniPvdDataTypeSize<pvdDataType>()); \
}
//
// Define a fixed size multi-value PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// The attribute is a fixed size array of values of the given pvd data type.
//
// entryCount: number of entries the array will hold.
//
// See OMNI_PVD_ATTRIBUTE for the other parameters. Note that valueType is
// expected to hold a type that matches the size of the whole array, i.e.,
// sizeof(valueType) == entryCount * getOmniPvdDataTypeSize<pvdDataType>()
//
// The whole array is written in one setAttribute() call using sizeof(valueType)
// as the byte count (the assert checks it against entryCount * element size).
//
#define OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE(classID, attributeID, valueType, pvdDataType, entryCount) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const uint32_t byteSize = static_cast<uint32_t>(sizeof(valueType)); \
PX_ASSERT(byteSize == (entryCount * getOmniPvdDataTypeSize<pvdDataType>())); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), byteSize); \
}
//
// Define a variable size multi-value PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// The attribute is a variable size array of values of the given pvd data type.
//
// See OMNI_PVD_ATTRIBUTE for a parameter description. Note that valueType is expected
// to define the type of a single array element, for example, int for an integer array.
//
// The setter takes a pointer to the first element plus the element count; the
// written byte size is valueCount * size of one PVD data type element.
//
#define OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE(classID, attributeID, valueType, pvdDataType) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType* values, uint32_t valueCount) const \
{ \
const uint32_t byteSize = valueCount * getOmniPvdDataTypeSize<pvdDataType>(); \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(values), byteSize); \
}
//
// Define a string PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// See OMNI_PVD_ATTRIBUTE for a parameter description.
//
// Note: valueCount is used directly as the number of bytes to write; the
// setter performs no terminating-zero handling of its own.
//
#define OMNI_PVD_ATTRIBUTE_STRING(classID, attributeID) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const char* values, uint32_t valueCount) const \
{ \
const uint32_t byteSize = valueCount; \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(values), byteSize); \
}
//
// Define a unique list PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// See OMNI_PVD_ATTRIBUTE for a parameter description. Note that valueType is expected
// to define the class the list will hold pointers to. If it shall hold pointers to
// instances of class MyClass, then the valueType is MyClass.
//
// List entries are identified by an OmniPvdObjectHandle derived from the
// entry's address; add/remove pass the raw handle bytes to the writer.
//
#define OMNI_PVD_ATTRIBUTE_UNIQUE_LIST(classID, attributeID, valueType) \
\
OmniPvdAttributeHandle attributeID; \
\
void addTo_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const OmniPvdObjectHandle objHandle = reinterpret_cast<OmniPvdObjectHandle>(&value); \
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&objHandle); \
writer.addToUniqueListAttribute(contextHandle, getObjectHandle(objectRef), attributeID, ptr, sizeof(OmniPvdObjectHandle)); \
} \
\
void removeFrom_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const valueType& value) const \
{ \
const OmniPvdObjectHandle objHandle = reinterpret_cast<OmniPvdObjectHandle>(&value); \
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&objHandle); \
writer.removeFromUniqueListAttribute(contextHandle, getObjectHandle(objectRef), attributeID, ptr, sizeof(OmniPvdObjectHandle)); \
}
//
// Define a flag PVD attribute.
//
// Note: needs to be placed between a OMNI_PVD_CLASS_BEGIN, OMNI_PVD_CLASS_END
// sequence
//
// enumType: the enum type this attribute refers to
// enumID: the name of the enum class that describes the enum (see OMNI_PVD_ENUM_BEGIN)
//
// See OMNI_PVD_ATTRIBUTE for the other parameters.
//
// Note: sizeof(enumType) raw bytes are written; enumID links the attribute to
// its enum class during registration (see CmOmniPvdAutoGenRegisterData.h).
//
#define OMNI_PVD_ATTRIBUTE_FLAG(classID, attributeID, enumType, enumID) \
\
OmniPvdAttributeHandle attributeID; \
void set_##attributeID##_(OmniPvdWriter& writer, OmniPvdContextHandle contextHandle, const ObjectType& objectRef, const enumType& value) const \
{ \
writer.setAttribute(contextHandle, getObjectHandle(objectRef), attributeID, reinterpret_cast<const uint8_t*>(&value), sizeof(enumType)); \
}
//
// Define an enum entry.
//
// Note: needs to be placed between a OMNI_PVD_ENUM_BEGIN, OMNI_PVD_ENUM_END
// sequence
//
// enumID: name of the enum class to add an entry to (see OMNI_PVD_ENUM_BEGIN)
// enumEntryID: the name of the enum entry to add to the enum class (has to follow C++ naming conventions)
// value: the enum value
//
// Deliberately expands to nothing in this pass: enum entries add no struct
// members; their registration code is generated by CmOmniPvdAutoGenRegisterData.h.
//
#define OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, value)
//
// Define an enum entry.
//
// Note: needs to be placed between a OMNI_PVD_ENUM_BEGIN, OMNI_PVD_ENUM_END
// sequence
//
// See OMNI_PVD_ENUM_VALUE_EXPLICIT for a description of the parameters. This shorter form expects the enum to
// have a C++ definition of the form:
//
// struct <enumID>
// {
//     enum Enum
//     {
//         <enumEntryID> = ...
//     }
// }
//
// such that the value can be derived using: <enumID>::<enumEntryID>
//
#define OMNI_PVD_ENUM_VALUE(enumID, enumEntryID) \
\
OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, enumID::enumEntryID)

View File

@@ -0,0 +1,102 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// The macro logic in this header will generate the PVD class/attribute registration
// code based on a class/attribute definition file. OMNI_PVD_WRITER_VAR needs to be
// defined before including this header. OMNI_PVD_WRITER_VAR has to represent the
// variable that holds a reference to an OmniPvdWriter instance. See
// CmOmniPvdAutoGenCreateRegistrationStruct.h for a more detailed overview of the
// whole approach. The various parameters are described there too.
//
// Registers the class under its stringized name and stores the returned handle
// in the generated pvd##classID member.
#define OMNI_PVD_CLASS_BEGIN(classID) \
\
pvd##classID.classHandle = OMNI_PVD_WRITER_VAR.registerClass(#classID);
// Same as OMNI_PVD_CLASS_BEGIN but also passes the base class handle. Note that
// pvd##baseClassID.classHandle is read here, so the base class must appear
// before the derived class in the definition file.
#define OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID) \
\
pvd##classID.classHandle = OMNI_PVD_WRITER_VAR.registerClass(#classID, pvd##baseClassID.classHandle);
#define OMNI_PVD_CLASS_UNTYPED_BEGIN(classID) OMNI_PVD_CLASS_BEGIN(classID)
#define OMNI_PVD_CLASS_UNTYPED_DERIVED_BEGIN(classID, baseClassID) OMNI_PVD_CLASS_DERIVED_BEGIN(classID, baseClassID)
// Nothing to do at class/enum end during registration.
#define OMNI_PVD_CLASS_END(classID)
#define OMNI_PVD_ENUM_BEGIN(enumID) OMNI_PVD_CLASS_BEGIN(enumID)
#define OMNI_PVD_ENUM_END(enumID) OMNI_PVD_CLASS_END(enumID)
// Single value attributes register with an entry count of 1.
#define OMNI_PVD_ATTRIBUTE(classID, attributeID, valueType, pvdDataType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, 1);
#define OMNI_PVD_ATTRIBUTE_ARRAY_FIXED_SIZE(classID, attributeID, valueType, pvdDataType, entryCount) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, entryCount);
// Variable size arrays register with an entry count of 0.
#define OMNI_PVD_ATTRIBUTE_ARRAY_VARIABLE_SIZE(classID, attributeID, valueType, pvdDataType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, pvdDataType, 0);
#define OMNI_PVD_ATTRIBUTE_STRING(classID, attributeID) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerAttribute(pvd##classID.classHandle, #attributeID, OmniPvdDataType::eSTRING, 1);
// Unique lists always hold object handles, hence the fixed eOBJECT_HANDLE type.
#define OMNI_PVD_ATTRIBUTE_UNIQUE_LIST(classID, attributeID, valueType) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerUniqueListAttribute(pvd##classID.classHandle, #attributeID, OmniPvdDataType::eOBJECT_HANDLE);
// Flag attributes link to the enum class registered via OMNI_PVD_ENUM_BEGIN.
#define OMNI_PVD_ATTRIBUTE_FLAG(classID, attributeID, enumType, enumID) \
\
pvd##classID.attributeID = OMNI_PVD_WRITER_VAR.registerFlagsAttribute(pvd##classID.classHandle, #attributeID, pvd##enumID.classHandle);
#define OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, value) \
\
OMNI_PVD_WRITER_VAR.registerEnumValue(pvd##enumID.classHandle, #enumEntryID, value);
#define OMNI_PVD_ENUM_VALUE(enumID, enumEntryID) \
\
OMNI_PVD_ENUM_VALUE_EXPLICIT(enumID, enumEntryID, enumID::enumEntryID)

View File

@@ -0,0 +1,282 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
//
// This header provides macros to register PVD object instances, to set PVD attribute
// values etc. This only works in combination with a registration structure that was
// defined using the logic in CmOmniPvdAutoGenCreateRegistrationStruct.h.
// OMNI_PVD_GET_WRITER and OMNI_PVD_GET_REGISTRATION_DATA have to be defined before
// including this header. These two macros need to fetch and assign the pointer to
// the OmniPvdWriter instance and the registration structure instance respectively.
// See CmOmniPvdAutoGenCreateRegistrationStruct.h for a more detailed overview of the
// whole approach.
//
#if PX_SUPPORT_OMNI_PVD
//
// It is recommended to use this macro when multiple PVD attributes get written
// in one go since the writer and registration structure is then fetched once only.
//
// Note: has to be paired with OMNI_PVD_WRITE_SCOPE_END
//
// writer: a pointer to the OmniPvdWriter instance will get assigned to a variable
// named "writer"
// regData: a pointer to the registration structure instance will get assigned to
// a variable named "regData"
//
// General usage would look like this:
//
// OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData)
// OMNI_PVD_SET_EXPLICIT(writer, regData, ...)
// OMNI_PVD_SET_EXPLICIT(writer, regData, ...)
// ...
// OMNI_PVD_WRITE_SCOPE_END
//
//
// Note: the expansion deliberately opens an unbalanced if-scope; the matching
// closing brace comes from OMNI_PVD_WRITE_SCOPE_END. Everything placed in
// between is skipped entirely when no OmniPvdWriter is available.
//
#define OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData) \
\
OMNI_PVD_GET_WRITER(writer) \
if (writer != NULL) \
{ \
OMNI_PVD_GET_REGISTRATION_DATA(regData)
//
// See OMNI_PVD_WRITE_SCOPE_BEGIN for more info.
//
#define OMNI_PVD_WRITE_SCOPE_END \
\
}
//
// Create a PVD object instance using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
// Note: expands to multiple unscoped statements -- intended for use inside an
// OMNI_PVD_WRITE_SCOPE_BEGIN/OMNI_PVD_WRITE_SCOPE_END region where writer and
// regData are valid.
//
#define OMNI_PVD_CREATE_EXPLICIT(writer, regData, contextHandle, classID, objectRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.createInstance(*writer, contextHandle, objectRef);
//
// Create a PVD object instance.
//
// Note: if attribute values are to be set directly after the object instance registration,
// it is recommended to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_CREATE_EXPLICIT etc. instead
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
// Note: a no-op when no OmniPvdWriter is available (see OMNI_PVD_WRITE_SCOPE_BEGIN).
//
#define OMNI_PVD_CREATE(contextHandle, classID, objectRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_CREATE_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, objectRef); \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Destroy a PVD object instance using the provided pointer to the writer instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
// Note: expands to multiple unscoped statements -- intended for use inside an
// OMNI_PVD_WRITE_SCOPE_BEGIN/OMNI_PVD_WRITE_SCOPE_END region.
//
#define OMNI_PVD_DESTROY_EXPLICIT(writer, regData, contextHandle, classID, objectRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.destroyInstance(*writer, contextHandle, objectRef);
//
// Destroy a PVD object instance.
//
// See OMNI_PVD_SET_EXPLICIT and OMNI_PVD_SET for a description of the parameters.
//
// Note: a no-op when no OmniPvdWriter is available (see OMNI_PVD_WRITE_SCOPE_BEGIN).
//
#define OMNI_PVD_DESTROY(contextHandle, classID, objectRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_DESTROY_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, objectRef); \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Set a PVD attribute value using the provided pointers to the writer and registration
// structure instance.
//
// writer: the variable named "writer" has to hold a pointer to the OmniPvdWriter instance
// regData: the variable named "regData" has to hold a pointer to the registration
// structure
//
// See OMNI_PVD_SET for a description of the other parameters.
//
// Note: expands to multiple unscoped statements -- intended for use inside an
// OMNI_PVD_WRITE_SCOPE_BEGIN/OMNI_PVD_WRITE_SCOPE_END region.
//
#define OMNI_PVD_SET_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.set_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Set a PVD attribute value.
//
// Note: if multiple attribute values should get set in a row, it is recommended
// to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_SET_EXPLICIT etc. instead
//
// contextHandle: the handle of the context the object instance belongs to
// classID: the name of the class (as defined in OMNI_PVD_CLASS_BEGIN() etc.) the attribute
// belongs to
// attributeID: the name of the attribute (as defined in OMNI_PVD_ATTRIBUTE() etc.) to set the
// value for
// objectRef: reference to the class instance to set the attribute for (for untyped classes this shall be
// a reference to a OmniPvdObjectHandle. For typed classes, the pointer value will be used as the
// object handle value).
// valueRef: a reference to a variable that holds the value to set the attribute to
//
// Note: a no-op when no OmniPvdWriter is available (see OMNI_PVD_WRITE_SCOPE_BEGIN).
//
#define OMNI_PVD_SET(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_SET_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Set PVD array attribute values (variable size array) using the provided pointers to the writer and registration
// structure instance.
//
// valuesPtr: pointer to the array data to set the attribute to
// valueCount: number of entries in valuePtr
//
// See OMNI_PVD_SET for a description of the other parameters.
//
#define OMNI_PVD_SET_ARRAY_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.set_##attributeID##_(*writer, contextHandle, objectRef, valuesPtr, valueCount);
//
// Set PVD array attribute values (variable size array).
//
// Note: if multiple attribute values should get set in a row, it is recommended
// to use OMNI_PVD_WRITE_SCOPE_BEGIN & OMNI_PVD_SET_EXPLICIT etc. instead
//
// See OMNI_PVD_SET_ARRAY_EXPLICIT for a description of the parameters.
//
#define OMNI_PVD_SET_ARRAY(contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_SET_ARRAY_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Add an entry to a PVD unique list attribute using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET for a description of the parameters.
//
// Expands to bare statements: asserts the writer and registration data
// pointers, then calls the generated addTo_<attributeID>_ unique-list helper.
#define OMNI_PVD_ADD_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.addTo_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Add an entry to a PVD unique list attribute.
//
// See OMNI_PVD_SET for a description of the parameters.
//
// Scoped convenience wrapper around OMNI_PVD_ADD_EXPLICIT; see OMNI_PVD_SET
// for the write-scope behavior.
#define OMNI_PVD_ADD(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_ADD_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
//
// Remove an entry from a PVD unique list attribute using the provided pointers to the writer and registration
// structure instance.
//
// See OMNI_PVD_SET for a description of the parameters.
//
// Expands to bare statements: asserts the writer and registration data
// pointers, then calls the generated removeFrom_<attributeID>_ unique-list helper.
#define OMNI_PVD_REMOVE_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef) \
\
PX_ASSERT(writer); \
PX_ASSERT(regData); \
regData->pvd##classID.removeFrom_##attributeID##_(*writer, contextHandle, objectRef, valueRef);
//
// Remove an entry from a PVD unique list attribute.
//
// See OMNI_PVD_SET for a description of the parameters.
//
// Scoped convenience wrapper around OMNI_PVD_REMOVE_EXPLICIT; see
// OMNI_PVD_SET for the write-scope behavior.
#define OMNI_PVD_REMOVE(contextHandle, classID, attributeID, objectRef, valueRef) \
\
{ \
OMNI_PVD_WRITE_SCOPE_BEGIN(pvdWriter, pvdRegData) \
OMNI_PVD_REMOVE_EXPLICIT(pvdWriter, pvdRegData, contextHandle, classID, attributeID, objectRef, valueRef) \
OMNI_PVD_WRITE_SCOPE_END \
}
#else
// PX_SUPPORT_OMNI_PVD disabled: every OMNI_PVD_* instrumentation macro
// expands to nothing, so instrumented call sites compile away entirely.
#define OMNI_PVD_WRITE_SCOPE_BEGIN(writer, regData)
#define OMNI_PVD_WRITE_SCOPE_END
#define OMNI_PVD_CREATE_EXPLICIT(writer, regData, contextHandle, classID, objectRef)
#define OMNI_PVD_CREATE(contextHandle, classID, objectRef)
#define OMNI_PVD_DESTROY_EXPLICIT(writer, regData, contextHandle, classID, objectRef)
#define OMNI_PVD_DESTROY(contextHandle, classID, objectRef)
#define OMNI_PVD_SET_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_SET(contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_SET_ARRAY_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount)
#define OMNI_PVD_SET_ARRAY(contextHandle, classID, attributeID, objectRef, valuesPtr, valueCount)
#define OMNI_PVD_ADD_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_ADD(contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_REMOVE_EXPLICIT(writer, regData, contextHandle, classID, attributeID, objectRef, valueRef)
#define OMNI_PVD_REMOVE(contextHandle, classID, attributeID, objectRef, valueRef)
#endif // PX_SUPPORT_OMNI_PVD

View File

@@ -0,0 +1,72 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_LOADLIBRARY_H
#define CM_WINDOWS_LOADLIBRARY_H
#include "foundation/PxPreprocessor.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "common/windows/PxWindowsDelayLoadHook.h"
namespace physx
{
namespace Cm
{
EXTERN_C IMAGE_DOS_HEADER __ImageBase;
// Thin wrapper over the Win32 ANSI LoadLibrary entry point.
PX_INLINE HMODULE WINAPI loadLibrary(const char* name)
{
	HMODULE module = ::LoadLibraryA(name);
	return module;
}
// Delay-load helper: resolves a DLL either directly, or — when the user
// installed a PxDelayLoadHook — via the hook's redirected names for the
// PhysXFoundation and PhysXCommon DLLs. With a hook installed, libraries
// other than those two resolve to NULL.
PX_INLINE FARPROC WINAPI physXCommonDliNotePreLoadLibrary(const char* libraryName, const physx::PxDelayLoadHook* delayLoadHook)
{
	// No hook installed: plain load by the requested name.
	if(!delayLoadHook)
		return (FARPROC)loadLibrary(libraryName);

	// Hook installed: route the two known PhysX DLLs through it.
	if(strstr(libraryName, "PhysXFoundation"))
		return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXFoundationDllName());

	if(strstr(libraryName, "PhysXCommon"))
		return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXCommonDllName());

	return NULL;
}
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_LOADLIBRARY_H

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_MODULEUPDATELOADER_H
#define CM_WINDOWS_MODULEUPDATELOADER_H
#include "foundation/PxPreprocessor.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Cm
{
// File name of the update loader DLL; the 64-bit binary carries a "64" suffix.
#if PX_X64
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader64.dll"
#else
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader.dll"
#endif
// Loads PhysX module DLLs through the optional PhysXUpdateLoader DLL,
// which may substitute updated module binaries at load time.
class PX_PHYSX_COMMON_API CmModuleUpdateLoader
{
public:
// updateLoaderDllName: file name of the update loader DLL to open
// (typically UPDATE_LOADER_DLL_NAME).
CmModuleUpdateLoader(const char* updateLoaderDllName);
~CmModuleUpdateLoader();
// Loads the given module through the update loader. Loads it from the path if
// the update loader doesn't find the requested module. Returns NULL if no
// module found.
HMODULE LoadModule(const char* moduleName, const char* appGUID);
protected:
HMODULE mUpdateLoaderDllHandle;	// handle to the update loader DLL — presumably NULL when unavailable; confirm in the .cpp
FARPROC mGetUpdatedModuleFunc;	// entry point resolved from the update loader DLL (exact export name not visible here)
};
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_MODULEUPDATELOADER_H

View File

@@ -0,0 +1,160 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_BLOCK_ARRAY_H
#define CM_BLOCK_ARRAY_H
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxMemory.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Cm
{
// Paged array: elements live in fixed-size slabs, so growing the container
// never relocates existing elements (pointers/references stay valid across
// pushBack/reserve, unlike a flat PxArray). reserve() default-constructs
// every element of each allocated slab, so T must be default-constructible.
//
// NOTE(review): remove() destroys the vacated trailing slot while
// replaceWithLast()/resize-shrink do not, and resize-grow/pushBack assign
// into such slots afterwards — benign for trivially-destructible T;
// confirm intended usage for non-trivial T.
template <typename T, PxU32 SlabSize = 4096>
class BlockArray
{
	PxArray<T*> mBlocks;	// one pointer per slab of SlabSize constructed elements
	PxU32 mSize;			// number of elements currently in use
	PxU32 mCapacity;		// total constructed elements: mBlocks.size() * SlabSize

public:
	BlockArray() : mSize(0), mCapacity(0)
	{
	}

	~BlockArray()
	{
		// Every slab element was constructed in reserve(), so destroy them all.
		for (PxU32 a = 0; a < mBlocks.size(); ++a)
		{
			for (PxU32 i = 0; i < SlabSize; ++i)
			{
				mBlocks[a][i].~T();
			}
			PX_FREE(mBlocks[a]);
		}
		mBlocks.resize(0);
	}

	// Grows capacity to at least 'capacity' elements. Allocates whole slabs
	// and default-constructs every element of each new slab.
	PX_NOINLINE void reserve(PxU32 capacity)
	{
		if (capacity > mCapacity)
		{
			PxU32 nbSlabsRequired = (capacity + SlabSize - 1) / SlabSize;
			PxU32 nbSlabsToAllocate = nbSlabsRequired - mBlocks.size();
			mCapacity += nbSlabsToAllocate * SlabSize;
			for (PxU32 a = 0; a < nbSlabsToAllocate; ++a)
			{
				T* ts = reinterpret_cast<T*>(PX_ALLOC(sizeof(T) * SlabSize, "BlockArray"));
				for (PxU32 i = 0; i < SlabSize; ++i)
					PX_PLACEMENT_NEW(ts + i, T)();
				mBlocks.pushBack(ts);
			}
		}
	}

	// Changes the element count. Newly exposed elements are reset to a
	// default-constructed T; shrinking keeps the trailing elements around
	// (still constructed) for later reuse.
	PX_NOINLINE void resize(PxU32 size)
	{
		if (size != mSize)
		{
			reserve(size);
			for (PxU32 a = mSize; a < size; ++a)
			{
				mBlocks[a / SlabSize][a % SlabSize].~T();
				mBlocks[a / SlabSize][a % SlabSize] = T();
			}
			mSize = size;
		}
	}

	// Adjusts the size bookkeeping only; does not construct or reset elements.
	void forceSize_Unsafe(PxU32 size)
	{
		PX_ASSERT(size <= mCapacity);
		mSize = size;
	}

	// Removes the element at 'idx', shifting all following elements down by
	// one (order-preserving, O(n)).
	void remove(PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		// Shift left, stopping at the last valid element: the previous
		// "a < mSize" bound read the out-of-range element at index mSize.
		for (PxU32 a = idx; a + 1 < mSize; ++a)
		{
			mBlocks[a / SlabSize][a % SlabSize] = mBlocks[(a + 1) / SlabSize][(a + 1) % SlabSize];
		}
		mSize--;
		mBlocks[mSize / SlabSize][mSize % SlabSize].~T();
	}

	// Removes the element at 'idx' by overwriting it with the last element
	// (O(1), does not preserve order).
	void replaceWithLast(PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		--mSize;
		mBlocks[idx / SlabSize][idx % SlabSize] = mBlocks[mSize / SlabSize][mSize % SlabSize];
	}

	T& operator [] (const PxU32 idx)
	{
		PX_ASSERT(idx < mSize);
		return mBlocks[idx / SlabSize][idx % SlabSize];
	}

	const T& operator [] (const PxU32 idx) const
	{
		PX_ASSERT(idx < mSize);
		return mBlocks[idx / SlabSize][idx % SlabSize];
	}

	// Appends a copy of 'item', growing by a slab if needed.
	void pushBack(const T& item)
	{
		reserve(mSize + 1);
		mBlocks[mSize / SlabSize][mSize % SlabSize] = item;
		mSize++;
	}

	PxU32 capacity() const { return mCapacity; }
	PxU32 size() const { return mSize; }
};
}
}
#endif

View File

@@ -0,0 +1,215 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmCollection.h"
using namespace physx;
using namespace Cm;
// Adds an object to the collection, optionally associating it with a
// serialization id. Re-adding an object with its existing id is a no-op;
// conflicting ids are reported and rejected.
void Collection::add(PxBase& object, PxSerialObjectId id)
{
	const PxSerialObjectId existingId = getId(object);
	if(existingId != PX_SERIAL_OBJECT_ID_INVALID)
	{
		// Object already present with an id: only complain if the caller
		// asked for a different one.
		if(existingId != id)
		{
			PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL,
				"PxCollection::add called for an object that has an associated id already present in the collection!");
		}
		return;
	}

	if(id != PX_SERIAL_OBJECT_ID_INVALID && !mIds.insert(id, &object))
	{
		PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL,
			"PxCollection::add called with an id which is already used in the collection");
		return;
	}

	mObjects[&object] = id;
}
// Removes an object (and its id mapping, if any) from the collection.
void Collection::remove(PxBase& object)
{
	PX_CHECK_AND_RETURN(contains(object), "PxCollection::remove called for an object not contained in the collection!");

	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	if(!entry)
		return;

	mIds.erase(entry->second);
	mObjects.erase(&object);
}
// Returns true if the object is part of this collection.
bool Collection::contains(PxBase& object) const
{
	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	return entry != NULL;
}
// Associates an id with an object already contained in the collection,
// replacing any id previously assigned to that object.
void Collection::addId(PxBase& object, PxSerialObjectId id)
{
	PX_CHECK_AND_RETURN(contains(object), "PxCollection::addId called for object that is not contained in the collection!");
	PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::addId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
	PX_CHECK_AND_RETURN(mIds.find(id) == NULL, "PxCollection::addId called with an id which is already used in the collection!");

	// Drop the object's previous id mapping, if it had one.
	const ObjectToIdMap::Entry* entry = mObjects.find(&object);
	if(entry && entry->second != PX_SERIAL_OBJECT_ID_INVALID)
		mIds.erase(entry->second);

	mIds.insert(id, &object);
	mObjects[&object] = id;
}
// Removes an id mapping; the object itself stays in the collection
// (with an invalid id).
void Collection::removeId(PxSerialObjectId id)
{
	PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::removeId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
	PX_CHECK_AND_RETURN(mIds.find(id), "PxCollection::removeId called with PxSerialObjectId not contained in the collection!");

	const IdToObjectMap::Entry* entry = mIds.find(id);
	if(!entry)
		return;

	mObjects[entry->second] = PX_SERIAL_OBJECT_ID_INVALID;
	mIds.erase(id);
}
// Looks up an object by its serialization id; returns NULL when absent.
PxBase* Collection::find(PxSerialObjectId id) const
{
	PX_CHECK_AND_RETURN_NULL(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::find called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");

	const IdToObjectMap::Entry* entry = mIds.find(id);
	if(!entry)
		return NULL;
	return static_cast<PxBase*>(entry->second);
}
// Merges another collection into this one. An id already bound here to a
// different object is a conflict: it is reported, and the object is added
// without an id. Objects already present keep their entry (hash-map insert
// semantics), possibly gaining an id.
void Collection::add(PxCollection& _collection)
{
Collection& collection = static_cast<Collection&>(_collection);
PX_CHECK_AND_RETURN(this != &collection, "PxCollection::add(PxCollection&) called with itself!");
mObjects.reserve(mObjects.size() + collection.mObjects.size());
const ObjectToIdMap::Entry* e = collection.mObjects.getEntries();
for (PxU32 i = 0; i < collection.mObjects.size(); ++i)
{
PxSerialObjectId id = e[i].second;
if( id != PX_SERIAL_OBJECT_ID_INVALID)
{
// insert fails when the id is already taken; accept silently if it
// maps to the same object, otherwise flag the conflict and fall back
// to adding the object without an id.
if(!mIds.insert(id, e[i].first))
{
if(mIds[id] != e[i].first)
{
PX_CHECK_MSG( false, "PxCollection::add(PxCollection&) called with conflicting id!");
mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
}
}
else
mObjects[ e[i].first ] = id;
}
else
mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
}
}
// Removes every object of the given collection (and its id mapping) from
// this collection; objects not present here are ignored.
void Collection::remove(PxCollection& _collection)
{
	Collection& other = static_cast<Collection&>(_collection);
	PX_CHECK_AND_RETURN(this != &other, "PxCollection::remove(PxCollection&) called with itself!");

	const ObjectToIdMap::Entry* entries = other.mObjects.getEntries();
	const PxU32 count = other.mObjects.size();
	for (PxU32 i = 0; i < count; ++i)
	{
		const ObjectToIdMap::Entry* mine = mObjects.find(entries[i].first);
		if(mine)
		{
			mIds.erase(mine->second);
			mObjects.erase(entries[i].first);
		}
	}
}
// Number of objects in the collection.
PxU32 Collection::getNbObjects() const
{
	const PxU32 nbObjects = mObjects.size();
	return nbObjects;
}
// Returns the object stored at the given index (hash-map internal order).
PxBase& Collection::getObject(PxU32 index) const
{
	PX_ASSERT(index < mObjects.size());
	const ObjectToIdMap::Entry* entries = mObjects.getEntries();
	return *entries[index].first;
}
// Copies up to bufferSize object pointers, starting at startIndex, into
// userBuffer. Returns the number of pointers written.
PxU32 Collection::getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getObjects called with userBuffer NULL!");
	PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getObjects called with bufferSize 0!");

	const ObjectToIdMap::Entry* entries = mObjects.getEntries();
	const PxU32 nbObjects = mObjects.size();
	PxU32 written = 0;
	for (PxU32 src = startIndex; src < nbObjects; ++src)
	{
		if(written == bufferSize)
			break;
		userBuffer[written++] = entries[src].first;
	}
	return written;
}
// Number of id mappings in the collection.
PxU32 Collection::getNbIds() const
{
	const PxU32 nbIds = mIds.size();
	return nbIds;
}
// Returns the id associated with the object, or PX_SERIAL_OBJECT_ID_INVALID
// when the object is absent or has no id.
PxSerialObjectId Collection::getId(const PxBase& object) const
{
	const ObjectToIdMap::Entry* entry = mObjects.find(const_cast<PxBase*>(&object));
	if(!entry)
		return PX_SERIAL_OBJECT_ID_INVALID;
	return entry->second;
}
// Copies up to bufferSize valid ids, skipping the first startIndex of them,
// into userBuffer. Returns the number of ids written. Iteration follows the
// hash map's internal (unspecified) order.
PxU32 Collection::getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getIds called with userBuffer NULL!");
PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getIds called with bufferSize 0!");
PxU32 dstIndex = 0;
// getIterator() is non-const, hence the const_cast; the map is not modified.
IdToObjectMap::Iterator srcIt = (const_cast<IdToObjectMap&>(mIds)).getIterator();
while (!srcIt.done() && dstIndex < bufferSize)
{
if(srcIt->first != PX_SERIAL_OBJECT_ID_INVALID)
{
// Consume the startIndex offset before writing any output.
if(startIndex > 0)
startIndex--;
else
userBuffer[dstIndex++] = srcIt->first;
}
srcIt++;
}
return dstIndex;
}
// Factory for the public PxCollection interface.
PxCollection* PxCreateCollection()
{
	Collection* collection = PX_NEW(Collection);
	return collection;
}

View File

@@ -0,0 +1,96 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_COLLECTION_H
#define CM_COLLECTION_H
#include "common/PxCollection.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAllocator.h"
namespace physx
{
namespace Cm
{
template <class Key,
class Value,
class HashFn = PxHash<Key>,
class Allocator = PxAllocator >
class CollectionHashMap : public PxCoalescedHashMap< Key, Value, HashFn, Allocator>
{
typedef physx::PxHashMapBase< Key, Value, HashFn, Allocator> MapBase;
typedef PxPair<const Key,Value> EntryData;
public:
CollectionHashMap(PxU32 initialTableSize = 64, float loadFactor = 0.75f):
PxCoalescedHashMap< Key, Value, HashFn, Allocator>(initialTableSize,loadFactor) {}
void insertUnique(const Key& k, const Value& v)
{
PX_PLACEMENT_NEW(MapBase::mBase.insertUnique(k), EntryData)(k,v);
}
};
class Collection : public PxCollection, public PxUserAllocated
{
public:
typedef CollectionHashMap<PxBase*, PxSerialObjectId> ObjectToIdMap;
typedef CollectionHashMap<PxSerialObjectId, PxBase*> IdToObjectMap;
virtual void add(PxBase& object, PxSerialObjectId ref);
virtual void remove(PxBase& object);
virtual bool contains(PxBase& object) const;
virtual void addId(PxBase& object, PxSerialObjectId id);
virtual void removeId(PxSerialObjectId id);
virtual PxBase* find(PxSerialObjectId ref) const;
virtual void add(PxCollection& collection);
virtual void remove(PxCollection& collection);
virtual PxU32 getNbObjects() const;
virtual PxBase& getObject(PxU32 index) const;
virtual PxU32 getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
virtual PxU32 getNbIds() const;
virtual PxSerialObjectId getId(const PxBase& object) const;
virtual PxU32 getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
void release() { PX_DELETE_THIS; }
// Only for internal use. Bypasses virtual calls, specialized behaviour.
PX_INLINE void internalAdd(PxBase* s, PxSerialObjectId id = PX_SERIAL_OBJECT_ID_INVALID) { mObjects.insertUnique(s, id); }
PX_INLINE PxU32 internalGetNbObjects() const { return mObjects.size(); }
PX_INLINE PxBase* internalGetObject(PxU32 i) const { PX_ASSERT(i<mObjects.size()); return mObjects.getEntries()[i].first; }
PX_INLINE const ObjectToIdMap::Entry* internalGetObjects() const { return mObjects.getEntries(); }
IdToObjectMap mIds;
ObjectToIdMap mObjects;
};
}
}
#endif

View File

@@ -0,0 +1,188 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_CONE_LIMIT_HELPER_H
#define CM_CONE_LIMIT_HELPER_H
// This class contains methods for supporting the tan-quarter swing limit - that
// is the, ellipse defined by tanQ(theta)^2/tanQ(thetaMax)^2 + tanQ(phi)^2/tanQ(phiMax)^2 = 1
//
// Angles are passed as an PxVec3 swing vector with x = 0 and y and z the swing angles
// around the y and z axes
#include "foundation/PxMathUtils.h"
namespace physx
{
namespace Cm
{
// Tangent addition identity: computes tan(a+b) from tan(a) and tan(b).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal tanAdd(PxReal tan1, PxReal tan2)
{
	const PxReal denom = 1.0f - tan1*tan2;
	PX_ASSERT(PxAbs(denom) > 1e-6f);	// a+b must stay away from +/- pi/2
	return (tan1 + tan2) / denom;
}
// Computes the cone-limit constraint axis for the tanQ rotation vector r
// (x component zero) and the ellipse-normal direction d, and returns the
// error term: the sine of the angular difference to the limit surface,
// positive when the twist axis is inside the cone (see getLimit() below).
PX_CUDA_CALLABLE PX_FORCE_INLINE float computeAxisAndError(const PxVec3& r, const PxVec3& d, const PxVec3& twistAxis, PxVec3& axis)
{
// the point on the cone defined by the tanQ swing vector r
// this code is equal to quatFromTanQVector(r).rotate(PxVec3(1.0f, 0.0f, 0.0f);
const PxVec3 p(1.0f, 0.0f, 0.0f);
const PxReal r2 = r.dot(r), a = 1.0f - r2, b = 1.0f/(1.0f+r2), b2 = b*b;
const PxReal v1 = 2.0f * a * b2;
const PxVec3 v2(a, 2.0f * r.z, -2.0f * r.y); // a*p + 2*r.cross(p);
const PxVec3 coneLine = v1 * v2 - p; // already normalized
// the derivative of coneLine in the direction d
const PxReal rd = r.dot(d);
const PxReal dv1 = -4.0f * rd * (3.0f - r2)*b2*b;
const PxVec3 dv2(-2.0f * rd, 2.0f * d.z, -2.0f * d.y);
const PxVec3 coneNormal = v1 * dv2 + dv1 * v2;
// constraint axis is perpendicular to both the cone surface line and its derivative
axis = coneLine.cross(coneNormal)/coneNormal.magnitude();
return coneLine.cross(axis).dot(twistAxis);
}
// this is here because it's used in both LL and Extensions. However, it
// should STAY IN THE SDK CODE BASE because it's SDK-specific
// Cone limit with limits expressed as tan-quarter swing angles, tested
// against the ellipse described at the top of this file.
class ConeLimitHelper
{
public:
// tanQSwingY/Z: tan-quarter of the maximum swing angles around y and z;
// tanQPadding: inward padding (also tan-quarter) inside which no limit
// constraint is generated.
PX_CUDA_CALLABLE ConeLimitHelper(PxReal tanQSwingY, PxReal tanQSwingZ, PxReal tanQPadding)
: mTanQYMax(tanQSwingY), mTanQZMax(tanQSwingZ), mTanQPadding(tanQPadding) {}
// whether the point is inside the (inwardly) padded cone - if it is, there's no limit
// constraint
PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& tanQSwing) const
{
const PxReal tanQSwingYPadded = tanAdd(PxAbs(tanQSwing.y),mTanQPadding);
const PxReal tanQSwingZPadded = tanAdd(PxAbs(tanQSwing.z),mTanQPadding);
return PxSqr(tanQSwingYPadded/mTanQYMax)+PxSqr(tanQSwingZPadded/mTanQZMax) <= 1;
}
// projects tanQSwing onto the limit ellipse and also returns the
// (unnormalized) ellipse normal at the clamped point
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 clamp(const PxVec3& tanQSwing, PxVec3& normal) const
{
const PxVec3 p = PxEllipseClamp(tanQSwing, PxVec3(0.0f, mTanQYMax, mTanQZMax));
normal = PxVec3(0.0f, p.y/PxSqr(mTanQYMax), p.z/PxSqr(mTanQZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
PxReal err = PxAbs(PxSqr(p.y/mTanQYMax) + PxSqr(p.z/mTanQZMax) - 1);
PX_ASSERT(err<1e-3);
#endif
return p;
}
// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
// output is an axis such that positive rotation increases the angle outward from the
// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
// positive if the twist axis is inside the cone
// Returns false (leaving axis/error untouched) while the swing is still
// inside the padded cone, i.e. no constraint is required.
PX_CUDA_CALLABLE bool getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
{
PX_ASSERT(swing.w>0.0f);
const PxVec3 twistAxis = swing.getBasisVector0();
const PxVec3 tanQSwing = PxVec3(0.0f, PxTanHalf(swing.z,swing.w), -PxTanHalf(swing.y,swing.w));
if(contains(tanQSwing))
return false;
PxVec3 normal, clamped = clamp(tanQSwing, normal);
// rotation vector and ellipse normal
const PxVec3 r(0.0f, -clamped.z, clamped.y), d(0.0f, -normal.z, normal.y);
error = computeAxisAndError(r, d, twistAxis, axis);
PX_ASSERT(PxAbs(axis.magnitude()-1)<1e-5f);
#ifdef PX_PARANOIA_ELLIPSE_CHECK
bool inside = PxSqr(tanQSwing.y/mTanQYMax) + PxSqr(tanQSwing.z/mTanQZMax) <= 1;
PX_ASSERT(inside && error>-1e-4f || !inside && error<1e-4f);
#endif
return true;
}
private:
PxReal mTanQYMax, mTanQZMax, mTanQPadding;	// tan-quarter swing limits and padding
};
// Cone limit taking swing limits as plain angles (the swing quat is converted
// to angles via 4*atan2 before clamping against the ellipse) — no tan-quarter
// mapping of the limit values themselves.
class ConeLimitHelperTanLess
{
public:
// swingY/swingZ: maximum swing angles around the y and z axes
PX_CUDA_CALLABLE ConeLimitHelperTanLess(PxReal swingY, PxReal swingZ)
: mYMax(swingY), mZMax(swingZ) {}
// projects 'swing' (angles) onto the limit ellipse and also returns the
// (unnormalized) ellipse normal at the clamped point
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 clamp(const PxVec3& swing, PxVec3& normal) const
{
// finds the closest point on the ellipse to a given point
const PxVec3 p = PxEllipseClamp(swing, PxVec3(0.0f, mYMax, mZMax));
// normal to the point on ellipse
normal = PxVec3(0.0f, p.y/PxSqr(mYMax), p.z/PxSqr(mZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
PxReal err = PxAbs(PxSqr(p.y/mYMax) + PxSqr(p.z/mZMax) - 1);
PX_ASSERT(err<1e-3);
#endif
return p;
}
// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
// output is an axis such that positive rotation increases the angle outward from the
// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
// positive if the twist axis is inside the cone
// Unlike ConeLimitHelper::getLimit, this always computes axis/error
// (there is no padded-cone early out).
PX_CUDA_CALLABLE void getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
{
PX_ASSERT(swing.w>0.0f);
const PxVec3 twistAxis = swing.getBasisVector0();
// get the angles from the swing quaternion
const PxVec3 swingAngle(0.0f, 4.0f * PxAtan2(swing.y, 1.0f + swing.w), 4.0f * PxAtan2(swing.z, 1.0f + swing.w));
PxVec3 normal, clamped = clamp(swingAngle, normal);
// rotation vector and ellipse normal
const PxVec3 r(0.0f, PxTan(clamped.y/4.0f), PxTan(clamped.z/4.0f)), d(0.0f, normal.y, normal.z);
error = computeAxisAndError(r, d, twistAxis, axis);
PX_ASSERT(PxAbs(axis.magnitude()-1.0f)<1e-5f);
}
private:
PxReal mYMax, mZMax;	// swing limits around y and z
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_FLUSH_POOL_H
#define CM_FLUSH_POOL_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
/*
Pool used to allocate variable sized tasks. It's intended to be cleared after a short period (time step).
*/
namespace physx
{
namespace Cm
{
// Number of spare chunks clear() keeps alive beyond those in use, so the
// next time step does not have to re-allocate them immediately.
static const PxU32 sSpareChunkCount = 2;
// Bump allocator for variable-sized, short-lived allocations (e.g. tasks).
// Memory is carved out of fixed-size chunks; individual allocations are never
// freed - the whole pool is cleared after a short period (time step).
// allocate()/clear() are thread-safe (internal mutex); the *NotThreadSafe
// variants require external synchronization (see lock()/unlock()).
class FlushPool
{
PX_NOCOPY(FlushPool)
public:
// chunkSize: byte size of each chunk; also the maximum size of a single
// allocation (asserted in allocateNotThreadSafe).
FlushPool(PxU32 chunkSize) : mChunks("FlushPoolChunk"), mChunkIndex(0), mOffset(0), mChunkSize(chunkSize)
{
// Always keep at least one chunk so the allocate path never starts empty.
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
}
~FlushPool()
{
for (PxU32 i = 0; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
}
// Thread-safe allocation. alignment must be a power of two
void* allocate(PxU32 size, PxU32 alignment=16)
{
PxMutex::ScopedLock lock(mMutex);
return allocateNotThreadSafe(size, alignment);
}
// Bump-allocates size bytes from the current chunk, advancing to (or
// allocating) the next chunk when the current one is full.
// alignment must be a power of two
void* allocateNotThreadSafe(PxU32 size, PxU32 alignment=16)
{
PX_ASSERT(PxIsPowerOfTwo(alignment));
PX_ASSERT(size <= mChunkSize && !mChunks.empty());
// padding for alignment
size_t unalignedStart = size_t(mChunks[mChunkIndex]+mOffset);
PxU32 pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
// Not enough room left in the current chunk: move to the next one,
// growing the chunk array if all existing chunks are already in use.
if (mOffset + size + pad > mChunkSize)
{
mChunkIndex++;
mOffset = 0;
if (mChunkIndex >= mChunks.size())
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
// update padding to ensure new alloc is aligned
unalignedStart = size_t(mChunks[mChunkIndex]);
pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
}
void* ptr = mChunks[mChunkIndex] + mOffset + pad;
PX_ASSERT((size_t(ptr)&(size_t(alignment)-1)) == 0);
mOffset += size + pad;
return ptr;
}
// Thread-safe variant of clearNotThreadSafe().
void clear(PxU32 spareChunkCount = sSpareChunkCount)
{
PxMutex::ScopedLock lock(mMutex);
clearNotThreadSafe(spareChunkCount);
}
// Rewinds the pool for reuse. Frees chunks beyond the ones touched since
// the last clear plus spareChunkCount spares. Runs no destructors - the
// pool hands out raw memory only.
void clearNotThreadSafe(PxU32 spareChunkCount = sSpareChunkCount)
{
//release memory not used previously
PxU32 targetSize = mChunkIndex+spareChunkCount;
while (mChunks.size() > targetSize)
{
PxU8* ptr = mChunks.popBack();
PX_FREE(ptr);
}
mChunkIndex = 0;
mOffset = 0;
}
// Shrinks back to a single chunk and rewinds. Not thread-safe.
void resetNotThreadSafe()
{
PxU8* firstChunk = mChunks[0];
for (PxU32 i = 1; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
mChunks.clear();
mChunks.pushBack(firstChunk);
mChunkIndex = 0;
mOffset = 0;
}
// Manual locking, for batching several *NotThreadSafe calls.
void lock()
{
mMutex.lock();
}
void unlock()
{
mMutex.unlock();
}
private:
PxMutex mMutex;
PxArray<PxU8*> mChunks; // allocated chunks, each mChunkSize bytes
PxU32 mChunkIndex; // chunk currently being carved from
PxU32 mOffset; // bump offset within the current chunk
PxU32 mChunkSize; // byte size of every chunk
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,203 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_ID_POOL_H
#define CM_ID_POOL_H
#include "foundation/PxArray.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Cm
{
// Recycling ID allocator: mints increasing IDs and reuses released ones.
// FreeBuffer is any array-like type with pushBack/popBack/size/clear.
template<class FreeBuffer>
class IDPoolBase : public PxUserAllocated
{
protected:
	PxU32		mCurrentID;	// next never-yet-used ID
	FreeBuffer	mFreeIDs;	// released IDs awaiting reuse
public:
	IDPoolBase() : mCurrentID(0)	{}

	// Return an ID to the pool. Releasing the most recently minted ID just
	// rewinds the counter; anything else goes onto the recycle list.
	void	freeID(PxU32 id)
	{
		if(id + 1 == mCurrentID)
			mCurrentID = id;
		else
			mFreeIDs.pushBack(id);
	}

	// Drop all state: every ID becomes available again, starting from 0.
	void	freeAll()
	{
		mFreeIDs.clear();
		mCurrentID = 0;
	}

	// Hand out an ID, preferring recycled ones over minting a new one.
	PxU32	getNewID()
	{
		return mFreeIDs.size() ? mFreeIDs.popBack() : mCurrentID++;
	}

	// Number of IDs currently handed out (minted minus recycled).
	PxU32	getNumUsedID()	const
	{
		return mCurrentID - mFreeIDs.size();
	}

	// Highest-water mark: one past the largest ID ever minted.
	PxU32	getMaxID()	const
	{
		return mCurrentID;
	}
};
//Extends IDPoolBase with deferred release: when it is unsafe to recycle an
//ID immediately, the caller parks it via deferredFreeID() and later flushes
//the parked IDs into the free list with processDeferredIds().
template<class FreeBuffer>
class DeferredIDPoolBase : public IDPoolBase<FreeBuffer>
{
	FreeBuffer mDeferredFreeIDs;	// parked IDs, not yet reusable
public:
	//park an index in the deferred list
	void deferredFreeID(PxU32 id)
	{
		mDeferredFreeIDs.pushBack(id);
	}

	//move every parked index into the regular free list
	void processDeferredIds()
	{
		const PxU32 count = mDeferredFreeIDs.size();
		for(PxU32 i = 0; i < count; ++i)
			IDPoolBase<FreeBuffer>::freeID(mDeferredFreeIDs[i]);
		mDeferredFreeIDs.clear();
	}

	//release all indices, parked or not
	void freeAll()
	{
		mDeferredFreeIDs.clear();
		IDPoolBase<FreeBuffer>::freeAll();
	}

	//used IDs, not counting those whose release is merely deferred
	PxU32 getNumUsedID() const
	{
		return IDPoolBase<FreeBuffer>::getNumUsedID() - mDeferredFreeIDs.size();
	}

	FreeBuffer& getDeferredFreeIDs() { return mDeferredFreeIDs; }
};
//Fixed-capacity array with inline (stack/embedded) storage - no heap use.
template <typename T, uint32_t N>
class InlineFixedArray
{
	T		mData[N];	// inline element storage
	PxU32	mCount;		// number of valid elements in mData
public:
	InlineFixedArray() : mCount(0)	{}
	~InlineFixedArray()				{}

	//append; exceeding the capacity N is a caller error (asserted)
	void pushBack(const T& t)
	{
		PX_ASSERT(mCount < N);
		mData[mCount] = t;
		mCount++;
	}

	//remove and return the last element; empty array is a caller error
	T popBack()
	{
		PX_ASSERT(mCount > 0);
		mCount--;
		return mData[mCount];
	}

	void clear()	{ mCount = 0; }

	T&			operator [] (PxU32 index)		{ PX_ASSERT(index < N); return mData[index]; }
	const T&	operator [] (PxU32 index) const	{ PX_ASSERT(index < N); return mData[index]; }

	PxU32 size() const	{ return mCount; }
};
//Fixed-capacity ID pool backed by inline storage.
template<PxU32 Capacity>
class InlineIDPool : public IDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
	//IDs still available before the pool runs dry
	PxU32 getNumRemainingIDs()
	{
		const PxU32 used = this->getNumUsedID();
		return Capacity - used;
	}
};
//Dynamic resize IDPool: free list grows on demand (PxArray-backed), so there
//is no fixed capacity.
class IDPool : public IDPoolBase<PxArray<PxU32> >
{
};
//Recycles indices with deferred release: until processDeferredIds is called,
//released indices will not be reallocated. Fixed capacity: this class will
//fail if the calling code requests more IDs than Capacity - it is the
//caller's responsibility to ensure that this does not happen.
template<PxU32 Capacity>
class InlineDeferredIDPool : public DeferredIDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
	//IDs still available before the pool runs dry
	PxU32 getNumRemainingIDs()
	{
		//deliberately use the base-class count, which ignores deferred frees
		const PxU32 used = IDPoolBase< InlineFixedArray<PxU32, Capacity> >::getNumUsedID();
		return Capacity - used;
	}
};
//Dynamic resize DeferredIDPool: both the free list and the deferred list
//grow on demand (PxArray-backed), so there is no fixed capacity.
class DeferredIDPool : public DeferredIDPoolBase<PxArray<PxU32> >
{
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_MATRIX34_H
#define CM_MATRIX34_H
#include "foundation/PxMat34.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Cm
{
#if !PX_CUDA_COMPILER
// PT: similar to PxMat33Padded
// Builds a PxMat34 from a PxTransform using SIMD quaternion-to-matrix
// conversion. Each V4StoreU writes 16 bytes starting at a 12-byte column,
// so the stores overlap the following member; p is assigned last so the
// translation part ends up correct. NOTE(review): this relies on the PxMat34
// memory layout (m.column0..2 followed by p) - confirm before reordering.
class Matrix34FromTransform : public PxMat34
{
public:
//! Construct from a PxTransform
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34FromTransform(const PxTransform& other)
{
using namespace aos;
// Unaligned load of the quaternion, converted to a 3x3 rotation matrix.
const QuatV qV = V4LoadU(&other.q.x);
Vec3V column0V, column1V, column2V;
QuatGetMat33V(qV, column0V, column1V, column2V);
// From "buildFrom"
// PT: TODO: investigate if these overlapping stores are a problem
V4StoreU(Vec4V_From_Vec3V(column0V), &m.column0.x);
V4StoreU(Vec4V_From_Vec3V(column1V), &m.column1.x);
V4StoreU(Vec4V_From_Vec3V(column2V), &m.column2.x);
// Written last: repairs whatever the final overlapping store clobbered.
p = other.p;
}
};
#endif
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,296 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_POOL_H
#define CM_POOL_H
#include "foundation/PxSort.h"
#include "foundation/PxMutex.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxBitMap.h"
namespace physx
{
namespace Cm
{
/*!
Allocator for pools of data structures
Also decodes indices (which can be computed from handles) into objects. To make this
faster, the EltsPerSlab must be a power of two

Storage is a set of equally-sized slabs. An element's index encodes its slab
(index >> mLog2EltsPerSlab) and its offset within the slab. All elements of a
slab are constructed up front with T(index) and recycled through a LIFO free
list of pointers; mUseBitmap tracks which indices are currently handed out.
Elements are only destroyed in destroy() - get()/put() recycle them alive.
*/
template <class T>
class PoolList : public PxAllocatorTraits<T>::Type
{
typedef typename PxAllocatorTraits<T>::Type Alloc;
PX_NOCOPY(PoolList)
public:
// eltsPerSlab must be a non-zero power of two (asserted below).
PX_INLINE PoolList(const Alloc& alloc, PxU32 eltsPerSlab)
: Alloc(alloc),
mEltsPerSlab(eltsPerSlab),
mSlabCount(0),
mFreeList(0),
mFreeCount(0),
mSlabs(NULL)
{
PX_ASSERT(mEltsPerSlab>0);
PX_ASSERT((mEltsPerSlab & (mEltsPerSlab-1)) == 0);
// Compute log2(mEltsPerSlab) for fast index decoding in findByIndex*.
mLog2EltsPerSlab = 0;
for(mLog2EltsPerSlab=0; mEltsPerSlab!=PxU32(1<<mLog2EltsPerSlab); mLog2EltsPerSlab++)
;
}
PX_INLINE ~PoolList()
{
destroy();
}
// Destroys every element (used or free) and releases all memory.
PX_INLINE void destroy()
{
// Run all destructors
for(PxU32 i=0;i<mSlabCount;i++)
{
PX_ASSERT(mSlabs);
T* slab = mSlabs[i];
for(PxU32 j=0;j<mEltsPerSlab;j++)
{
slab[j].~T();
}
}
//Deallocate
for(PxU32 i=0;i<mSlabCount;i++)
{
Alloc::deallocate(mSlabs[i]);
mSlabs[i] = NULL;
}
mSlabCount = 0;
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = NULL;
if(mSlabs)
{
Alloc::deallocate(mSlabs);
mSlabs = NULL;
}
}
// Pulls nbRequired elements into the caller-provided 'elements' array,
// draining the free list first and then allocating new slabs as needed.
// Returns the number actually delivered (less than nbRequired only if a
// slab allocation failed). All delivered elements are marked used.
PxU32 preallocate(const PxU32 nbRequired, T** elements)
{
//(1) Allocate and pull out an array of X elements
PxU32 nbToAllocate = nbRequired > mFreeCount ? nbRequired - mFreeCount : 0;
PxU32 nbElements = nbRequired - nbToAllocate;
// Take the tail of the free list in one bulk copy.
PxMemCopy(elements, mFreeList + (mFreeCount - nbElements), sizeof(T*) * nbElements);
//PxU32 originalFreeCount = mFreeCount;
mFreeCount -= nbElements;
if (nbToAllocate)
{
// The free list is fully drained at this point, so reallocating
// mFreeList below without copying its contents is safe.
PX_ASSERT(mFreeCount == 0);
PxU32 nbSlabs = (nbToAllocate + mEltsPerSlab - 1) / mEltsPerSlab; //The number of slabs we need to allocate...
//allocate our slabs...
PxU32 freeCount = mFreeCount;
for (PxU32 i = 0; i < nbSlabs; ++i)
{
//KS - would be great to allocate this using a single allocation but it will make releasing slabs fail later :(
T * mAddr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), PX_FL));
if (!mAddr)
return nbElements; //Allocation failed so only return the set of elements we could allocate from the free list
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if (mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
// Grow bitmap, free list and slab table with 2x headroom.
mUseBitmap.resize(2 * newSlabCount*mEltsPerSlab); //set last element as not used
if (mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * mEltsPerSlab * sizeof(T*), PX_FL));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount *sizeof(T*), PX_FL));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = mAddr;
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
//Now add all these to the mFreeList and elements...
// Highest indices go to the free list (surplus), lowest indices are
// handed to the caller - keeping low indices allocated first.
PxI32 idx = PxI32(mEltsPerSlab - 1);
for (; idx >= PxI32(nbToAllocate); --idx)
{
mFreeList[freeCount++] = PX_PLACEMENT_NEW(mAddr + idx, T(baseIndex + idx));
}
PxU32 origElements = nbElements;
T** writeIdx = elements + nbElements;
for (; idx >= 0; --idx)
{
writeIdx[idx] = PX_PLACEMENT_NEW(mAddr + idx, T(baseIndex + idx));
nbElements++;
}
nbToAllocate -= (nbElements - origElements);
}
mFreeCount = freeCount;
}
PX_ASSERT(nbElements == nbRequired);
// Mark everything we handed out as in use.
for (PxU32 a = 0; a < nbElements; ++a)
{
mUseBitmap.set(elements[a]->getIndex());
}
return nbRequired;
}
// TODO: would be nice to add templated construct/destroy methods like ObjectPool
// Pops one element off the free list (growing by a slab if empty).
// Returns 0 on allocation failure. The element is recycled, not freshly
// constructed - it was constructed when its slab was created.
PX_INLINE T* get()
{
if(mFreeCount == 0 && !extend())
return 0;
T* element = mFreeList[--mFreeCount];
mUseBitmap.set(element->getIndex());
return element;
}
// Returns an element to the free list. No destructor is run here.
PX_INLINE void put(T* element)
{
PxU32 i = element->getIndex();
mUseBitmap.reset(i);
mFreeList[mFreeCount++] = element;
}
/*
WARNING: Unlike findByIndexFast below, this method is NOT safe to use if another thread
is concurrently updating the pool (e.g. through put/get/extend/getIterator), since the
safety boundedTest uses mSlabCount and mUseBitmap.
*/
// Returns the element with this index, or 0 if it is out of range or
// not currently allocated.
PX_FORCE_INLINE T* findByIndex(PxU32 index) const
{
if(index>=mSlabCount*mEltsPerSlab || !(mUseBitmap.boundedTest(index)))
return 0;
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
/*
This call is safe to do while other threads update the pool.
*/
// Unchecked index decode: slab = index / eltsPerSlab, offset = remainder.
PX_FORCE_INLINE T* findByIndexFast(PxU32 index) const
{
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
// Allocates one more slab, constructs its elements and pushes them onto
// the free list. NOTE(review): the free-list reallocation below does not
// copy old contents - callers (get/preallocate) only reach it with an
// empty free list; confirm before calling extend() elsewhere.
bool extend()
{
T * mAddr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), PX_FL));
if(!mAddr)
return false;
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if(mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
mUseBitmap.resize(2* newSlabCount*mEltsPerSlab); //set last element as not used
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount * mEltsPerSlab * sizeof(T*), PX_FL));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * sizeof(T*), PX_FL));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = mAddr;
// Add to free list in descending order so that lowest indices get allocated first -
// the FW context code currently *relies* on this behavior to grab the zero-index volume
// which can't be allocated to the user. TODO: fix this
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
PxU32 freeCount = mFreeCount;
for(PxI32 i=PxI32(mEltsPerSlab-1);i>=0;i--)
mFreeList[freeCount++] = PX_PLACEMENT_NEW(mAddr+i, T(baseIndex+ i));
mFreeCount = freeCount;
return true;
}
// Highest index currently in use (delegates to the bitmap).
PX_INLINE PxU32 getMaxUsedIndex() const
{
return mUseBitmap.findLast();
}
// Iterator over all in-use indices.
PX_INLINE PxBitMap::Iterator getIterator() const
{
return PxBitMap::Iterator(mUseBitmap);
}
private:
const PxU32 mEltsPerSlab; // elements per slab (power of two)
PxU32 mSlabCount; // number of allocated slabs
PxU32 mLog2EltsPerSlab; // log2(mEltsPerSlab), for index decoding
T** mFreeList; // LIFO stack of free element pointers
PxU32 mFreeCount; // number of valid entries in mFreeList
T** mSlabs; // slab base pointers
PxBitMap mUseBitmap; // bit set per in-use element index
};
}
}
#endif

View File

@@ -0,0 +1,414 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PREALLOCATING_POOL_H
#define CM_PREALLOCATING_POOL_H
#include "foundation/PxUserAllocated.h"
#include "foundation/PxSort.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Cm
{
// One fixed-capacity slab of raw element storage with an intrusive free
// list: a freed element stores the next-free pointer in its own first
// bytes, so the free list costs no extra memory.
class PreallocatingRegion
{
public:
PX_FORCE_INLINE PreallocatingRegion() : mMemory(NULL), mFirstFree(NULL), mNbElements(0) {}
// Allocates storage for maxElements elements of elementSize bytes each.
// Raw memory only - no elements are constructed here.
void init(PxU32 maxElements, PxU32 elementSize, const char* typeName)
{
mFirstFree = NULL;
mNbElements = 0;
PX_ASSERT(typeName);
PX_UNUSED(typeName);
mMemory = reinterpret_cast<PxU8*>(PX_ALLOC(sizeof(PxU8)*elementSize*maxElements, typeName?typeName:"SceneSim Pool")); // ### addActor alloc
// The free list stores a pointer inside each freed element, so the
// slab must be at least pointer-sized.
PX_ASSERT(elementSize*maxElements>=sizeof(void*));
}
void reset()
{
PX_FREE(mMemory);
}
// Returns a raw slot, or NULL when the region is full. Recycled slots
// are preferred over fresh ones.
PX_FORCE_INLINE PxU8* allocateMemory(PxU32 maxElements, PxU32 elementSize)
{
if(mFirstFree)
{
// Pop the head of the intrusive free list: the freed element's
// first bytes hold the pointer to the next free element.
PxU8* recycled = reinterpret_cast<PxU8*>(mFirstFree);
void** recycled32 = reinterpret_cast<void**>(recycled);
mFirstFree = *recycled32;
return recycled;
}
else
{
if(mNbElements==maxElements)
return NULL; // Out of memory
const PxU32 freeIndex = mNbElements++;
return mMemory + freeIndex * elementSize;
}
}
// Pushes the element onto the intrusive free list, overwriting its first
// bytes. element must belong to this region (asserted).
void deallocateMemory(PxU32 maxElements, PxU32 elementSize, PxU8* element)
{
PX_ASSERT(element);
PX_ASSERT(element>=mMemory && element<mMemory + maxElements * elementSize);
PX_UNUSED(elementSize);
PX_UNUSED(maxElements);
void** recycled32 = reinterpret_cast<void**>(element);
*recycled32 = mFirstFree;
mFirstFree = element;
}
// Regions are ordered by base address so PreallocatingRegionManager can
// sort them and binary-search for an element's owning region.
PX_FORCE_INLINE bool operator < (const PreallocatingRegion& p) const
{
return mMemory < p.mMemory;
}
PX_FORCE_INLINE bool operator > (const PreallocatingRegion& p) const
{
return mMemory > p.mMemory;
}
PxU8* mMemory; // slab base address (raw bytes)
void* mFirstFree; // head of the intrusive free list (NULL when empty)
PxU32 mNbElements; // high-water mark of slots carved from mMemory
};
// Manages a growable set of PreallocatingRegion slabs that all share the
// same maxElements/elementSize. Allocation is served from an "active"
// region; deallocation binary-searches the address-sorted region array for
// the owner. Sorting is deferred (mNeedsSorting) until the first
// deallocation after the region set changed.
class PreallocatingRegionManager
{
public:
PreallocatingRegionManager(PxU32 maxElements, PxU32 elementSize, const char* typeName)
: mMaxElements (maxElements)
, mElementSize (elementSize)
, mActivePoolIndex (0)
, mPools ("MyPoolManagerPools")
, mNeedsSorting (true)
, mTypeName (typeName)
{
// Always start with one region so allocateMemory() has an active pool.
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
}
~PreallocatingRegionManager()
{
const PxU32 nbPools = mPools.size();
for(PxU32 i=0;i<nbPools;i++)
mPools[i].reset();
}
// Grows the region set until at least n elements can be held in total.
void preAllocate(PxU32 n)
{
if(!n)
return;
const PxU32 nbPools = mPools.size();
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
PxU32 availableSpace = nbPools * maxElements;
while(n>availableSpace)
{
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
availableSpace += maxElements;
}
}
// Fast path: carve from the active region; fall back to scanning the
// others / creating a new region when it is full.
PX_FORCE_INLINE PxU8* allocateMemory()
{
PX_ASSERT(mActivePoolIndex<mPools.size());
PxU8* memory = mPools[mActivePoolIndex].allocateMemory(mMaxElements, mElementSize);
return memory ? memory : searchForMemory();
}
// Returns the element to its owning region, found by binary search.
// NULL is silently ignored; a foreign pointer asserts.
void deallocateMemory(PxU8* element)
{
if(!element)
return;
// The binary search below requires the regions to be address-sorted;
// re-sort lazily after the region set changed.
if(mNeedsSorting)
PxSort(mPools.begin(), mPools.size());
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
const PxU32 slabSize = maxElements * elementSize;
const PxU32 nbPools = mPools.size();
// O(log n) search
int first = 0;
int last = int(nbPools-1);
while(first<=last)
{
const int mid = (first+last)>>1;
PreallocatingRegion& candidate = mPools[PxU32(mid)];
if(contains(candidate.mMemory, slabSize, element))
{
candidate.deallocateMemory(maxElements, elementSize, element);
// when we sorted earlier we trashed the active index, but at least this region has a free element
if(mNeedsSorting)
mActivePoolIndex = PxU32(mid);
mNeedsSorting = false;
return;
}
if(candidate.mMemory<element)
first = mid+1;
else
last = mid-1;
}
// The element did not come from any region - caller error.
PX_ASSERT(0);
}
private:
PreallocatingRegionManager& operator=(const PreallocatingRegionManager&);
// Slow path of allocateMemory(): try every other region, else create a
// new one (which invalidates the sorted order -> mNeedsSorting).
PxU8* searchForMemory()
{
const PxU32 nbPools = mPools.size();
const PxU32 activePoolIndex = mActivePoolIndex;
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
for(PxU32 i=0;i<nbPools;i++)
{
if(i==activePoolIndex)
continue;
PxU8* memory = mPools[i].allocateMemory(maxElements, elementSize);
if(memory)
{
mActivePoolIndex = i;
return memory;
}
}
// All regions full: append a fresh one and make it active.
mActivePoolIndex = nbPools;
mNeedsSorting = true;
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
PreallocatingRegion& newPool = mPools.pushBack(tmp); // ### addActor alloc (StaticSim, ShapeSim, SceneQueryShapeData)
return newPool.allocateMemory(maxElements, elementSize);
}
PX_FORCE_INLINE bool contains(PxU8* memory, const PxU32 slabSize, PxU8* element)
{
return element>=memory && element<memory+slabSize;
}
const PxU32 mMaxElements; // per-region capacity, in elements
const PxU32 mElementSize; // byte size of one element
PxU32 mActivePoolIndex; // region allocateMemory() tries first
PxArray<PreallocatingRegion> mPools;
bool mNeedsSorting; // region set changed since the last sort
const char* mTypeName; // allocation tag forwarded to PX_ALLOC
};
// Typed facade over PreallocatingRegionManager: hands out T-sized slots and
// offers construct/destroy helpers taking up to five constructor arguments.
// Allocation helpers return NULL when the underlying pool is exhausted.
template<class T>
class PreallocatingPool : public PxUserAllocated
{
	PreallocatingPool<T>& operator=(const PreallocatingPool<T>&);
public:
	PreallocatingPool(PxU32 maxElements, const char* typeName) : mPool(maxElements, sizeof(T), typeName)
	{
	}
	~PreallocatingPool()
	{
	}

	// Reserve room for at least n elements without constructing anything.
	PX_FORCE_INLINE void preAllocate(PxU32 n)
	{
		mPool.preAllocate(n);
	}

	// Raw, unconstructed slot.
	PX_INLINE T* allocate()
	{
		PxU8* slot = mPool.allocateMemory();
		return reinterpret_cast<T*>(slot);
	}

	// Raw slot, prefetched into cache for imminent construction.
	PX_FORCE_INLINE T* allocateAndPrefetch()
	{
		PxU8* slot = mPool.allocateMemory();
		T* typed = reinterpret_cast<T*>(slot);
		PxPrefetch(typed, sizeof(T));
		return typed;
	}

	// Allocate-and-construct helpers.
	PX_INLINE T* construct()
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T());
	}
	template<class A1>
	PX_INLINE T* construct(A1& a)
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T(a));
	}
	template<class A1, class A2>
	PX_INLINE T* construct(A1& a, A2& b)
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T(a,b));
	}
	template<class A1, class A2, class A3>
	PX_INLINE T* construct(A1& a, A2& b, A3& c)
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T(a,b,c));
	}
	template<class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T(a,b,c,d));
	}
	template<class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		T* slot = allocate();
		if(!slot)
			return NULL;
		return PX_PLACEMENT_NEW(slot, T(a,b,c,d,e));
	}
	////
	// In-place construct helpers: the caller already owns a valid slot.
	PX_INLINE T* construct(T* t)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T());
	}
	template<class A1>
	PX_INLINE T* construct(T* t, A1& a)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a));
	}
	template<class A1, class A2>
	PX_INLINE T* construct(T* t, A1& a, A2& b)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b));
	}
	template<class A1, class A2, class A3>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c));
	}
	template<class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c,d));
	}
	template<class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		PX_ASSERT(t);
		return PX_PLACEMENT_NEW(t, T(a,b,c,d,e));
	}

	// Destructor + slot release. NULL is ignored.
	PX_INLINE void destroy(T* const p)
	{
		if(!p)
			return;
		p->~T();
		mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
	}

	// Slot release only - for elements that were never constructed.
	PX_INLINE void releasePreallocated(T* const p)
	{
		if(!p)
			return;
		mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
	}
protected:
	PreallocatingRegionManager mPool;
};
// Variant of PreallocatingPool whose destroy() runs the destructor
// immediately but defers releasing the memory until
// processPendingDeletedElems() is called.
template<class T>
class BufferedPreallocatingPool : public PreallocatingPool<T>
{
	PxArray<T*> mDeletedElems;	// destroyed, awaiting deallocation
	PX_NOCOPY(BufferedPreallocatingPool<T>)
public:
	BufferedPreallocatingPool(PxU32 maxElements, const char* typeName) : PreallocatingPool<T>(maxElements, typeName)
	{
	}

	// Destructor now, memory release later. NULL is ignored.
	PX_INLINE void destroy(T* const p)
	{
		if(!p)
			return;
		p->~T();
		mDeletedElems.pushBack(p);
	}

	// Hand the memory of all buffered elements back to the pool.
	void processPendingDeletedElems()
	{
		const PxU32 count = mDeletedElems.size();
		for (PxU32 i = 0; i < count; ++i)
			this->mPool.deallocateMemory(reinterpret_cast<PxU8*>(mDeletedElems[i]));
		mDeletedElems.clear();
	}
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,234 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PRIORITY_QUEUE_H
#define CM_PRIORITY_QUEUE_H
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
namespace physx
{
namespace Cm
{
// Array-backed binary heap ordered by Comparator: the element for which
// compare(e, other) holds against all others sits at index 0. Storage is
// supplied by the derived class/caller - this base never allocates.
template<class Element, class Comparator = PxLess<Element> >
class PriorityQueueBase : protected Comparator // inherit so that stateless comparators take no space
{
public:
// 'elements' must point to storage large enough for every push.
PriorityQueueBase(const Comparator& less, Element* elements) : Comparator(less), mHeapSize(0), mDataPtr(elements)
{
}
~PriorityQueueBase()
{
}
//! Get the element with the highest priority
PX_FORCE_INLINE const Element top() const
{
return mDataPtr[0];
}
//! Get the element with the highest priority
PX_FORCE_INLINE Element top()
{
return mDataPtr[0];
}
//! Check to whether the priority queue is empty
PX_FORCE_INLINE bool empty() const
{
return (mHeapSize == 0);
}
//! Empty the priority queue
PX_FORCE_INLINE void clear()
{
mHeapSize = 0;
}
//! Insert a new element into the priority queue. Only valid when size() is less than Capacity
PX_FORCE_INLINE void push(const Element& value)
{
// Sift-up: walk from the new leaf towards the root, shifting parents
// down until the heap property holds for 'value'.
PxU32 newIndex;
PxU32 parentIndex = parent(mHeapSize);
for (newIndex = mHeapSize; newIndex > 0 && compare(value, mDataPtr[parentIndex]); newIndex = parentIndex, parentIndex = parent(newIndex))
{
mDataPtr[ newIndex ] = mDataPtr[parentIndex];
}
mDataPtr[newIndex] = value;
mHeapSize++;
PX_ASSERT(valid());
}
//! Delete the highest priority element. Only valid when non-empty.
PX_FORCE_INLINE Element pop()
{
PX_ASSERT(mHeapSize > 0);
PxU32 i, child;
//try to avoid LHS
PxU32 tempHs = mHeapSize-1;
mHeapSize = tempHs;
Element min = mDataPtr[0];
Element last = mDataPtr[tempHs];
// Sift-down: move the former last element towards the leaves, pulling
// the higher-priority child up one level at a time.
for (i = 0; (child = left(i)) < tempHs; i = child)
{
/* Find highest priority child */
const PxU32 rightChild = child + 1;
child += ((rightChild < tempHs) & compare((mDataPtr[rightChild]), (mDataPtr[child]))) ? 1 : 0;
if(compare(last, mDataPtr[child]))
break;
mDataPtr[i] = mDataPtr[child];
}
mDataPtr[ i ] = last;
PX_ASSERT(valid());
return min;
}
//! Make sure the priority queue sort all elements correctly
// NOTE(review): only checks each element against the root, not the full
// per-level heap property - a cheap sanity check, not a proof.
bool valid() const
{
const Element& min = mDataPtr[0];
for(PxU32 i=1; i<mHeapSize; ++i)
{
if(compare(mDataPtr[i], min))
return false;
}
return true;
}
//! Return number of elements in the priority queue
PxU32 size() const
{
return mHeapSize;
}
protected:
PxU32 mHeapSize; // current number of elements in the heap
Element* mDataPtr; // external storage owned by the derived class
PX_FORCE_INLINE bool compare(const Element& a, const Element& b) const
{
return Comparator::operator()(a,b);
}
// Index of the left child in the implicit binary-tree layout.
static PX_FORCE_INLINE PxU32 left(PxU32 nodeIndex)
{
return (nodeIndex << 1) + 1;
}
// Index of the parent node; only meaningful for nodeIndex > 0.
static PX_FORCE_INLINE PxU32 parent(PxU32 nodeIndex)
{
return (nodeIndex - 1) >> 1;
}
private:
PriorityQueueBase<Element, Comparator>& operator = (const PriorityQueueBase<Element, Comparator>);
};
// Priority queue whose storage is a fixed-size inline array; pushing more
// than Capacity elements is a caller error (asserted), never a resize.
template <typename Element, PxU32 Capacity, typename Comparator>
class InlinePriorityQueue : public PriorityQueueBase<Element, Comparator>
{
Element mData[Capacity]; // inline heap storage handed to the base class
public:
InlinePriorityQueue(const Comparator& less = Comparator()) : PriorityQueueBase<Element, Comparator>(less, mData)
{
}
// Insert an element; the queue must not be full.
PX_FORCE_INLINE void push(Element& elem)
{
PX_ASSERT(this->mHeapSize < Capacity);
PriorityQueueBase<Element, Comparator>::push(elem);
}
private:
InlinePriorityQueue<Element, Capacity, Comparator>& operator = (const InlinePriorityQueue<Element, Capacity, Comparator>);
};
// Growable priority queue backed by 'Alloc'. Elements are moved with a raw
// memory copy on growth, which assumes Element is trivially copyable —
// consistent with its use throughout the SDK (TODO confirm for new callers).
template <typename Element, typename Comparator, typename Alloc = typename physx::PxAllocatorTraits<Element>::Type>
class PriorityQueue : public PriorityQueueBase<Element, Comparator>, protected Alloc
{
	PxU32 mCapacity;	// number of Element slots currently allocated
public:
	// With initialCapacity == 0 no buffer is allocated; the first push() grows it.
	PriorityQueue(const Comparator& less = Comparator(), PxU32 initialCapacity = 0, Alloc alloc = Alloc())
	: PriorityQueueBase<Element, Comparator>(less, NULL), Alloc(alloc), mCapacity(initialCapacity)
	{
		if(initialCapacity > 0)
			this->mDataPtr = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*initialCapacity, PX_FL));
	}
	~PriorityQueue()
	{
		if(this->mDataPtr)
			this->deallocate(this->mDataPtr);
	}
	// Insert an element, doubling the storage first when it is full.
	PX_FORCE_INLINE void push(Element& elem)
	{
		if(this->mHeapSize == mCapacity)
		{
			reserve((this->mHeapSize+1)*2);
		}
		PriorityQueueBase<Element, Comparator>::push(elem);
	}
	// Current number of allocated element slots.
	PX_FORCE_INLINE PxU32 capacity()
	{
		return mCapacity;
	}
	// Grow storage to at least newCapacity; no-op when already large enough.
	PX_FORCE_INLINE void reserve(const PxU32 newCapacity)
	{
		if(newCapacity > mCapacity)
		{
			Element* newElems = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*newCapacity, PX_FL));
			if(this->mDataPtr)
			{
				// Raw copy of live elements into the new buffer (see class note).
				physx::PxMemCopy(newElems, this->mDataPtr, sizeof(Element) * this->mHeapSize);
				Alloc::deallocate(this->mDataPtr);
			}
			this->mDataPtr = newElems;
			mCapacity = newCapacity;
		}
	}
private:
	// Assignment intentionally declared but never defined: non-assignable.
	PriorityQueue<Element, Comparator, Alloc>& operator = (const PriorityQueue<Element, Comparator, Alloc>);
};
}
}
#endif

View File

@@ -0,0 +1,191 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
#include "CmPtrTable.h"
#include "CmUtils.h"
using namespace physx;
using namespace Cm;
// Start out empty: no storage, zero entries, memory nominally owned.
PtrTable::PtrTable()
{
	mList = NULL;
	mCount = 0;
	mOwnsMemory = true;
	mBufferUsed = false;
}
// Storage is extrinsic, so the destructor cannot free anything itself:
// the table must already have been clear()ed when we get here.
PtrTable::~PtrTable()
{
	PX_ASSERT(mList == NULL);
	PX_ASSERT(mCount == 0);
	PX_ASSERT(mOwnsMemory);
}
// Return the pointer array to the storage manager (when owned) and reset to
// the empty state. The capacity handed back is reconstructed from mCount using
// the implicit-capacity rule documented in the header; it must mirror the
// growth policy used by add()/realloc().
void PtrTable::clear(PtrTableStorageManager& sm)
{
	if(mOwnsMemory && mCount>1)
	{
		const PxU32 implicitCapacity = PxNextPowerOfTwo(PxU32(mCount)-1);
		sm.deallocate(mList, implicitCapacity);
	}
	// mCount <= 1 means the pointer (if any) was stored inline: nothing to free.
	mList = NULL;
	mOwnsMemory = true;
	mCount = 0;
}
// Linear search for 'ptr'; returns its index, or 0xffffffff when absent.
PxU32 PtrTable::find(const void* ptr) const
{
	void*const* PX_RESTRICT entries = getPtrs();
	for(PxU32 i=0, n=mCount; i<n; i++)
	{
		if(entries[i] == ptr)
			return i;
	}
	return 0xffffffff;
}
// Serialize the out-of-line pointer array. With fewer than two entries the
// pointer (if any) lives inline in the table, so there is no extra data.
void PtrTable::exportExtraData(PxSerializationContext& stream)
{
	if(mCount < 2)
		return;
	stream.alignData(PX_SERIAL_ALIGN);
	stream.writeData(mList, sizeof(void*)*mCount);
}
// Counterpart of exportExtraData(): re-attach the deserialized pointer array.
void PtrTable::importExtraData(PxDeserializationContext& context)
{
	if(mCount < 2)
		return;
	mList = context.readExtraData<void*, PX_SERIAL_ALIGN>(mCount);
}
// Move the list into a buffer of 'newCapacity' slots obtained from 'sm'.
// 'oldCapacity' must be the current implicit capacity when the memory is owned,
// and 0 when it is not (unowned memory is never handed back to the manager).
// Afterwards the table always owns its memory.
void PtrTable::realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm)
{
	PX_ASSERT((mOwnsMemory && oldCapacity) || (!mOwnsMemory && oldCapacity == 0));
	PX_ASSERT(newCapacity);
	// The manager may declare the existing buffer reusable at the new capacity
	// (e.g. same pool bucket) — then nothing needs to move.
	if(mOwnsMemory && sm.canReuse(oldCapacity, newCapacity))
		return;
	void** newMem = sm.allocate(newCapacity);
	PxMemCopy(newMem, mList, mCount * sizeof(void*));
	if(mOwnsMemory)
		sm.deallocate(mList, oldCapacity);
	mList = newMem;
	mOwnsMemory = true;
}
// Append 'ptr' to the table, growing storage through 'sm' as needed.
// Counts 0 and 1 store the pointer inline (mSingle); from 2 entries on, the
// pointers live in an out-of-line array whose capacity is implicit (see header).
void PtrTable::add(void* ptr, PtrTableStorageManager& sm)
{
	if(mCount == 0) // 0 -> 1, easy case
	{
		PX_ASSERT(mOwnsMemory);
		PX_ASSERT(mList == NULL);
		PX_ASSERT(!mBufferUsed);
		mSingle = ptr;	// store inline, no allocation
		mCount = 1;
		mBufferUsed = true;
		return;
	}
	if(mCount == 1) // 1 -> 2, easy case
	{
		PX_ASSERT(mOwnsMemory);
		PX_ASSERT(mBufferUsed);
		// Switch from inline storage to a 2-slot array (mSingle and mList alias
		// in a union, so read the inline pointer before overwriting mList).
		void* single = mSingle;
		mList = sm.allocate(2);
		mList[0] = single;
		mBufferUsed = false;
		mOwnsMemory = true;
	}
	else
	{
		PX_ASSERT(!mBufferUsed);
		if(!mOwnsMemory) // don't own the memory, must always alloc
			realloc(0, PxNextPowerOfTwo(mCount), sm); // we're guaranteed nextPowerOfTwo(x) > x
		else if(PxIsPowerOfTwo(mCount)) // count is at implicit capacity, so realloc
			realloc(mCount, PxU32(mCount)*2, sm); // ... to next higher power of 2
		PX_ASSERT(mOwnsMemory);
	}
	mList[mCount++] = ptr;
}
// Remove the entry at 'index' by overwriting it with the last entry
// (order-destroying O(1) removal), shrinking storage through 'sm' so the
// implicit-capacity invariant keeps holding.
void PtrTable::replaceWithLast(PxU32 index, PtrTableStorageManager& sm)
{
	PX_ASSERT(mCount!=0);
	if(mCount == 1) // 1 -> 0 easy case
	{
		PX_ASSERT(mOwnsMemory);
		PX_ASSERT(mBufferUsed);
		mList = NULL;
		mCount = 0;
		mBufferUsed = false;
	}
	else if(mCount == 2) // 2 -> 1 easy case
	{
		PX_ASSERT(!mBufferUsed);
		// Keep the surviving pointer, drop the array, go back to inline storage.
		void* ptr = mList[1-index];
		if(mOwnsMemory)
			sm.deallocate(mList, 2);
		mSingle = ptr;
		mCount = 1;
		mBufferUsed = true;
		mOwnsMemory = true;
	}
	else
	{
		PX_ASSERT(!mBufferUsed);
		mList[index] = mList[--mCount]; // remove before adjusting memory
		if(!mOwnsMemory) // don't own the memory, must alloc
			realloc(0, PxNextPowerOfTwo(PxU32(mCount)-1), sm); // if currently a power of 2, don't jump to the next one
		else if(PxIsPowerOfTwo(mCount)) // own the memory, and implicit capacity requires that we downsize
			realloc(PxU32(mCount)*2, PxU32(mCount), sm); // ... from the next power of 2, which was the old implicit capacity
		PX_ASSERT(mOwnsMemory);
	}
}

View File

@@ -0,0 +1,122 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_PTR_TABLE_H
#define CM_PTR_TABLE_H
#include "foundation/PxConstructor.h"
#include "foundation/PxIO.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
class PxSerializationContext;
class PxDeserializationContext;
class PxOutputStream;
namespace Cm
{
// Abstract provider of pointer-array storage for PtrTable.
class PtrTableStorageManager
{
	// This will typically be backed by a MultiPool implementation with fallback to the user
	// allocator. For MultiPool, when deallocating we want to know what the previously requested size was
	// so we can release into the right pool
public:
	// Allocate an array of 'capacity' void* slots.
	virtual void** allocate(PxU32 capacity) = 0;
	// Release an array previously obtained with allocate(originalCapacity).
	virtual void deallocate(void** addr, PxU32 originalCapacity) = 0;
	// whether memory allocated at one capacity can (and should) be safely reused at a different capacity
	// allows realloc-style reuse by clients.
	virtual bool canReuse(PxU32 originalCapacity, PxU32 newCapacity) = 0;
protected:
	// Non-virtual lifetime: managers are never deleted through this interface.
	virtual ~PtrTableStorageManager() {}
};
// specialized class to hold an array of pointers with extrinsic storage management,
// serialization-compatible with 3.3.1 PtrTable
//
// note that extrinsic storage implies you *must* clear the table before the destructor runs
//
// capacity is implicit:
// if the memory is not owned (i.e. came from deserialization) then the capacity is exactly mCount
// else if mCount==0, capacity is 0
// else the capacity is the power of 2 >= mCount
//
// one implication of this is that if we want to add or remove a pointer from unowned memory, we always realloc
struct PX_PHYSX_COMMON_API PtrTable
{
	PtrTable();		// starts empty and owning
	~PtrTable();	// asserts the table was clear()ed beforehand (storage is extrinsic)
	// Append 'ptr'; may grow the array through 'sm' per the implicit-capacity rule.
	void add(void* ptr, PtrTableStorageManager& sm);
	// O(1), order-destroying removal: entry at 'index' is overwritten by the last one.
	void replaceWithLast(PxU32 index, PtrTableStorageManager& sm);
	// Release owned storage and reset to empty.
	void clear(PtrTableStorageManager& sm);
	// Linear search; returns 0xffffffff when 'ptr' is not present.
	PxU32 find(const void* ptr) const;
	PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
	// With exactly one entry the pointer is stored inline, hence the &mSingle case.
	PX_FORCE_INLINE void*const* getPtrs() const { return mCount == 1 ? &mSingle : mList; }
	PX_FORCE_INLINE void** getPtrs() { return mCount == 1 ? &mSingle : mList; }
	// SERIALIZATION
	// 3.3.1 compatibility fixup: this implementation ALWAYS sets 'ownsMemory' if the size is 0 or 1
	PtrTable(const PxEMPTY)
	{
		mOwnsMemory = mCount<2;
		if(mCount == 0)
			mList = NULL;
	}
	void exportExtraData(PxSerializationContext& stream);
	void importExtraData(PxDeserializationContext& context);
private:
	void realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm);
	// NOTE: member layout below is serialization-compatible with 3.3.1 — do not reorder.
	union
	{
		void* mSingle;	// active when mCount == 1 (inline storage)
		void** mList;	// active when mCount > 1 (out-of-line array)
	};
	PxU16 mCount;
	bool mOwnsMemory;
	bool mBufferUsed; // dark magic in serialization requires this, otherwise redundant because it's logically equivalent to mCount == 1.
public:
	PxU32 mFreeSlot; // PT: padding bytes on x64
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,559 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxAssert.h"
#include "CmRadixSort.h"
// PT: code archeology: this initially came from ICE (IceRevisitedRadix.h/cpp). Consider putting it back the way it was initially.
using namespace physx;
using namespace Cm;
// Byte offsets of the four per-pass histograms inside mHistogram1024, and the
// byte offset used when walking each input dword, arranged so that pass 0
// always processes the least significant byte regardless of platform endianness.
#if defined(__BIG_ENDIAN__) || defined(_XBOX)
	#define H0_OFFSET 768
	#define H1_OFFSET 512
	#define H2_OFFSET 256
	#define H3_OFFSET 0
	#define BYTES_INC (3-j)
#else
	#define H0_OFFSET 0
	#define H1_OFFSET 256
	#define H2_OFFSET 512
	#define H3_OFFSET 768
	#define BYTES_INC j
#endif
// Builds all four 256-entry histograms in a single pass over the input while
// simultaneously testing whether the data is already sorted — in which case the
// enclosing Sort() early-outs and keeps the previous ranks (temporal coherence).
// NOTE: expands inline inside RadixSort::Sort and relies on 'input', 'nb',
// 'mHistogram1024', 'mRanks', 'mNbHits' and the rank-validity macros in scope.
#define CREATE_HISTOGRAMS(type, buffer) \
	/* Clear counters/histograms */ \
	PxMemZero(mHistogram1024, 256*4*sizeof(PxU32)); \
	\
	/* Prepare to count */ \
	const PxU8* PX_RESTRICT p = reinterpret_cast<const PxU8*>(input); \
	const PxU8* PX_RESTRICT pe = &p[nb*4]; \
	PxU32* PX_RESTRICT h0= &mHistogram1024[H0_OFFSET]; /* Histogram for first pass (LSB)*/ \
	PxU32* PX_RESTRICT h1= &mHistogram1024[H1_OFFSET]; /* Histogram for second pass */ \
	PxU32* PX_RESTRICT h2= &mHistogram1024[H2_OFFSET]; /* Histogram for third pass */ \
	PxU32* PX_RESTRICT h3= &mHistogram1024[H3_OFFSET]; /* Histogram for last pass (MSB)*/ \
	\
	bool AlreadySorted = true; /* Optimism... */ \
	\
	if(INVALID_RANKS) \
	{ \
		/* Prepare for temporal coherence */ \
		const type* PX_RESTRICT Running = reinterpret_cast<const type*>(buffer); \
		type PrevVal = *Running; \
		\
		while(p!=pe) \
		{ \
			/* Read input buffer in previous sorted order */ \
			const type Val = *Running++; \
			/* Check whether already sorted or not */ \
			if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
			/* Update for next iteration */ \
			PrevVal = Val; \
			\
			/* Create histograms */ \
			h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
		} \
		\
		/* If all input values are already sorted, we just have to return and leave the */ \
		/* previous list unchanged. That way the routine may take advantage of temporal */ \
		/* coherence, for example when used to sort transparent faces. */ \
		if(AlreadySorted) \
		{ \
			mNbHits++; \
			for(PxU32 i=0;i<nb;i++) mRanks[i] = i; \
			return *this; \
		} \
	} \
	else \
	{ \
		/* Prepare for temporal coherence */ \
		const PxU32* PX_RESTRICT Indices = mRanks; \
		type PrevVal = type(buffer[*Indices]); \
		\
		while(p!=pe) \
		{ \
			/* Read input buffer in previous sorted order */ \
			const type Val = type(buffer[*Indices++]); \
			/* Check whether already sorted or not */ \
			if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
			/* Update for next iteration */ \
			PrevVal = Val; \
			\
			/* Create histograms */ \
			h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
		} \
		\
		/* If all input values are already sorted, we just have to return and leave the */ \
		/* previous list unchanged. That way the routine may take advantage of temporal */ \
		/* coherence, for example when used to sort transparent faces. */ \
		if(AlreadySorted) { mNbHits++; return *this; } \
	} \
	\
	/* Else there has been an early out and we must finish computing the histograms */ \
	while(p!=pe) \
	{ \
		/* Create histograms without the previous overhead */ \
		h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
	}
// Returns the 256-entry histogram for 'pass', or NULL when the pass can be
// skipped entirely. A pass is useless when every input value shares the same
// byte at that position — common when sorting bytes or words stored in dwords,
// which cuts the usual O(4*n) running time down to O(2*n) or O(n).
// The shared byte (taken from the first input value) is written to 'UniqueVal'
// either way.
PX_INLINE const PxU32* CheckPassValidity(PxU32 pass, const PxU32* mHistogram1024, PxU32 nb, const void* input, PxU8& UniqueVal)
{
	// Counters for this pass live at a 256-entry offset in the 1024-entry table.
	const PxU32* counters = mHistogram1024 + (pass<<8);
	// Byte of the first input value at this pass' position.
	UniqueVal = reinterpret_cast<const PxU8*>(input)[pass];
	// If that single byte value accounts for every input, skip the pass.
	return (counters[UniqueVal]==nb) ? NULL : counters;
}
// Working buffers are extrinsic (see SetBuffers) or supplied by a derived
// class, so construction only zeroes the bookkeeping and marks ranks invalid.
RadixSort::RadixSort() : mCurrentSize(0), mRanks(NULL), mRanks2(NULL), mHistogram1024(0), mLinks256(0), mTotalCalls(0), mNbHits(0), mDeleteRanks(true)
{
	// Initialize indices
	INVALIDATE_RANKS;
}
// The base class owns no buffers; RadixSortBuffered frees its own in reset().
RadixSort::~RadixSort()
{
}
/**
 * Main sort routine.
 * This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
 * \param input [in] a list of integer values to sort
 * \param nb [in] number of values to sort, must be < 2^31
 * \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
 * \return Self-Reference
 */
RadixSort& RadixSort::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
	// Buffers must have been attached (SetBuffers) or provided by a subclass.
	PX_ASSERT(mHistogram1024);
	PX_ASSERT(mLinks256);
	PX_ASSERT(mRanks);
	PX_ASSERT(mRanks2);
	// Checkings
	if(!input || !nb || nb&0x80000000)
		return *this;
	// Stats
	mTotalCalls++;
	// Create histograms (counters). Counters for all passes are created in one run.
	// Pros: read input buffer once instead of four times
	// Cons: mHistogram1024 is 4Kb instead of 1Kb
	// We must take care of signed/unsigned values for temporal coherence.... I just
	// have 2 code paths even if just a single opcode changes. Self-modifying code, someone?
	// NOTE: CREATE_HISTOGRAMS may 'return *this' early when input is already sorted.
	if(hint==RADIX_UNSIGNED) { CREATE_HISTOGRAMS(PxU32, input); }
	else { CREATE_HISTOGRAMS(PxI32, input); }
	// Compute #negative values involved if needed
	PxU32 NbNegativeValues = 0;
	if(hint==RADIX_SIGNED)
	{
		// An efficient way to compute the number of negatives values we'll have to deal with is simply to sum the 128
		// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
		// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
		PxU32* PX_RESTRICT h3= &mHistogram1024[768];
		for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
	}
	// Radix sort, j is the pass number (0=LSB, 3=MSB)
	for(PxU32 j=0;j<4;j++)
	{
		// CHECK_PASS_VALIDITY(j);
		PxU8 UniqueVal;
		const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
		// Sometimes the fourth (negative) pass is skipped because all numbers are negative and the MSB is 0xFF (for example). This is
		// not a problem, numbers are correctly sorted anyway.
		if(CurCount)
		{
			PxU32** PX_RESTRICT Links256 = mLinks256;
			// Should we care about negative values?
			if(j!=3 || hint==RADIX_UNSIGNED)
			{
				// Here we deal with positive values only
				// Create offsets
				Links256[0] = mRanks2;
				for(PxU32 i=1;i<256;i++)
					Links256[i] = Links256[i-1] + CurCount[i-1];
			}
			else
			{
				// This is a special case to correctly handle negative integers. They're sorted in the right order but at the wrong place.
				// Create biased offsets, in order for negative numbers to be sorted as well
				Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
				for(PxU32 i=1;i<128;i++)
					Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
				// Fixing the wrong place for negative values
				Links256[128] = mRanks2;
				for(PxU32 i=129;i<256;i++)
					Links256[i] = Links256[i-1] + CurCount[i-1];
			}
			// Perform Radix Sort
			// BYTES_INC selects this pass' byte within each dword (endian-aware).
			const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
			InputBytes += BYTES_INC;
			if(INVALID_RANKS)
			{
				// First pass with stale ranks: previous order is the identity.
				for(PxU32 i=0;i<nb;i++)
					*Links256[InputBytes[i<<2]]++ = i;
				VALIDATE_RANKS;
			}
			else
			{
				PxU32* PX_RESTRICT Indices = mRanks;
				PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
				while(Indices!=IndicesEnd)
				{
					const PxU32 id = *Indices++;
					*Links256[InputBytes[id<<2]]++ = id;
				}
			}
			// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
			PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
		}
	}
	return *this;
}
/**
 * Main sort routine.
 * This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
 * \param input2 [in] a list of floating-point values to sort
 * \param nb [in] number of values to sort, must be < 2^31
 * \return Self-Reference
 * \warning only sorts IEEE floating-point values
 */
RadixSort& RadixSort::Sort(const float* input2, PxU32 nb)
{
	// Buffers must have been attached (SetBuffers) or provided by a subclass.
	PX_ASSERT(mHistogram1024);
	PX_ASSERT(mLinks256);
	PX_ASSERT(mRanks);
	PX_ASSERT(mRanks2);
	// Checkings
	if(!input2 || !nb || nb&0x80000000)
		return *this;
	// Stats
	mTotalCalls++;
	// Passes operate on the raw IEEE bit patterns of the floats.
	const PxU32* PX_RESTRICT input = reinterpret_cast<const PxU32*>(input2);
	// Allocate histograms & offsets on the stack
	//PxU32 mHistogram1024[256*4];
	//PxU32* mLinks256[256];
	// Create histograms (counters). Counters for all passes are created in one run.
	// Pros: read input buffer once instead of four times
	// Cons: mHistogram1024 is 4Kb instead of 1Kb
	// Floating-point values are always supposed to be signed values, so there's only one code path there.
	// Please note the floating point comparison needed for temporal coherence! Although the resulting asm code
	// is dreadful, this is surprisingly not such a performance hit - well, I suppose that's a big one on first
	// generation Pentiums....We can't make comparison on integer representations because, as Chris said, it just
	// wouldn't work with mixed positive/negative values....
	{ CREATE_HISTOGRAMS(float, input2); }
	// Compute #negative values involved if needed
	PxU32 NbNegativeValues = 0;
	// An efficient way to compute the number of negatives values we'll have to deal with is simply to sum the 128
	// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
	// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
	// ### is that ok on Apple ?!
	PxU32* PX_RESTRICT h3= &mHistogram1024[768];
	for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
	// Radix sort, j is the pass number (0=LSB, 3=MSB)
	for(PxU32 j=0;j<4;j++)
	{
		PxU8 UniqueVal;
		const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
		// Should we care about negative values?
		if(j!=3)
		{
			// Here we deal with positive values only
			// CHECK_PASS_VALIDITY(j);
			// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
			if(CurCount)
			{
				PxU32** PX_RESTRICT Links256 = mLinks256;
				// Create offsets
				Links256[0] = mRanks2;
				for(PxU32 i=1;i<256;i++)
					Links256[i] = Links256[i-1] + CurCount[i-1];
				// Perform Radix Sort
				// BYTES_INC selects this pass' byte within each dword (endian-aware).
				const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
				InputBytes += BYTES_INC;
				if(INVALID_RANKS)
				{
					// First pass with stale ranks: previous order is the identity.
					for(PxU32 i=0;i<nb;i++)
						*Links256[InputBytes[i<<2]]++ = i;
					VALIDATE_RANKS;
				}
				else
				{
					PxU32* PX_RESTRICT Indices = mRanks;
					PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
					while(Indices!=IndicesEnd)
					{
						const PxU32 id = *Indices++;
						*Links256[InputBytes[id<<2]]++ = id;
					}
				}
				// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
				PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
			}
		}
		else
		{
			// This is a special case to correctly handle negative values
			// CHECK_PASS_VALIDITY(j);
			// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
			if(CurCount)
			{
				PxU32** PX_RESTRICT Links256 = mLinks256;
				// Create biased offsets, in order for negative numbers to be sorted as well
				Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
				for(PxU32 i=1;i<128;i++)
					Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
				// We must reverse the sorting order for negative numbers!
				Links256[255] = mRanks2;
				for(PxU32 i=0;i<127;i++)
					Links256[254-i] = Links256[255-i] + CurCount[255-i]; // Fixing the wrong order for negative values
				for(PxU32 i=128;i<256;i++)
					Links256[i] += CurCount[i]; // Fixing the wrong place for negative values
				// Perform Radix Sort
				if(INVALID_RANKS)
				{
					for(PxU32 i=0;i<nb;i++)
					{
						const PxU32 Radix = input[i]>>24; // Radix byte, same as above. AND is useless here (PxU32).
						// ### cmp to be killed. Not good. Later.
						if(Radix<128) *Links256[Radix]++ = i; // Number is positive, same as above
						else *(--Links256[Radix]) = i; // Number is negative, flip the sorting order
					}
					VALIDATE_RANKS;
				}
				else
				{
					const PxU32* PX_RESTRICT Ranks = mRanks;
					for(PxU32 i=0;i<nb;i++)
					{
						const PxU32 Radix = input[Ranks[i]]>>24; // Radix byte, same as above. AND is useless here (PxU32).
						// ### cmp to be killed. Not good. Later.
						if(Radix<128) *Links256[Radix]++ = Ranks[i]; // Number is positive, same as above
						else *(--Links256[Radix]) = Ranks[i]; // Number is negative, flip the sorting order
					}
				}
				// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
				PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
			}
			else
			{
				// The pass is useless, yet we still have to reverse the order of current list if all values are negative.
				if(UniqueVal>=128)
				{
					if(INVALID_RANKS)
					{
						// ###Possible?
						for(PxU32 i=0;i<nb;i++) mRanks2[i] = nb-i-1;
						VALIDATE_RANKS;
					}
					else
					{
						for(PxU32 i=0;i<nb;i++) mRanks2[i] = mRanks[nb-i-1];
					}
					// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
					PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
				}
			}
		}
	}
	return *this;
}
// Attach caller-owned working buffers: two rank arrays (swapped each pass),
// the 1024-entry histogram and the 256-entry link table. Returns false and
// leaves the sorter untouched when any pointer is NULL.
bool RadixSort::SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256)
{
	if(!ranks0 || !ranks1 || !histogram1024 || !links256)
		return false;
	mDeleteRanks = false;	// buffers are extrinsic: never free them
	mHistogram1024 = histogram1024;
	mLinks256 = links256;
	mRanks = ranks0;
	mRanks2 = ranks1;
	INVALIDATE_RANKS;	// previous sort results (if any) no longer apply
	return true;
}
#include "foundation/PxAllocator.h"
using namespace physx;
using namespace Cm;
// The buffered variant owns its rank arrays (mDeleteRanks defaults to true in
// the base constructor); they are allocated lazily by CheckResize/Resize.
RadixSortBuffered::RadixSortBuffered()
: RadixSort()
{
}
// Releases the owned rank buffers via reset().
RadixSortBuffered::~RadixSortBuffered()
{
	reset();
}
// Free the rank buffers (when this instance owns them) and return the sorter
// to its pristine empty state with invalid ranks.
void RadixSortBuffered::reset()
{
	if(mDeleteRanks)
	{
		PX_FREE(mRanks);
		PX_FREE(mRanks2);
	}
	mCurrentSize = 0;
	INVALIDATE_RANKS;
}
/**
 * Resizes the inner lists.
 * \param nb [in] new size (number of dwords)
 * \return true if success
 */
bool RadixSortBuffered::Resize(PxU32 nb)
{
	if(mDeleteRanks)
	{
		// Free previously used ram
		PX_FREE(mRanks2);
		PX_FREE(mRanks);
		// Get some fresh one
		mRanks = PX_ALLOCATE(PxU32, nb, "RadixSortBuffered:mRanks");
		mRanks2 = PX_ALLOCATE(PxU32, nb, "RadixSortBuffered:mRanks2");
	}
	// NOTE(review): returns true even when allocation fails, and silently does
	// nothing when buffers are extrinsic (mDeleteRanks == false). The only
	// caller, CheckResize(), ignores the result — confirm before relying on it.
	return true;
}
// Grow (never shrink) the rank buffers to hold 'nb' indices, and invalidate
// the ranks whenever the element count changes.
PX_INLINE void RadixSortBuffered::CheckResize(PxU32 nb)
{
	PxU32 CurSize = CURRENT_SIZE;	// masks off the rank-validity bit
	if(nb!=CurSize)
	{
		if(nb>CurSize)
			Resize(nb);
		mCurrentSize = nb;	// overwrites the validity bit...
		INVALIDATE_RANKS;	// ...and explicitly marks the ranks stale
	}
}
/**
 * Main sort routine.
 * This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
 * \param input [in] a list of integer values to sort
 * \param nb [in] number of values to sort, must be < 2^31
 * \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
 * \return Self-Reference
 */
RadixSortBuffered& RadixSortBuffered::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
	// Checkings
	if(!input || !nb || nb&0x80000000)
		return *this;
	// Resize lists if needed
	CheckResize(nb);
	//Set histogram buffers.
	// NOTE(review): these arrays live on this call's stack; the base-class
	// members keep pointing at them after we return. They are dereferenced only
	// inside RadixSort::Sort() during this call — confirm no other code path
	// reads mHistogram1024/mLinks256 afterwards.
	PxU32 histogram[1024];
	PxU32* links[256];
	mHistogram1024 = histogram;
	mLinks256 = links;
	RadixSort::Sort(input, nb, hint);
	return *this;
}
/**
 * Main sort routine.
 * This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
 * \param input2 [in] a list of floating-point values to sort
 * \param nb [in] number of values to sort, must be < 2^31
 * \return Self-Reference
 * \warning only sorts IEEE floating-point values
 */
RadixSortBuffered& RadixSortBuffered::Sort(const float* input2, PxU32 nb)
{
	// Checkings
	if(!input2 || !nb || nb&0x80000000)
		return *this;
	// Resize lists if needed
	CheckResize(nb);
	//Set histogram buffers.
	// NOTE(review): stack-lifetime buffers, see the integer overload above —
	// the members dangle after return but are only used during this call.
	PxU32 histogram[1024];
	PxU32* links[256];
	mHistogram1024 = histogram;
	mLinks256 = links;
	RadixSort::Sort(input2, nb);
	return *this;
}

View File

@@ -0,0 +1,117 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RADIX_SORT_H
#define CM_RADIX_SORT_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Cm
{
// Tells the integer sort how to interpret its input values.
enum RadixHint
{
	RADIX_SIGNED, //!< Input values are signed
	RADIX_UNSIGNED, //!< Input values are unsigned
	RADIX_FORCE_DWORD = 0x7fffffff //!< Forces the enum to be 32-bit
};
// mCurrentSize doubles as a validity flag: the top bit marks mRanks as stale,
// the low 31 bits hold the element count.
#define INVALIDATE_RANKS mCurrentSize|=0x80000000
#define VALIDATE_RANKS mCurrentSize&=0x7fffffff
#define CURRENT_SIZE (mCurrentSize&0x7fffffff)
#define INVALID_RANKS (mCurrentSize&0x80000000)
// Temporally-coherent radix sorter working on extrinsic buffers (see SetBuffers
// and the StackRadixSort macro); RadixSortBuffered adds owned storage.
class PX_PHYSX_COMMON_API RadixSort
{
	PX_NOCOPY(RadixSort)
public:
	RadixSort();
	virtual ~RadixSort();
	// Sorting methods
	RadixSort& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
	RadixSort& Sort(const float* input, PxU32 nb);
	//! Access to results. mRanks is a list of indices in sorted order, i.e. in the order you may further process your data
	PX_FORCE_INLINE const PxU32* GetRanks() const { return mRanks; }
	//! mIndices2 gets trashed on calling the sort routine, but otherwise you can recycle it the way you want.
	PX_FORCE_INLINE PxU32* GetRecyclable() const { return mRanks2; }
	//! Returns the total number of calls to the radix sorter.
	PX_FORCE_INLINE PxU32 GetNbTotalCalls() const { return mTotalCalls; }
	//! Returns the number of early exits due to temporal coherence.
	PX_FORCE_INLINE PxU32 GetNbHits() const { return mNbHits; }
	PX_FORCE_INLINE void invalidateRanks() { INVALIDATE_RANKS; }
	bool SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256);
protected:
	PxU32 mCurrentSize; //!< Current size of the indices list (top bit = ranks invalid)
	PxU32* mRanks; //!< Two lists, swapped each pass
	PxU32* mRanks2;
	PxU32* mHistogram1024; //!< 4 x 256 counters, one histogram per byte pass
	PxU32** mLinks256; //!< 256 write cursors into the destination rank list
	// Stats
	PxU32 mTotalCalls; //!< Total number of calls to the sort routine
	PxU32 mNbHits; //!< Number of early exits due to coherence
	// Stack-radix
	bool mDeleteRanks; //!< true when mRanks/mRanks2 are owned and must be freed
};
// Declares a RadixSort named 'name' plus the stack-allocated scratch buffers
// it needs, and wires them together via SetBuffers — for call sites that must
// avoid heap allocation. 'ranks0'/'ranks1' are caller-provided rank arrays.
#define StackRadixSort(name, ranks0, ranks1) \
	RadixSort name; \
	PxU32 histogramBuffer[1024]; \
	PxU32* linksBuffer[256]; \
	name.SetBuffers(ranks0, ranks1, histogramBuffer, linksBuffer);
// RadixSort variant that owns (and grows) its internal rank buffers instead
// of requiring SetBuffers.
class PX_PHYSX_COMMON_API RadixSortBuffered : public RadixSort
{
	public:
	RadixSortBuffered();
	~RadixSortBuffered();

	// Releases the owned buffers and returns to the initial empty state.
	void reset();

	RadixSortBuffered& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
	RadixSortBuffered& Sort(const float* input, PxU32 nb);

	private:
	RadixSortBuffered(const RadixSortBuffered& object);
	RadixSortBuffered& operator=(const RadixSortBuffered& object);

	// Internal methods
	void CheckResize(PxU32 nb);	// grows the buffers if nb exceeds the current capacity
	bool Resize(PxU32 nb);
};
}
}
#endif // CM_RADIX_SORT_H

View File

@@ -0,0 +1,224 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RANDOM_H
#define CM_RANDOM_H
#include "foundation/PxQuat.h"
#include "foundation/PxVec3.h"
#define TEST_MAX_RAND 0xffff
namespace physx
{
namespace Cm
{
/**
Minimal linear congruential PRNG over a single 32-bit state.
Not cryptographically secure; intended for deterministic test data and
sample scattering. All float helpers derive from the integer state.
*/
class BasicRandom
{
	public:
	BasicRandom(PxU32 seed = 0) : mRnd(seed) {}
	~BasicRandom() {}

	PX_FORCE_INLINE void setSeed(PxU32 seed) { mRnd = seed; }
	PX_FORCE_INLINE PxU32 getCurrentValue() const { return mRnd; }

	// Advances the LCG state and returns it (full 2^32 period: odd increment).
	PxU32 randomize() { mRnd = mRnd * 2147001325 + 715136305; return mRnd; }

	PX_FORCE_INLINE PxU32 rand() { return randomize() & 0xffff; }		// low 16 bits
	PX_FORCE_INLINE PxU32 rand32() { return randomize() & 0xffffffff; }	// full 32 bits

	// Uniform float in [a, b].
	PxF32 rand(PxF32 a, PxF32 b)
	{
		const PxF32 r = rand32() / (static_cast<PxF32>(0xffffffff));
		return r * (b - a) + a;
	}

	// Uniform integer in [a, b-1]. Returns a for an empty range (b <= a);
	// the previous implementation performed a modulo by zero when a == b.
	PxI32 rand(PxI32 a, PxI32 b)
	{
		if(b <= a)
			return a;
		return a + static_cast<PxI32>(rand32() % (b - a));
	}

	// Float in [-0.5, 0.5] built from 16 random bits.
	PxF32 randomFloat()
	{
		return rand() / (static_cast<PxF32>(0xffff)) - 0.5f;
	}

	// Float in [-0.5, 0.5] built from 32 random bits.
	PxF32 randomFloat32()
	{
		return rand32() / (static_cast<PxF32>(0xffffffff)) - 0.5f;
	}

	// Uniform float in [a, b].
	PxF32 randomFloat32(PxReal a, PxReal b) { return rand32() / PxF32(0xffffffff)*(b - a) + a; }

	// Out-parameter wrappers around the value-returning overloads below.
	void unitRandomPt(physx::PxVec3& v)
	{
		v = unitRandomPt();
	}
	void unitRandomQuat(physx::PxQuat& v)
	{
		v = unitRandomQuat();
	}

	// Random unit-length vector; rejects near-zero candidates so the
	// normalization is well-defined.
	PxVec3 unitRandomPt()
	{
		PxVec3 v;
		do
		{
			v.x = randomFloat();
			v.y = randomFloat();
			v.z = randomFloat();
		} while (v.normalize() < 1e-6f);
		return v;
	}

	// Random unit quaternion. NOTE(review): sampling each component uniformly
	// and normalizing is not uniform over SO(3) — presumably acceptable for
	// its internal uses; confirm before using for statistics.
	PxQuat unitRandomQuat()
	{
		PxQuat v;
		do
		{
			v.x = randomFloat();
			v.y = randomFloat();
			v.z = randomFloat();
			v.w = randomFloat();
		} while (v.normalize() < 1e-6f);
		return v;
	}

	private:
	PxU32 mRnd;	//!< current LCG state
};
//--------------------------------------
// Fast, very good random numbers
//
// Period = 2^249
//
// Kirkpatrick, S., and E. Stoll, 1981; A Very Fast Shift-Register
// Sequence Random Number Generator, Journal of Computational Physics,
// V. 40.
//
// Maier, W.L., 1991; A Fast Pseudo Random Number Generator,
// Dr. Dobb's Journal, May, pp. 152 - 157
class RandomR250
{
	public:
	RandomR250(PxI32 s)
	{
		setSeed(s);
	}

	// Re-initializes the 250-word shift register from seed s.
	void setSeed(PxI32 s)
	{
		// Fill the register from a simple LCG, then force some high bits to 1
		// so the initial state is not biased toward zero.
		BasicRandom lcg(s);
		mIndex = 0;

		PxI32 j;
		for (j = 0; j < 250; j++)		// fill r250 buffer with bit values
			mBuffer[j] = lcg.randomize();

		for (j = 0; j < 250; j++)		// set some MSBs to 1
			if (lcg.randomize() > 0x40000000L)
				mBuffer[j] |= 0x80000000L;

		// Place a diagonal of 1-bits across 32 selected words (and clear the
		// bits left of it) — the Maier initialization that guarantees the
		// seed words are linearly independent.
		PxU32 msb = 0x80000000;		// turn on diagonal bit
		PxU32 mask = 0xffffffff;	// turn off the leftmost bits

		for (j = 0; j < 32; j++)
		{
			const PxI32 k = 7 * j + 3;	// select a word to operate on
			mBuffer[k] &= mask;			// turn off bits left of the diagonal
			mBuffer[k] |= msb;			// turn on the diagonal bit
			mask >>= 1;
			msb >>= 1;
		}
	}

	// Next raw random value: XOR of two taps 147 apart in the 250-word ring
	// buffer (the R250 recurrence), returned minus its lowest bit.
	PxU32 randI()
	{
		PxI32 j;

		// wrap pointer around
		if (mIndex >= 147) j = mIndex - 147;
		else j = mIndex + 103;

		const PxU32 new_rand = mBuffer[mIndex] ^ mBuffer[j];
		mBuffer[mIndex] = new_rand;

		// increment pointer for next time
		if (mIndex >= 249) mIndex = 0;
		else mIndex++;

		return new_rand >> 1;
	}

	// Uniform float in [0, 1] built from 23 random bits (float mantissa size).
	PxReal randUnit()
	{
		PxU32 mask = (1 << 23) - 1;
		return PxF32(randI()&(mask)) / PxF32(mask);
	}

	// Uniform float in [lower, upper].
	PxReal rand(PxReal lower, PxReal upper)
	{
		return lower + randUnit() * (upper - lower);
	}

	private:
	PxU32 mBuffer[250];	//!< shift-register state
	PxI32 mIndex;		//!< current position in the ring buffer
};
// NOTE(review): file-scope 'static' in a header — every translation unit that
// includes this file gets its own independent generator instance with this
// fixed default seed.
static RandomR250 gRandomR250(0x95d6739b);

// Random integer in [0, TEST_MAX_RAND].
PX_FORCE_INLINE PxU32 Rand()
{
	return gRandomR250.randI() & TEST_MAX_RAND;
}

// Uniform float in [a, b].
PX_FORCE_INLINE PxF32 Rand(PxF32 a, PxF32 b)
{
	const PxF32 r = static_cast<PxF32>(Rand()) / (static_cast<PxF32>(TEST_MAX_RAND));
	return r * (b - a) + a;
}

// NOTE(review): the divisor assumes a 15-bit rand (0x7fff+1) while Rand()
// spans 0..0xffff, so results can reach past b — presumably intentional
// legacy behavior (hence the name); confirm before relying on the range.
PX_FORCE_INLINE PxF32 RandLegacy(PxF32 a, PxF32 b)
{
	const PxF32 r = static_cast<PxF32>(Rand()) / (static_cast<PxF32>(0x7fff) + 1.0f);
	return r * (b - a) + a;
}
//returns numbers from [a, b-1]. Returns a for an empty range (b <= a); the
//previous implementation performed a modulo by zero when a == b.
PX_FORCE_INLINE PxI32 Rand(PxI32 a, PxI32 b)
{
	if(b <= a)
		return a;
	return a + static_cast<PxI32>(Rand() % (b - a));
}
// Reseeds the shared generator (affects all Rand() helpers in this
// translation unit).
PX_FORCE_INLINE void SetSeed(PxU32 seed)
{
	gRandomR250.setSeed(seed);
}
}
}
#endif

View File

@@ -0,0 +1,136 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_REFCOUNTABLE_H
#define CM_REFCOUNTABLE_H
#include "foundation/PxAssert.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxAllocator.h"
#include "common/PxBase.h"
namespace physx
{
namespace Cm
{
// PT: this is used to re-implement RefCountable using the ref-counter in PxBase, i.e. to dissociate
// the RefCountable data from the RefCountable code. The goal is to be able to store the ref counter
// in the padding bytes of PxBase, and also to avoid two v-table pointers in the class.
class RefCountableExt : public PxRefCounted
{
	public:
	RefCountableExt() : PxRefCounted(0, PxBaseFlags(0)) {}

	// Resets the counter to 1 before export, so deserialized objects start
	// with exactly one reference.
	void preExportDataReset()
	{
		mBuiltInRefCount = 1;
	}

	// Atomically adds a reference to the counter embedded in PxBase.
	void incRefCount()
	{
		volatile PxI32* val = reinterpret_cast<volatile PxI32*>(&mBuiltInRefCount);
		PxAtomicIncrement(val);
		// value better be greater than 1, or we've created a ref to an undefined object
		PX_ASSERT(mBuiltInRefCount>1);
	}

	// Atomically releases a reference; invokes onRefCountZero() when the last
	// reference goes away.
	void decRefCount()
	{
		PX_ASSERT(mBuiltInRefCount>0);
		volatile PxI32* val = reinterpret_cast<volatile PxI32*>(&mBuiltInRefCount);
		if(physx::PxAtomicDecrement(val) == 0)
			onRefCountZero();
	}

	// Plain (non-atomic) read of the current count.
	PX_FORCE_INLINE PxU32 getRefCount() const
	{
		return mBuiltInRefCount;
	}
};
// Free-function facade: treats any PxRefCounted as a RefCountableExt so the
// built-in counter can be manipulated without exposing the helper class.
PX_FORCE_INLINE void RefCountable_preExportDataReset(PxRefCounted& base) { static_cast<RefCountableExt&>(base).preExportDataReset(); }
PX_FORCE_INLINE void RefCountable_incRefCount(PxRefCounted& base) { static_cast<RefCountableExt&>(base).incRefCount(); }
PX_FORCE_INLINE void RefCountable_decRefCount(PxRefCounted& base) { static_cast<RefCountableExt&>(base).decRefCount(); }
PX_FORCE_INLINE PxU32 RefCountable_getRefCount(const PxRefCounted& base) { return static_cast<const RefCountableExt&>(base).getRefCount(); }
// simple thread-safe reference count
// when the ref count is zero, the object is in an undefined state (pending delete)
class RefCountable
{
	public:
	// PX_SERIALIZATION
	// Deserialization constructor: the member is left as serialized and is
	// expected to hold 1 (see preExportDataReset). NOTE(review): the assert
	// reads the not-yet-initialized member — only meaningful for in-place
	// deserialization; confirm against the serialization framework.
	RefCountable(const PxEMPTY) { PX_ASSERT(mRefCount == 1); }
	// Resets the counter to 1 before export so deserialized objects start
	// with a single reference.
	void preExportDataReset() { mRefCount = 1; }
	//~PX_SERIALIZATION

	explicit RefCountable(PxU32 initialCount = 1)
		: mRefCount(PxI32(initialCount))
	{
		PX_ASSERT(mRefCount!=0);
	}

	virtual ~RefCountable() {}

	/**
	Calls 'delete this;'. It needs to be overloaded for classes also deriving from
	PxBase and call 'Cm::deletePxBase(this);' instead.
	*/
	virtual void onRefCountZero()
	{
		PX_DELETE_THIS;
	}

	// Atomically adds a reference. Must only be called on an object that
	// already holds at least one reference.
	void incRefCount()
	{
		physx::PxAtomicIncrement(&mRefCount);
		// value better be greater than 1, or we've created a ref to an undefined object
		PX_ASSERT(mRefCount>1);
	}

	// Atomically releases a reference; destroys the object via
	// onRefCountZero() when the count reaches zero.
	void decRefCount()
	{
		PX_ASSERT(mRefCount>0);
		if(physx::PxAtomicDecrement(&mRefCount) == 0)
			onRefCountZero();
	}

	PX_FORCE_INLINE PxU32 getRefCount() const
	{
		return PxU32(mRefCount);
	}
	private:
	volatile PxI32 mRefCount;	//!< current reference count; modified atomically
};
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,126 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RENDER_BUFFER_H
#define CM_RENDER_BUFFER_H
#include "common/PxRenderBuffer.h"
#include "CmUtils.h"
#include "foundation/PxArray.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Cm
{
/**
Implementation of PxRenderBuffer.
*/
class RenderBuffer : public PxRenderBuffer, public PxUserAllocated
{
template <typename T>
void append(PxArray<T>& dst, const T* src, PxU32 count)
{
dst.reserve(dst.size() + count);
for(const T* end=src+count; src<end; ++src)
dst.pushBack(*src);
}
public:
RenderBuffer() :
mPoints("renderBufferPoints"),
mLines("renderBufferLines"),
mTriangles("renderBufferTriangles")
{}
virtual PxU32 getNbPoints() const { return mPoints.size(); }
virtual const PxDebugPoint* getPoints() const { return mPoints.begin(); }
virtual void addPoint(const PxDebugPoint& point) { mPoints.pushBack(point); }
virtual PxU32 getNbLines() const { return mLines.size(); }
virtual const PxDebugLine* getLines() const { return mLines.begin(); }
virtual void addLine(const PxDebugLine& line) { mLines.pushBack(line); }
virtual PxDebugLine* reserveLines(const PxU32 nbLines) {return reserveContainerMemory(mLines, nbLines);}
virtual PxDebugPoint* reservePoints(const PxU32 nbPoints) { return reserveContainerMemory(mPoints, nbPoints); }
virtual PxU32 getNbTriangles() const { return mTriangles.size(); }
virtual const PxDebugTriangle* getTriangles() const { return mTriangles.begin(); }
virtual void addTriangle(const PxDebugTriangle& triangle) { mTriangles.pushBack(triangle); }
virtual void append(const PxRenderBuffer& other)
{
append(mPoints, other.getPoints(), other.getNbPoints());
append(mLines, other.getLines(), other.getNbLines());
append(mTriangles, other.getTriangles(), other.getNbTriangles());
}
virtual void clear()
{
mPoints.clear();
mLines.clear();
mTriangles.clear();
}
virtual bool empty() const
{
return mPoints.empty() && mLines.empty() && mTriangles.empty();
}
virtual void shift(const PxVec3& delta)
{
for(PxU32 i=0; i < mPoints.size(); i++)
mPoints[i].pos += delta;
for(PxU32 i=0; i < mLines.size(); i++)
{
mLines[i].pos0 += delta;
mLines[i].pos1 += delta;
}
for(PxU32 i=0; i < mTriangles.size(); i++)
{
mTriangles[i].pos0 += delta;
mTriangles[i].pos1 += delta;
mTriangles[i].pos2 += delta;
}
}
PxArray<PxDebugPoint> mPoints;
PxArray<PxDebugLine> mLines;
PxArray<PxDebugTriangle> mTriangles;
};
} // Cm
}
#endif

View File

@@ -0,0 +1,241 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SCALING_H
#define CM_SCALING_H
#include "foundation/PxBounds3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxMat34.h"
#include "foundation/PxSIMDHelpers.h"
#include "geometry/PxMeshScale.h"
#include "CmUtils.h"
namespace physx
{
namespace Cm
{
// PT: same as PxMeshScale::toMat33() but faster.
// Builds R^T * diag(scale) * R, i.e. the scale expressed in the rotated frame.
PX_FORCE_INLINE PxMat33 toMat33(const PxMeshScale& meshScale)
{
	const PxMat33Padded rotation(meshScale.rotation);

	// Start from R^T and scale its columns by diag(scale) on the right.
	PxMat33 skew = rotation.getTranspose();
	skew.column0 *= meshScale.scale[0];
	skew.column1 *= meshScale.scale[1];
	skew.column2 *= meshScale.scale[2];

	return skew * rotation;
}
// class that can perform scaling fast. Relatively large size, generated from PxMeshScale on demand.
// CS: I've removed most usages of this class, because most of the time only one-way transform is needed.
// If you only need a temporary FastVertex2ShapeScaling, setup your transform as PxMat34Legacy and use
// normal matrix multiplication or a transform() overload to convert points and bounds between spaces.
class FastVertex2ShapeScaling
{
	public:
	// Identity scaling by default.
	PX_INLINE FastVertex2ShapeScaling()
	{
		//no scaling by default:
		vertex2ShapeSkew = PxMat33(PxIdentity);
		shape2VertexSkew = PxMat33(PxIdentity);
		mFlipNormal = false;
	}

	PX_INLINE explicit FastVertex2ShapeScaling(const PxMeshScale& scale)
	{
		init(scale);
	}

	PX_INLINE FastVertex2ShapeScaling(const PxVec3& scale, const PxQuat& rotation)
	{
		init(scale, rotation);
	}

	PX_INLINE void init(const PxMeshScale& scale)
	{
		init(scale.scale, scale.rotation);
	}

	PX_INLINE void setIdentity()
	{
		vertex2ShapeSkew = PxMat33(PxIdentity);
		shape2VertexSkew = PxMat33(PxIdentity);
		mFlipNormal = false;
	}

	// Precomputes vertex2ShapeSkew = R^T * diag(scale) * R and its inverse.
	// A negative scale determinant mirrors the mesh, which reverses triangle
	// winding (tracked in mFlipNormal).
	PX_INLINE void init(const PxVec3& scale, const PxQuat& rotation)
	{
		// TODO: may want to optimize this for cases where we have uniform or axis aligned scaling!
		// That would introduce branches and it's unclear to me whether that's faster than just doing the math.
		// Lazy computation would be another option, at the cost of introducing even more branches.
		const PxMat33Padded R(rotation);
		vertex2ShapeSkew = R.getTranspose();
		const PxMat33 diagonal = PxMat33::createDiagonal(scale);
		vertex2ShapeSkew = vertex2ShapeSkew * diagonal;
		vertex2ShapeSkew = vertex2ShapeSkew * R;

		/*
		The inverse, is, explicitly:
		shape2VertexSkew.setTransposed(R);
		shape2VertexSkew.multiplyDiagonal(PxVec3(1.0f/scale.x, 1.0f/scale.y, 1.0f/scale.z));
		shape2VertexSkew *= R;
		It may be competitive to compute the inverse -- though this has a branch in it:
		*/
		shape2VertexSkew = vertex2ShapeSkew.getInverse();

		mFlipNormal = ((scale.x * scale.y * scale.z) < 0.0f);
	}

	// Swaps v1/v2 when the scale mirrors the mesh, restoring triangle winding.
	PX_FORCE_INLINE void flipNormal(PxVec3& v1, PxVec3& v2) const
	{
		if (mFlipNormal)
		{
			PxVec3 tmp = v1; v1 = v2; v2 = tmp;
		}
	}

	// vertex space -> shape space.
	PX_FORCE_INLINE PxVec3 operator* (const PxVec3& src) const
	{
		return vertex2ShapeSkew * src;
	}

	// shape space -> vertex space.
	PX_FORCE_INLINE PxVec3 operator% (const PxVec3& src) const
	{
		return shape2VertexSkew * src;
	}

	PX_FORCE_INLINE const PxMat33& getVertex2ShapeSkew() const
	{
		return vertex2ShapeSkew;
	}

	PX_FORCE_INLINE const PxMat33& getShape2VertexSkew() const
	{
		return shape2VertexSkew;
	}

	// Composes the vertex->shape skew with a shape->world transform.
	PX_INLINE PxMat34 getVertex2WorldSkew(const PxMat34& shape2world) const
	{
		const PxMat34 vertex2worldSkew = shape2world * getVertex2ShapeSkew();
		//vertex2worldSkew = shape2world * [vertex2shapeSkew, 0]
		//[aR at] * [bR bt] = [aR * bR aR * bt + at] NOTE: order of operations important so it works when this ?= left ?= right.
		return vertex2worldSkew;
	}

	// Composes the shape->vertex skew with the inverse of a shape->world
	// transform (rotation inverted via transpose, translation negated).
	PX_INLINE PxMat34 getWorld2VertexSkew(const PxMat34& shape2world) const
	{
		//world2vertexSkew = shape2vertex * invPQ(shape2world)
		//[aR 0] * [bR' -bR'*bt] = [aR * bR' -aR * bR' * bt + 0]
		const PxMat33 rotate( shape2world[0], shape2world[1], shape2world[2] );
		const PxMat33 M = getShape2VertexSkew() * rotate.getTranspose();
		return PxMat34(M[0], M[1], M[2], -M * shape2world[3]);
	}

	//! Transforms a shape space OBB to a vertex space OBB. All 3 params are in and out.
	void transformQueryBounds(PxVec3& center, PxVec3& extents, PxMat33& basis) const
	{
		basis.column0 = shape2VertexSkew * (basis.column0 * extents.x);
		basis.column1 = shape2VertexSkew * (basis.column1 * extents.y);
		basis.column2 = shape2VertexSkew * (basis.column2 * extents.z);

		center = shape2VertexSkew * center;
		extents = PxOptimizeBoundingBox(basis);
	}

	// Maps a plane (n.x = d form) into shape space, renormalizing the normal
	// and rescaling d by the same factor so the plane equation still holds.
	void transformPlaneToShapeSpace(const PxVec3& nIn, const PxReal dIn, PxVec3& nOut, PxReal& dOut) const
	{
		const PxVec3 tmp = shape2VertexSkew.transformTranspose(nIn);
		const PxReal denom = 1.0f / tmp.magnitude();
		nOut = tmp * denom;
		dOut = dIn * denom;
	}

	// True when the scale has a negative determinant (mirrored mesh).
	PX_FORCE_INLINE bool flipsNormal() const { return mFlipNormal; }

	private:
	PxMat33 vertex2ShapeSkew;	//!< R^T * diag(scale) * R
	PxMat33 shape2VertexSkew;	//!< inverse of vertex2ShapeSkew
	bool mFlipNormal;			//!< scale determinant is negative
};
// Writes the (optionally scaled) triangle v0/v1/v2 into v[0..2]. When the
// scale mirrors the mesh, v1 and v2 swap slots so the winding is preserved.
PX_FORCE_INLINE void getScaledVertices(PxVec3* v, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2, bool idtMeshScale, const Cm::FastVertex2ShapeScaling& scaling)
{
	if(idtMeshScale)
	{
		// Identity scale: copy straight through.
		v[0] = v0;
		v[1] = v1;
		v[2] = v2;
		return;
	}

	v[0] = scaling * v0;
	if(scaling.flipsNormal())
	{
		// Negative-determinant scale flips the winding: swap the v1/v2 slots.
		v[2] = scaling * v1;
		v[1] = scaling * v2;
	}
	else
	{
		v[1] = scaling * v1;
		v[2] = scaling * v2;
	}
}
} // namespace Cm
// Composes a rigid transform with a mesh scale: rotation applied to the
// vertex->shape skew, translation passed through.
PX_INLINE PxMat34 operator*(const PxTransform& transform, const PxMeshScale& scale)
{
	const PxMat33Padded rot(transform.q);
	return PxMat34(rot * Cm::toMat33(scale), transform.p);
}
// Applies a mesh scale before a rigid transform: the skew multiplies both the
// rotation and the translation.
PX_INLINE PxMat34 operator*(const PxMeshScale& scale, const PxTransform& transform)
{
	const PxMat33 skew = Cm::toMat33(scale);
	const PxMat33Padded rot(transform.q);
	return PxMat34(skew * rot, skew * transform.p);
}
// Composes an affine transform with a mesh scale on the right.
PX_INLINE PxMat34 operator*(const PxMat34& transform, const PxMeshScale& scale)
{
	const PxMat33 skew = Cm::toMat33(scale);
	return PxMat34(transform.m * skew, transform.p);
}
// Applies a mesh scale before an affine transform: the skew multiplies both
// the linear part and the translation.
PX_INLINE PxMat34 operator*(const PxMeshScale& scale, const PxMat34& transform)
{
	const PxMat33 skew = Cm::toMat33(scale);
	return PxMat34(skew * transform.m, skew * transform.p);
}
}
#endif

View File

@@ -0,0 +1,416 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUtilities.h"
#include "CmSerialize.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAlloca.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Cm;
// Reads four consecutive bytes, e.g. a chunk identifier such as 'N','X','S'
// plus a flags byte.
void physx::readChunk(PxI8& a, PxI8& b, PxI8& c, PxI8& d, PxInputStream& stream)
{
	PxI8* bytes[4] = { &a, &b, &c, &d };
	for(PxU32 i=0; i<4; i++)
		stream.read(bytes[i], sizeof(PxI8));
}
///////////////////////////////////////////////////////////////////////////////

// Reads a 16-bit word, byte-swapping when the stream endianness does not
// match the platform.
PxU16 physx::readWord(bool mismatch, PxInputStream& stream)
{
	PxU16 value;
	stream.read(&value, sizeof(PxU16));
	if(mismatch)
		flip(value);
	return value;
}

// Reads a 32-bit dword, byte-swapping on endian mismatch.
PxU32 physx::readDword(bool mismatch, PxInputStream& stream)
{
	PxU32 value;
	stream.read(&value, sizeof(PxU32));
	if(mismatch)
		flip(value);
	return value;
}

// Reads a 32-bit float through an integer alias so the byte swap can operate
// on the integer representation.
PxF32 physx::readFloat(bool mismatch, PxInputStream& stream)
{
	union
	{
		PxU32 u;
		PxF32 f;
	} alias;
	stream.read(&alias.u, sizeof(PxU32));
	if(mismatch)
		flip(alias.u);
	return alias.f;
}
///////////////////////////////////////////////////////////////////////////////

// Writes a 16-bit word, byte-swapped on endian mismatch. The swap is applied
// to a local copy so the caller's value is untouched.
void physx::writeWord(PxU16 value, bool mismatch, PxOutputStream& stream)
{
	PxU16 out = value;
	if(mismatch)
		flip(out);
	stream.write(&out, sizeof(PxU16));
}

// Writes a 32-bit dword, byte-swapped on endian mismatch.
void physx::writeDword(PxU32 value, bool mismatch, PxOutputStream& stream)
{
	PxU32 out = value;
	if(mismatch)
		flip(out);
	stream.write(&out, sizeof(PxU32));
}

// Writes a 32-bit float, byte-swapped on endian mismatch.
void physx::writeFloat(PxF32 value, bool mismatch, PxOutputStream& stream)
{
	PxF32 out = value;
	if(mismatch)
		flip(out);
	stream.write(&out, sizeof(PxF32));
}
///////////////////////////////////////////////////////////////////////////////
bool physx::readFloatBuffer(PxF32* dest, PxU32 nbFloats, bool mismatch, PxInputStream& stream)
{
stream.read(dest, sizeof(PxF32)*nbFloats);
if(mismatch)
{
for(PxU32 i=0;i<nbFloats;i++)
flip(dest[i]);
}
return true;
}
void physx::writeFloatBuffer(const PxF32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
if(mismatch)
{
while(nb--)
{
PxF32 f = *src++;
flip(f);
stream.write(&f, sizeof(PxF32));
}
}
else
stream.write(src, sizeof(PxF32) * nb);
}
void physx::writeWordBuffer(const PxU16* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
if(mismatch)
{
while(nb--)
{
PxU16 w = *src++;
flip(w);
stream.write(&w, sizeof(PxU16));
}
}
else
stream.write(src, sizeof(PxU16) * nb);
}
void physx::readWordBuffer(PxU16* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
stream.read(dest, sizeof(PxU16)*nb);
if(mismatch)
{
for(PxU32 i=0;i<nb;i++)
{
flip(dest[i]);
}
}
}
void physx::writeWordBuffer(const PxI16* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
if (mismatch)
{
while (nb--)
{
PxI16 w = *src++;
flip(w);
stream.write(&w, sizeof(PxI16));
}
}
else
stream.write(src, sizeof(PxI16) * nb);
}
void physx::readByteBuffer(PxU8* dest, PxU32 nb, PxInputStream& stream)
{
stream.read(dest, sizeof(PxU8) * nb);
}
void physx::writeByteBuffer(const PxU8* src, PxU32 nb, PxOutputStream& stream)
{
stream.write(src, sizeof(PxU8) * nb);
}
void physx::readWordBuffer(PxI16* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
stream.read(dest, sizeof(PxI16)*nb);
if (mismatch)
{
for (PxU32 i = 0; i < nb; i++)
{
flip(dest[i]);
}
}
}
///////////////////////////////////////////////////////////////////////////////

// Writes the standard "NXS" stream header: magic + endian flag byte, the
// caller's chunk identifier, then the version. Always returns true.
bool physx::writeHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32 version, bool mismatch, PxOutputStream& stream)
{
	// Store endianness: the flag records the endianness the data is written in.
	PxI8 endianFlag = PxLittleEndian();
	if(mismatch)
		endianFlag ^= 1;

	writeChunk('N', 'X', 'S', endianFlag, stream);	// "Novodex stream" identifier
	writeChunk(a, b, c, d, stream);					// Chunk identifier
	writeDword(version, mismatch, stream);
	return true;
}

// Legacy "ICE" header variant of writeHeader.
bool Cm::WriteHeader(PxU8 a, PxU8 b, PxU8 c, PxU8 d, PxU32 version, bool mismatch, PxOutputStream& stream)
{
	PxU8 endianFlag = PxU8(PxLittleEndian());
	if(mismatch)
		endianFlag ^= 1;

	writeChunk('I', 'C', 'E', PxI8(endianFlag), stream);	// ICE identifier
	writeChunk(PxI8(a), PxI8(b), PxI8(c), PxI8(d), stream);	// Chunk identifier
	writeDword(version, mismatch, stream);
	return true;
}
// Reads and validates an "NXS" header: checks the magic, derives the endian
// mismatch from the flag byte, validates the expected chunk id, then reads
// the version. Returns false on any identifier mismatch.
bool physx::readHeader(PxI8 a_, PxI8 b_, PxI8 c_, PxI8 d_, PxU32& version, bool& mismatch, PxInputStream& stream)
{
	PxI8 a, b, c, d;
	readChunk(a, b, c, d, stream);
	if(a!='N' || b!='X' || c!='S')
		return false;

	const PxI8 streamIsLittleEndian = d&1;
	mismatch = streamIsLittleEndian!=PxLittleEndian();

	readChunk(a, b, c, d, stream);
	if(a!=a_ || b!=b_ || c!=c_ || d!=d_)
		return false;

	version = readDword(mismatch, stream);
	return true;
}

// Legacy "ICE" header variant of readHeader.
bool Cm::ReadHeader(PxU8 a_, PxU8 b_, PxU8 c_, PxU8 d_, PxU32& version, bool& mismatch, PxInputStream& stream)
{
	PxI8 a, b, c, d;
	readChunk(a, b, c, d, stream);
	if(a!='I' || b!='C' || c!='E')
		return false;

	const PxU8 streamIsLittleEndian = PxU8(d&1);
	mismatch = streamIsLittleEndian!=PxLittleEndian();

	readChunk(a, b, c, d, stream);
	if(a!=a_ || b!=b_ || c!=c_ || d!=d_)
		return false;

	version = readDword(mismatch, stream);
	return true;
}
///////////////////////////////////////////////////////////////////////////////

// Largest value in a 32-bit index buffer (0 when the buffer is empty).
PxU32 physx::computeMaxIndex(const PxU32* indices, PxU32 nbIndices)
{
	PxU32 best = 0;
	for(PxU32 i=0; i<nbIndices; i++)
	{
		if(indices[i] > best)
			best = indices[i];
	}
	return best;
}

// Largest value in a 16-bit index buffer (0 when the buffer is empty).
PxU16 physx::computeMaxIndex(const PxU16* indices, PxU32 nbIndices)
{
	PxU16 best = 0;
	for(PxU32 i=0; i<nbIndices; i++)
	{
		if(indices[i] > best)
			best = indices[i];
	}
	return best;
}
// Writes indices using the smallest encoding that can represent maxIndex:
// bytes (<=0xff), words (<=0xffff), else dwords.
void physx::storeIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
		{
			const PxU8 byteValue = PxU8(indices[i]);
			stream.write(&byteValue, sizeof(PxU8));
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
			writeWord(PxTo16(indices[i]), platformMismatch, stream);
	}
	else
		writeIntBuffer(indices, nbIndices, platformMismatch, stream);
}

// Inverse of storeIndices; maxIndex must match the value used when storing
// so the same encoding branch is taken.
void physx::readIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
		{
			PxU8 byteValue;
			stream.read(&byteValue, sizeof(PxU8));
			indices[i] = byteValue;
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
			indices[i] = readWord(platformMismatch, stream);
	}
	else
		readIntBuffer(indices, nbIndices, platformMismatch, stream);
}
///////////////////////////////////////////////////////////////////////////////

// Legacy (ICE-era) variant of storeIndices: same adaptive encoding, but the
// dword path writes values one at a time instead of using writeIntBuffer.
void Cm::StoreIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
		{
			const PxU8 byteValue = PxU8(indices[i]);
			stream.write(&byteValue, sizeof(PxU8));
		}
	}
	else if(maxIndex<=0xffff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
			writeWord(PxTo16(indices[i]), platformMismatch, stream);
	}
	else
	{
		// WriteDwordBuffer(indices, nbIndices, platformMismatch, stream);
		for(PxU32 i=0; i<nbIndices; i++)
			writeDword(indices[i], platformMismatch, stream);
	}
}
// Legacy variant of readIndices. Narrow encodings are read via a PxAlloca
// stack buffer and widened to 32 bits.
// NOTE(review): the alloca size scales with nbIndices — confirm callers bound
// it to avoid stack overflow on huge meshes.
void Cm::ReadIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		// Byte-encoded path: bulk-read then widen.
		PxU8* tmp = reinterpret_cast<PxU8*>(PxAlloca(nbIndices*sizeof(PxU8)));
		stream.read(tmp, nbIndices*sizeof(PxU8));
		for(PxU32 i=0;i<nbIndices;i++)
			indices[i] = tmp[i];
		// for(PxU32 i=0;i<nbIndices;i++)
		// indices[i] = stream.ReadByte();
	}
	else if(maxIndex<=0xffff)
	{
		// Word-encoded path: bulk-read (with endian fix) then widen.
		PxU16* tmp = reinterpret_cast<PxU16*>(PxAlloca(nbIndices*sizeof(PxU16)));
		readWordBuffer(tmp, nbIndices, platformMismatch, stream);
		for(PxU32 i=0;i<nbIndices;i++)
			indices[i] = tmp[i];
		// for(PxU32 i=0;i<nbIndices;i++)
		// indices[i] = ReadWord(platformMismatch, stream);
	}
	else
	{
		ReadDwordBuffer(indices, nbIndices, platformMismatch, stream);
	}
}
// 16-bit index variant: bytes when maxIndex fits in one, words otherwise.
void Cm::StoreIndices(PxU16 maxIndex, PxU32 nbIndices, const PxU16* indices, PxOutputStream& stream, bool platformMismatch)
{
	if(maxIndex<=0xff)
	{
		for(PxU32 i=0; i<nbIndices; i++)
		{
			const PxU8 byteValue = PxU8(indices[i]);
			stream.write(&byteValue, sizeof(PxU8));
		}
	}
	else
	{
		for(PxU32 i=0; i<nbIndices; i++)
			writeWord(indices[i], platformMismatch, stream);
	}
}
void Cm::ReadIndices(PxU16 maxIndex, PxU32 nbIndices, PxU16* indices, PxInputStream& stream, bool platformMismatch)
{
	// 16-bit variant of ReadIndices; mirrors the matching StoreIndices overload.
	if(maxIndex>0xff)
	{
		// Words were written endian-aware; read them straight into the destination.
		readWordBuffer(indices, nbIndices, platformMismatch, stream);
		return;
	}
	// Byte-sized indices: bulk-read the raw bytes, then widen to 16 bits.
	PxU8* byteBuffer = reinterpret_cast<PxU8*>(PxAlloca(nbIndices*sizeof(PxU8)));
	stream.read(byteBuffer, nbIndices*sizeof(PxU8));
	for(PxU32 i=0; i<nbIndices; ++i)
		indices[i] = byteBuffer[i];
}

View File

@@ -0,0 +1,197 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SERIALIZE_H
#define CM_SERIALIZE_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxIO.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxUtilities.h"
namespace physx
{
PX_INLINE void flip(PxU16& v)
{
	// Byte-swap a 16-bit unsigned value in place (endianness conversion).
	v = PxU16((v >> 8) | (v << 8));
}
PX_INLINE void flip(PxI16& v)
{
	// Byte-swap a signed 16-bit value in place, working on the raw bytes
	// (avoids shifting a signed quantity).
	PxI8* bytes = reinterpret_cast<PxI8*>(&v);
	const PxI8 first = bytes[0];
	bytes[0] = bytes[1];
	bytes[1] = first;
}
PX_INLINE void flip(PxU32& v)
{
	// Reverse the four bytes of a 32-bit unsigned value (endianness conversion).
	v =  (v >> 24)
	  | ((v >>  8) & 0x0000ff00)
	  | ((v <<  8) & 0x00ff0000)
	  |  (v << 24);
}
// MS: It is important to modify the value directly and not use a temporary variable or a return
// value. The reason for this is that a flipped float might have a bit pattern which indicates
// an invalid float. If such a float is assigned to another float, the bit pattern
// can change again (maybe to map invalid floats to a common invalid pattern?).
// When reading the float and flipping again, the changed bit pattern will result in a different
// float than the original one.
PX_INLINE void flip(PxF32& v)
{
	// Reverse the four bytes of a float strictly in place - see the note above:
	// routing the value through a temporary float could silently change an
	// invalid bit pattern, breaking the round trip.
	PxU8* bytes = reinterpret_cast<PxU8*>(&v);
	PxU8 scratch = bytes[1];
	bytes[1] = bytes[2];
	bytes[2] = scratch;
	scratch = bytes[0];
	bytes[0] = bytes[3];
	bytes[3] = scratch;
}
PX_INLINE void writeChunk(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxOutputStream& stream)
{
	// Emit a four-character chunk/tag identifier. A single 4-byte write
	// produces exactly the same stream contents as four 1-byte writes.
	const PxI8 chunk[4] = { a, b, c, d };
	stream.write(chunk, sizeof(chunk));
}
// Reads four chunk-identifier bytes; counterpart of writeChunk().
void readChunk(PxI8& a, PxI8& b, PxI8& c, PxI8& d, PxInputStream& stream);
// Endian-aware scalar readers: 'mismatch' requests a byte swap after reading.
PxU16 readWord(bool mismatch, PxInputStream& stream);
PxU32 readDword(bool mismatch, PxInputStream& stream);
PxF32 readFloat(bool mismatch, PxInputStream& stream);
// Endian-aware scalar writers: 'mismatch' requests a byte swap before writing.
void writeWord(PxU16 value, bool mismatch, PxOutputStream& stream);
void writeDword(PxU32 value, bool mismatch, PxOutputStream& stream);
void writeFloat(PxF32 value, bool mismatch, PxOutputStream& stream);
// Bulk-buffer variants of the scalar helpers above.
bool readFloatBuffer(PxF32* dest, PxU32 nbFloats, bool mismatch, PxInputStream& stream);
void writeFloatBuffer(const PxF32* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void writeWordBuffer(const PxU16* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void readWordBuffer(PxU16* dest, PxU32 nb, bool mismatch, PxInputStream& stream);
void writeWordBuffer(const PxI16* src, PxU32 nb, bool mismatch, PxOutputStream& stream);
void readWordBuffer(PxI16* dest, PxU32 nb, bool mismatch, PxInputStream& stream);
// Plain byte buffers carry no endianness, hence no mismatch flag.
void writeByteBuffer(const PxU8* src, PxU32 nb, PxOutputStream& stream);
void readByteBuffer(PxU8* dest, PxU32 nb, PxInputStream& stream);
// Writes/reads a 4-character file tag plus a version number.
bool writeHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32 version, bool mismatch, PxOutputStream& stream);
bool readHeader(PxI8 a, PxI8 b, PxI8 c, PxI8 d, PxU32& version, bool& mismatch, PxInputStream& stream);
// Reads nbInts 32-bit integers through the float-buffer path: the endian swap
// happens in place on raw bytes (see the flip() note above), so integer bit
// patterns survive unchanged.
// NOTE(review): the PxU32* -> PxF32* cast is technically a strict-aliasing
// violation; relies on compiler leniency - confirm on new toolchains.
PX_INLINE bool readIntBuffer(PxU32* dest, PxU32 nbInts, bool mismatch, PxInputStream& stream)
{
return readFloatBuffer(reinterpret_cast<PxF32*>(dest), nbInts, mismatch, stream);
}
// Writes nb 32-bit integers through the float-buffer path; counterpart of
// readIntBuffer() (same in-place byte-swap and aliasing caveat).
PX_INLINE void writeIntBuffer(const PxU32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
writeFloatBuffer(reinterpret_cast<const PxF32*>(src), nb, mismatch, stream);
}
// Legacy capitalized alias of readIntBuffer() (IceSerialize-era naming),
// kept for the code paths that still use the old names.
PX_INLINE bool ReadDwordBuffer(PxU32* dest, PxU32 nb, bool mismatch, PxInputStream& stream)
{
return readFloatBuffer(reinterpret_cast<float*>(dest), nb, mismatch, stream);
}
// Legacy capitalized alias of writeIntBuffer() (IceSerialize-era naming).
PX_INLINE void WriteDwordBuffer(const PxU32* src, PxU32 nb, bool mismatch, PxOutputStream& stream)
{
writeFloatBuffer(reinterpret_cast<const float*>(src), nb, mismatch, stream);
}
// Helpers for size-adaptive index-buffer serialization (implemented in
// CmSerialize.cpp); computeMaxIndex presumably scans the buffer for its
// largest value - the result feeds storeIndices/readIndices below.
PxU32 computeMaxIndex(const PxU32* indices, PxU32 nbIndices);
PxU16 computeMaxIndex(const PxU16* indices, PxU32 nbIndices);
void storeIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch);
void readIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch);
// PT: see PX-1163
// Decodes a file-version dword that may have been written by the version-1
// format, which always stored big-endian data (including the version number
// itself). Outputs the decoded version and the effective mismatch flag for
// the rest of the file; returns false for unknown versions (>3).
PX_FORCE_INLINE bool readBigEndianVersionNumber(PxInputStream& stream, bool mismatch_, PxU32& fileVersion, bool& mismatch)
{
// PT: allright this is going to be subtle:
// - in version 1 the data was always saved in big-endian format
// - *including the version number*!
// - so we cannot just read the version "as usual" using the passed mismatch param
// PT: mismatch value for version 1
mismatch = (PxLittleEndian() == 1);
// Read raw, without any swap, and decide afterwards which interpretation fits.
const PxU32 rawFileVersion = readDword(false, stream);
if(rawFileVersion==1)
{
// PT: this is a version-1 file with no flip
fileVersion = 1;
PX_ASSERT(!mismatch);
}
else
{
PxU32 fileVersionFlipped = rawFileVersion;
flip(fileVersionFlipped);
if(fileVersionFlipped==1)
{
// PT: this is a version-1 file with flip
fileVersion = 1;
PX_ASSERT(mismatch);
}
else
{
// PT: this is at least version 2 so we can process it "as usual"
mismatch = mismatch_;
fileVersion = mismatch_ ? fileVersionFlipped : rawFileVersion;
}
}
// Assert in debug builds, but still fail gracefully in release builds.
PX_ASSERT(fileVersion<=3);
if(fileVersion>3)
return false;
return true;
}
// PT: TODO: copied from IceSerialize.h, still needs to be refactored/cleaned up.
namespace Cm
{
// Legacy (IceSerialize-era) capitalized API. WriteHeader/ReadHeader take
// PxU8 tag bytes, unlike the PxI8-based writeHeader/readHeader above.
bool WriteHeader(PxU8 a, PxU8 b, PxU8 c, PxU8 d, PxU32 version, bool mismatch, PxOutputStream& stream);
bool ReadHeader(PxU8 a_, PxU8 b_, PxU8 c_, PxU8 d_, PxU32& version, bool& mismatch, PxInputStream& stream);
// Size-adaptive index-buffer serialization: the per-element width (1/2/4
// bytes) is derived from maxIndex and must match between store and read.
void StoreIndices(PxU32 maxIndex, PxU32 nbIndices, const PxU32* indices, PxOutputStream& stream, bool platformMismatch);
void ReadIndices(PxU32 maxIndex, PxU32 nbIndices, PxU32* indices, PxInputStream& stream, bool platformMismatch);
void StoreIndices(PxU16 maxIndex, PxU32 nbIndices, const PxU16* indices, PxOutputStream& stream, bool platformMismatch);
void ReadIndices(PxU16 maxIndex, PxU32 nbIndices, PxU16* indices, PxInputStream& stream, bool platformMismatch);
}
}
#endif

View File

@@ -0,0 +1,532 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_SPATIAL_VECTOR_H
#define CM_SPATIAL_VECTOR_H
#include "foundation/PxVec3.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxTransform.h"
/*!
Combination of two R3 vectors.
*/
namespace physx
{
namespace Cm
{
PX_ALIGN_PREFIX(16)
// 16-byte-aligned pair of 3-vectors (linear + angular part), padded out to
// 32 bytes (see the compile-time assert at the bottom of this file). Every
// constructor and the assignment operator keep the pad fields zeroed.
class SpatialVector
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector()
{}
//! Construct from two PxcVectors
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector(const PxVec3& lin, const PxVec3& ang)
: linear(lin), pad0(0.0f), angular(ang), pad1(0.0f)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVector()
{}
// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
// each time we copy a SpatialVector (see for example PIX on "solveSimpleGroupA" without this operator).
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVector& v)
{
linear = v.linear;
pad0 = 0.0f;
angular = v.angular;
pad1 = 0.0f;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector zero() { return SpatialVector(PxVec3(0),PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator+(const SpatialVector& v) const
{
return SpatialVector(linear+v.linear,angular+v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-(const SpatialVector& v) const
{
return SpatialVector(linear-v.linear,angular-v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-() const
{
return SpatialVector(-linear,-angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator *(PxReal s) const
{
return SpatialVector(linear*s,angular*s);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator+=(const SpatialVector& v)
{
linear+=v.linear;
angular+=v.angular;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator-=(const SpatialVector& v)
{
linear-=v.linear;
angular-=v.angular;
}
// Sum of the two Euclidean norms (cheap combined size measure, not the
// norm of the 6-vector).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return angular.magnitude() + linear.magnitude();
}
// Component-wise 6D dot product (linear with linear, angular with angular).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
{
return linear.dot(v.linear) + angular.dot(v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return linear.isFinite() && angular.isFinite();
}
// Scales the linear and angular parts by independent factors.
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVector scale(PxReal l, PxReal a) const
{
return Cm::SpatialVector(linear*l, angular*a);
}
PxVec3 linear;
PxReal pad0; // padding, kept at 0.0f
PxVec3 angular;
PxReal pad1; // padding, kept at 0.0f
}
PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
// 16-byte-aligned spatial vector with generically named halves (top/bottom),
// padded to 32 bytes. Constructors and operator= keep the pads zeroed.
struct SpatialVectorF
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF()
{}
// Loads the six components from a flat array: v[0..2] -> top, v[3..5] -> bottom.
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxReal* v)
: pad0(0.0f), pad1(0.0f)
{
top.x = v[0]; top.y = v[1]; top.z = v[2];
bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
}
//! Construct from two PxcVectors
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxVec3& top_, const PxVec3& bottom_)
: top(top_), pad0(0.0f), bottom(bottom_), pad1(0.0f)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVectorF()
{}
// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
// each time we copy a SpatialVector (see for example PIX on "solveSimpleGroupA" without this operator).
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
{
top = v.top;
pad0 = 0.0f;
bottom = v.bottom;
pad1 = 0.0f;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF Zero() { return SpatialVectorF(PxVec3(0), PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator+(const SpatialVectorF& v) const
{
return SpatialVectorF(top + v.top, bottom + v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-(const SpatialVectorF& v) const
{
return SpatialVectorF(top - v.top, bottom - v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-() const
{
return SpatialVectorF(-top, -bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator *(PxReal s) const
{
return SpatialVectorF(top*s, bottom*s);
}
// Component-wise (Hadamard) product of both halves.
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF multiply(const SpatialVectorF& v) const
{
return SpatialVectorF(top.multiply(v.top), bottom.multiply(v.bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
{
top *= s;
bottom *= s;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
{
top -= v.top;
bottom -= v.bottom;
}
// Sum of the two Euclidean norms (cheap combined size measure).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return top.magnitude() + bottom.magnitude();
}
PX_FORCE_INLINE PxReal magnitudeSquared() const
{
return top.magnitudeSquared() + bottom.magnitudeSquared();
}
// Spatial inner product: pairs each half with the *other* half of v
// (bottom.v.top + top.v.bottom), unlike dot() which pairs like with like.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
/*PxVec3 p0 = bottom.multiply(v.top);
PxVec3 p1 = top.multiply(v.bottom);
PxReal result = (((p1.y + p1.z) + (p0.z + p1.x)) + (p0.x + p0.y));
return result;*/
}
// Component-wise 6D dot product (top with top, bottom with bottom).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
// Cross-type dot: pairs bottom with v.angular and top with v.linear.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
{
return bottom.dot(v.angular) + top.dot(v.linear);
}
// Spatial cross product: [top x v.top ; top x v.bottom + bottom x v.top].
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF cross(const SpatialVectorF& v) const
{
SpatialVectorF a;
a.top = top.cross(v.top);
a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
return a;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF abs() const
{
return SpatialVectorF(top.abs(), bottom.abs());
}
// Rotates both halves by the rotation part of 'rot' (translation is ignored).
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotate(const PxTransform& rot) const
{
return SpatialVectorF(rot.rotate(top), rot.rotate(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotateInv(const PxTransform& rot) const
{
return SpatialVectorF(rot.rotateInv(top), rot.rotateInv(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return top.isFinite() && bottom.isFinite();
}
// True when every component's absolute value is within [0, maxV].
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
{
const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
return tValid && bValid;
}
// Scales the two halves by independent factors.
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF scale(PxReal l, PxReal a) const
{
return Cm::SpatialVectorF(top*l, bottom*a);
}
// Stores the six components into a flat array: top -> val[0..2], bottom -> val[3..5].
PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
{
val[0] = top.x; val[1] = top.y; val[2] = top.z;
val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
}
// Flat 6-component access: indices 0-2 address top, 3-5 address bottom.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
{
PX_ASSERT(index < 6);
if(index < 3)
return top[index];
return bottom[index-3];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
{
PX_ASSERT(index < 6);
if (index < 3)
return top[index];
return bottom[index-3];
}
PxVec3 top;
PxReal pad0; // padding, kept at 0.0f
PxVec3 bottom;
PxReal pad1; // padding, kept at 0.0f
} PX_ALIGN_SUFFIX(16);
// Unpadded 24-byte spatial vector (two PxVec3 halves, no alignment
// requirement). Mirrors SpatialVectorF's interface minus the padding
// bookkeeping, and interoperates with it via the mixed-type operators.
struct UnAlignedSpatialVector
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector()
{}
// Loads the six components from a flat array: v[0..2] -> top, v[3..5] -> bottom.
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxReal* v)
{
top.x = v[0]; top.y = v[1]; top.z = v[2];
bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
}
//! Construct from two PxcVectors
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxVec3& top_, const PxVec3& bottom_)
: top(top_), bottom(bottom_)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~UnAlignedSpatialVector()
{}
// Assignment from the padded variant simply drops the padding.
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
{
top = v.top;
bottom = v.bottom;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector Zero() { return UnAlignedSpatialVector(PxVec3(0), PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator+(const UnAlignedSpatialVector& v) const
{
return UnAlignedSpatialVector(top + v.top, bottom + v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-(const UnAlignedSpatialVector& v) const
{
return UnAlignedSpatialVector(top - v.top, bottom - v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-() const
{
return UnAlignedSpatialVector(-top, -bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator *(PxReal s) const
{
return UnAlignedSpatialVector(top*s, bottom*s);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
{
top *= s;
bottom *= s;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const UnAlignedSpatialVector& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const UnAlignedSpatialVector& v)
{
top -= v.top;
bottom -= v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
{
top -= v.top;
bottom -= v.bottom;
}
// Sum of the two Euclidean norms (cheap combined size measure).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return top.magnitude() + bottom.magnitude();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitudeSquared() const
{
return top.magnitudeSquared() + bottom.magnitudeSquared();
}
// Spatial inner product: pairs each half with the *other* half of v,
// unlike dot() which pairs like with like.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const UnAlignedSpatialVector& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const UnAlignedSpatialVector& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
// Spatial cross product: [top x v.top ; top x v.bottom + bottom x v.top].
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector cross(const UnAlignedSpatialVector& v) const
{
UnAlignedSpatialVector a;
a.top = top.cross(v.top);
a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
return a;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector abs() const
{
return UnAlignedSpatialVector(top.abs(), bottom.abs());
}
// Rotates both halves by the rotation part of 'rot' (translation is ignored).
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotate(const PxTransform& rot) const
{
return UnAlignedSpatialVector(rot.rotate(top), rot.rotate(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotateInv(const PxTransform& rot) const
{
return UnAlignedSpatialVector(rot.rotateInv(top), rot.rotateInv(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return top.isFinite() && bottom.isFinite();
}
// True when every component's absolute value is within [0, maxV].
// Fix: take PxAbs of each component, matching SpatialVectorF::isValid.
// The previous code compared the signed components directly, so arbitrarily
// large *negative* values were accepted as valid.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
{
const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
return tValid && bValid;
}
// Scales the two halves by independent factors.
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector scale(PxReal l, PxReal a) const
{
return Cm::UnAlignedSpatialVector(top*l, bottom*a);
}
// Stores the six components into a flat array: top -> val[0..2], bottom -> val[3..5].
PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
{
val[0] = top.x; val[1] = top.y; val[2] = top.z;
val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
}
// Flat 6-component access; valid because top and bottom are contiguous here.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
{
PX_ASSERT(index < 6);
return (&top.x)[index];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
{
PX_ASSERT(index < 6);
return (&top.x)[index];
}
PxVec3 top; //12 12
PxVec3 bottom; //12 24
};
PX_ALIGN_PREFIX(16)
// SIMD (aos) flavor of SpatialVector: linear/angular halves held in Vec3V
// registers, 32 bytes total (see the compile-time assert below).
struct SpatialVectorV
{
aos::Vec3V linear;
aos::Vec3V angular;
PX_FORCE_INLINE SpatialVectorV() {}
// Zero-initializing constructor, selected via the PxZERO tag type.
PX_FORCE_INLINE SpatialVectorV(PxZERO): linear(aos::V3Zero()), angular(aos::V3Zero()) {}
// Loads from the scalar variant; V3LoadA requires its 16-byte-aligned layout.
PX_FORCE_INLINE SpatialVectorV(const Cm::SpatialVector& v): linear(aos::V3LoadA(&v.linear.x)), angular(aos::V3LoadA(&v.angular.x)) {}
PX_FORCE_INLINE SpatialVectorV(const aos::Vec3VArg l, const aos::Vec3VArg a): linear(l), angular(a) {}
PX_FORCE_INLINE SpatialVectorV(const SpatialVectorV& other): linear(other.linear), angular(other.angular) {}
PX_FORCE_INLINE SpatialVectorV& operator=(const SpatialVectorV& other) { linear = other.linear; angular = other.angular; return *this; }
PX_FORCE_INLINE SpatialVectorV operator+(const SpatialVectorV& other) const { return SpatialVectorV(aos::V3Add(linear,other.linear),
aos::V3Add(angular, other.angular)); }
PX_FORCE_INLINE SpatialVectorV& operator+=(const SpatialVectorV& other) { linear = aos::V3Add(linear,other.linear);
angular = aos::V3Add(angular, other.angular);
return *this;
}
PX_FORCE_INLINE SpatialVectorV operator-(const SpatialVectorV& other) const { return SpatialVectorV(aos::V3Sub(linear,other.linear),
aos::V3Sub(angular, other.angular)); }
PX_FORCE_INLINE SpatialVectorV operator-() const { return SpatialVectorV(aos::V3Neg(linear), aos::V3Neg(angular)); }
PX_FORCE_INLINE SpatialVectorV operator*(const aos::FloatVArg r) const { return SpatialVectorV(aos::V3Scale(linear,r), aos::V3Scale(angular,r)); }
PX_FORCE_INLINE SpatialVectorV& operator-=(const SpatialVectorV& other) { linear = aos::V3Sub(linear,other.linear);
angular = aos::V3Sub(angular, other.angular);
return *this;
}
// Component-wise 6D dot product, returned as a splatted FloatV.
PX_FORCE_INLINE aos::FloatV dot(const SpatialVectorV& other) const { return aos::V3SumElems(aos::V3Add(aos::V3Mul(linear, other.linear), aos::V3Mul(angular, other.angular))); }
// Component-wise (Hadamard) product.
PX_FORCE_INLINE SpatialVectorV multiply(const SpatialVectorV& other) const { return SpatialVectorV(aos::V3Mul(linear, other.linear), aos::V3Mul(angular, other.angular)); }
// Fused this*m + a, per component.
PX_FORCE_INLINE SpatialVectorV multiplyAdd(const SpatialVectorV& m, const SpatialVectorV& a) const { return SpatialVectorV(aos::V3MulAdd(linear, m.linear, a.linear), aos::V3MulAdd(angular, m.angular, a.angular)); }
// Scales the two halves by independent factors.
PX_FORCE_INLINE SpatialVectorV scale(const aos::FloatV& a, const aos::FloatV& b) const { return SpatialVectorV(aos::V3Scale(linear, a), aos::V3Scale(angular, b)); }
}PX_ALIGN_SUFFIX(16);
} // namespace Cm
// Compile-time layout checks: both types must stay exactly 32 bytes.
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVector) == 32);
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVectorV) == 32);
}
#endif

View File

@@ -0,0 +1,299 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_TASK_H
#define CM_TASK_H
#include "task/PxTask.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxMutex.h"
#include "foundation/PxInlineArray.h"
#include "foundation/PxFPU.h"
// PT: this shouldn't be in Cm. The whole task manager is in the PhysX DLL so we cannot use any of these inside the Common DLL
namespace physx
{
namespace Cm
{
// wrapper around the public PxLightCpuTask
// internal SDK tasks should be inherited from
// this and override the runInternal() method
// to ensure that the correct floating point
// state is set / reset during execution
class Task : public physx::PxLightCpuTask
{
public:
// contextId is stored in the base class for profiling/association purposes.
Task(PxU64 contextId)
{
mContextID = contextId;
}
// Installs the FPU/SIMD control-state guard, then dispatches to runInternal().
virtual void run()
{
#if PX_SWITCH // special case because default rounding mode is not nearest
PX_FPU_GUARD;
#else
PX_SIMD_GUARD;
#endif
runInternal();
}
// Derived tasks implement their work here instead of overriding run().
virtual void runInternal()=0;
};
// same as Cm::Task but inheriting from physx::PxBaseTask
// instead of PxLightCpuTask
class BaseTask : public physx::PxBaseTask
{
public:
// Installs the FPU/SIMD control-state guard, then dispatches to runInternal().
virtual void run()
{
#if PX_SWITCH // special case because default rounding mode is not nearest
PX_FPU_GUARD;
#else
PX_SIMD_GUARD;
#endif
runInternal();
}
// Derived tasks implement their work here instead of overriding run().
virtual void runInternal()=0;
};
// Task that forwards execution to a member function Fn of an object of type T.
// The continuation (mCont, managed by PxLightCpuTask) is passed to Fn.
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateTask : public Cm::Task, public PxUserAllocated
{
public:
DelegateTask(PxU64 contextID, T* obj, const char* name) : Cm::Task(contextID), mObj(obj), mName(name) {}
// NOTE: re-overrides Cm::Task::run() with the same guard setup, dispatching
// straight to the delegate instead of going through runInternal().
virtual void run()
{
#if PX_SWITCH // special case because default rounding mode is not nearest
PX_FPU_GUARD;
#else
PX_SIMD_GUARD;
#endif
(mObj->*Fn)(mCont);
}
virtual void runInternal()
{
(mObj->*Fn)(mCont);
}
virtual const char* getName() const
{
return mName;
}
// Retargets the delegate to another object instance.
void setObject(T* obj) { mObj = obj; }
private:
T* mObj;
const char* mName;
};
/**
\brief A task that maintains a list of dependent tasks.
This task maintains a list of dependent tasks that have their reference counts
reduced on completion of the task.
The refcount is incremented every time a dependent task is added.
*/
class FanoutTask : public Cm::BaseTask
{
PX_NOCOPY(FanoutTask)
public:
FanoutTask(PxU64 contextID, const char* name) : Cm::BaseTask(), mRefCount(0), mName(name), mNotifySubmission(false) { mContextID = contextID; }
// No work of its own; this task only fans out to its dependents.
virtual void runInternal() {}
virtual const char* getName() const { return mName; }
/**
Swap mDependents with mReferencesToRemove when refcount goes to 0.
*/
virtual void removeReference()
{
PxMutex::ScopedLock lock(mMutex);
if (!physx::PxAtomicDecrement(&mRefCount))
{
// prevents access to mReferencesToRemove until release
physx::PxAtomicIncrement(&mRefCount);
mNotifySubmission = false;
PX_ASSERT(mReferencesToRemove.empty());
// Move the dependent list aside; release() will decrement their refcounts.
for (PxU32 i = 0; i < mDependents.size(); i++)
mReferencesToRemove.pushBack(mDependents[i]);
mDependents.clear();
mTm->getCpuDispatcher()->submitTask(*this);
}
}
/**
\brief Increases reference count
*/
virtual void addReference()
{
PxMutex::ScopedLock lock(mMutex);
physx::PxAtomicIncrement(&mRefCount);
mNotifySubmission = true;
}
/**
\brief Return the ref-count for this task
*/
PX_INLINE PxI32 getReference() const
{
return mRefCount;
}
/**
Sets the task manager. Doesn't increase the reference count.
*/
PX_INLINE void setTaskManager(physx::PxTaskManager& tm)
{
mTm = &tm;
}
/**
Adds a dependent task. It also sets the task manager querying it from the dependent task.
The refcount is incremented every time a dependent task is added.
*/
PX_INLINE void addDependent(physx::PxBaseTask& dependent)
{
PxMutex::ScopedLock lock(mMutex);
physx::PxAtomicIncrement(&mRefCount);
mTm = dependent.getTaskManager();
mDependents.pushBack(&dependent);
dependent.addReference();
mNotifySubmission = true;
}
/**
Reduces reference counts of the continuation task and the dependent tasks, also
clearing the copy of continuation and dependents task list.
*/
virtual void release()
{
// Local copy so the dependents can be notified after the lock is dropped.
PxInlineArray<physx::PxBaseTask*, 10> referencesToRemove;
{
PxMutex::ScopedLock lock(mMutex);
const PxU32 contCount = mReferencesToRemove.size();
referencesToRemove.reserve(contCount);
for (PxU32 i=0; i < contCount; ++i)
referencesToRemove.pushBack(mReferencesToRemove[i]);
mReferencesToRemove.clear();
// allow access to mReferencesToRemove again
if (mNotifySubmission)
{
removeReference();
}
else
{
physx::PxAtomicDecrement(&mRefCount);
}
// the scoped lock needs to get freed before the continuation tasks get (potentially) submitted because
// those continuation tasks might trigger events that delete this task and corrupt the memory of the
// mutex (for example, assume this task is a member of the scene then the submitted tasks cause the simulation
// to finish and then the scene gets released which in turn will delete this task. When this task then finally
// continues the heap memory will be corrupted.
}
for (PxU32 i=0; i < referencesToRemove.size(); ++i)
referencesToRemove[i]->removeReference();
}
protected:
volatile PxI32 mRefCount;
const char* mName;
PxInlineArray<physx::PxBaseTask*, 4> mDependents;
// Dependents moved aside by removeReference(), consumed by release().
PxInlineArray<physx::PxBaseTask*, 4> mReferencesToRemove;
// Set whenever references/dependents were added since the last submission;
// release() uses it to decide whether to resubmit via removeReference().
bool mNotifySubmission;
PxMutex mMutex; // guarding mDependents and mNotifySubmission
};
/**
\brief Specialization of FanoutTask class in order to provide the delegation mechanism.
*/
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateFanoutTask : public FanoutTask, public PxUserAllocated
{
public:
DelegateFanoutTask(PxU64 contextID, T* obj, const char* name) :
FanoutTask(contextID, name), mObj(obj) { }
virtual void runInternal()
{
// The first stashed dependent (if any) is handed to the delegate as its
// continuation; FanoutTask::release() still notifies all of them.
physx::PxBaseTask* continuation = mReferencesToRemove.empty() ? NULL : mReferencesToRemove[0];
(mObj->*Fn)(continuation);
}
// Retargets the delegate to another object instance.
void setObject(T* obj) { mObj = obj; }
private:
T* mObj;
};
PX_FORCE_INLINE void startTask(Cm::Task* task, PxBaseTask* continuation)
{
	// Either schedule the task behind a continuation, or execute its work
	// synchronously right here when no continuation is provided.
	if(!continuation)
	{
		task->runInternal();
		return;
	}
	// PT: TODO: just make this a PxBaseTask function?
	task->setContinuation(continuation);
	task->removeReference();
}
template<class T>
PX_FORCE_INLINE void updateTaskLinkedList(T*& previousTask, T* task, T*& head)
{
	// Append 'task' to an intrusive singly-linked list: hook it behind the
	// previous node when one exists, otherwise it becomes the new head.
	T*& linkPoint = previousTask ? previousTask->mNext : head;
	linkPoint = task;
	previousTask = task;
}
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,139 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_TRANSFORM_UTILS_H
#define CM_TRANSFORM_UTILS_H
#include "foundation/PxVecMath.h"
namespace
{
using namespace physx::aos;
// V3PrepareCross would help here, but it's not on all platforms yet...
// Composes two rigid transforms, out = a * b, with each quaternion split into its
// scalar part w and vector part v, plus a position p.
// (wo, vo) is the Hamilton product qa * qb; po = pa + rotate(qa, pb), where the
// rotation is expanded as 2*(p*(w*w - 0.5) + w*(v x p) + v*(v . p)) — valid for
// unit quaternions.
PX_FORCE_INLINE void transformFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
								   const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
								   FloatV& wo, Vec3V& vo, Vec3V& po)
{
	// Quaternion product: w = wa*wb - va.vb, v = wb*va + wa*vb + va x vb.
	wo = FSub(FMul(wa, wb), V3Dot(va, vb));
	vo = V3ScaleAdd(va, wb, V3ScaleAdd(vb, wa, V3Cross(va, vb)));

	// Rotate pb by qa (accumulated in t1..t3, then doubled) and translate by pa.
	const Vec3V t1 = V3Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f)));
	const Vec3V t2 = V3ScaleAdd(V3Cross(va, pb), wa, t1);
	const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pb), t2);
	po = V3ScaleAdd(t3, FLoad(2.f), pa);
}
// Composes out = inverse(a) * b for rigid transforms (quaternions split into scalar
// part w and vector part v, plus position p). (wo, vo) is conj(qa) * qb; po is
// (pb - pa) rotated by conj(qa), using the same doubled expansion as transformFast.
// Assumes unit quaternions (inverse == conjugate).
PX_FORCE_INLINE void transformInvFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
									  const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
									  FloatV& wo, Vec3V& vo, Vec3V& po)
{
	// conj(qa) * qb: w = wa*wb + va.vb, v = wa*vb - wb*va + vb x va.
	wo = FScaleAdd(wa, wb, V3Dot(va, vb));
	vo = V3NegScaleSub(va, wb, V3ScaleAdd(vb, wa, V3Cross(vb, va)));

	// Inverse-rotate the relative position pt = pb - pa (note the flipped cross order).
	const Vec3V pt = V3Sub(pb, pa);
	const Vec3V t1 = V3Scale(pt, FScaleAdd(wa, wa, FLoad(-0.5f)));
	const Vec3V t2 = V3ScaleAdd(V3Cross(pt, va), wa, t1);
	const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pt), t2);
	po = V3Add(t3,t3);
}
}
namespace physx
{
namespace Cm
{
// PT: actor2World * shape2Actor
// Computes the world-space pose of a shape on a static actor:
// outTransform = actor2World * shape2Actor, using aligned SIMD loads/stores.
// All three PxTransform arguments must be 16-byte aligned (asserted below).
PX_FORCE_INLINE void getStaticGlobalPoseAligned(const PxTransform& actor2World, const PxTransform& shape2Actor, PxTransform& outTransform)
{
	using namespace aos;

	// V3LoadA/QuatVLoadA/V3StoreA/V4StoreA require 16-byte alignment.
	PX_ASSERT((size_t(&actor2World)&15) == 0);
	PX_ASSERT((size_t(&shape2Actor)&15) == 0);
	PX_ASSERT((size_t(&outTransform)&15) == 0);

	const Vec3V actor2WorldPos = V3LoadA(actor2World.p);
	const QuatV actor2WorldRot = QuatVLoadA(&actor2World.q.x);

	const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
	const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);

	Vec3V v,p;
	FloatV w;
	// Compose the two transforms with the quaternion split into (w, v).
	transformFast(V4GetW(actor2WorldRot), Vec3V_From_Vec4V(actor2WorldRot), actor2WorldPos,
				  V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
				  w, v, p);

	// Pack the result: position, then quaternion stored as (v.x, v.y, v.z, w).
	V3StoreA(p, outTransform.p);
	V4StoreA(V4SetW(v,w), &outTransform.q.x);
}
// PT: body2World * body2Actor.getInverse() * shape2Actor
// Computes the world-space pose of a shape on a dynamic body:
// outTransform = body2World * inverse(body2Actor) * shape2Actor,
// using aligned SIMD loads/stores. All transforms must be 16-byte aligned.
PX_FORCE_INLINE void getDynamicGlobalPoseAligned(const PxTransform& body2World, const PxTransform& shape2Actor, const PxTransform& body2Actor, PxTransform& outTransform)
{
	// Aligned SIMD loads/stores below require 16-byte aligned transforms.
	PX_ASSERT((size_t(&body2World)&15) == 0);
	PX_ASSERT((size_t(&shape2Actor)&15) == 0);
	PX_ASSERT((size_t(&body2Actor)&15) == 0);
	PX_ASSERT((size_t(&outTransform)&15) == 0);

	using namespace aos;

	const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
	const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);

	const Vec3V body2ActorPos = V3LoadA(body2Actor.p);
	const QuatV body2ActorRot = QuatVLoadA(&body2Actor.q.x);

	const Vec3V body2WorldPos = V3LoadA(body2World.p);
	const QuatV body2WorldRot = QuatVLoadA(&body2World.q.x);

	Vec3V v1, p1, v2, p2;
	FloatV w1, w2;

	// (w1, v1, p1) = inverse(body2Actor) * shape2Actor
	transformInvFast(V4GetW(body2ActorRot), Vec3V_From_Vec4V(body2ActorRot), body2ActorPos,
					 V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
					 w1, v1, p1);
	// (w2, v2, p2) = body2World * (inverse(body2Actor) * shape2Actor)
	transformFast(V4GetW(body2WorldRot), Vec3V_From_Vec4V(body2WorldRot), body2WorldPos,
				  w1, v1, p1,
				  w2, v2, p2);

	// Pack the result: position, then quaternion stored as (v2.x, v2.y, v2.z, w2).
	V3StoreA(p2, outTransform.p);
	V4StoreA(V4SetW(v2, w2), &outTransform.q.x);
}
}
}
#endif

View File

@@ -0,0 +1,301 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_UTILS_H
#define CM_UTILS_H
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxBounds3.h"
#include "common/PxBase.h"
#include "foundation/PxInlineArray.h"
#include "foundation/PxArray.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
namespace physx
{
namespace Cm
{
// Copies (and statically downcasts) a slice of 'src' into a user-provided pointer
// buffer. Copying starts at 'startIndex', writes at most 'bufferSize' entries, and
// the number of entries actually written is returned (0 when startIndex is past the end).
template<class DstType, class SrcType>
PX_FORCE_INLINE PxU32 getArrayOfPointers(DstType** PX_RESTRICT userBuffer, PxU32 bufferSize, PxU32 startIndex, SrcType*const* PX_RESTRICT src, PxU32 size)
{
	if(startIndex >= size)
		return 0;	// nothing left to copy

	const PxU32 available = size - startIndex;
	const PxU32 nbToWrite = available < bufferSize ? available : bufferSize;

	SrcType*const* cursor = src + startIndex;
	for(PxU32 i=0; i<nbToWrite; i++)
		userBuffer[i] = static_cast<DstType*>(cursor[i]);

	return nbToWrite;
}
// Computes mIInv = M * diag(invD) * M^T, i.e. expresses a diagonal (inverse)
// inertia tensor 'invD' in another frame via rotation matrix M. The result is
// symmetric, so only six unique terms are computed and mirrored.
PX_CUDA_CALLABLE PX_INLINE void transformInertiaTensor(const PxVec3& invD, const PxMat33& M, PxMat33& mIInv)
{
	// Rows of M scaled by the diagonal entries (a* = invD.x terms, b* = invD.y, c* = invD.z).
	const float	axx = invD.x*M(0,0), axy = invD.x*M(1,0), axz = invD.x*M(2,0);
	const float	byx = invD.y*M(0,1), byy = invD.y*M(1,1), byz = invD.y*M(2,1);
	const float	czx = invD.z*M(0,2), czy = invD.z*M(1,2), czz = invD.z*M(2,2);

	// Diagonal of the sandwich product.
	mIInv(0,0) = axx*M(0,0) + byx*M(0,1) + czx*M(0,2);
	mIInv(1,1) = axy*M(1,0) + byy*M(1,1) + czy*M(1,2);
	mIInv(2,2) = axz*M(2,0) + byz*M(2,1) + czz*M(2,2);

	// Off-diagonal terms, mirrored across the diagonal.
	mIInv(0,1) = mIInv(1,0) = axx*M(1,0) + byx*M(1,1) + czx*M(1,2);
	mIInv(0,2) = mIInv(2,0) = axx*M(2,0) + byx*M(2,1) + czx*M(2,2);
	mIInv(1,2) = mIInv(2,1) = axy*M(2,0) + byy*M(2,1) + czy*M(2,2);
}
// PT: TODO: refactor this with PxBounds3 header
// Computes the axis-aligned half-extents of an oriented box described by three
// basis vectors and per-axis extents: for each world axis, the farthest reach is
// the sum of the absolute contributions of the three scaled basis vectors.
PX_FORCE_INLINE PxVec3 basisExtent(const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
	// Scale each basis vector by the extent along its own axis.
	const PxVec3 x = basis0 * extent.x;
	const PxVec3 y = basis1 * extent.y;
	const PxVec3 z = basis2 * extent.z;

	// Sum of absolute components gives the max distance along each world axis.
	PxVec3 halfDiag;
	halfDiag.x = PxAbs(x.x) + PxAbs(y.x) + PxAbs(z.x);
	halfDiag.y = PxAbs(x.y) + PxAbs(y.y) + PxAbs(z.y);
	halfDiag.z = PxAbs(x.z) + PxAbs(y.z) + PxAbs(z.z);
	return halfDiag;
}
// Computes the world-space AABB of an oriented box from its center, basis vectors
// and per-axis extents.
PX_FORCE_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
	const PxVec3 halfDiag = basisExtent(basis0, basis1, basis2, extent);
	return PxBounds3(center - halfDiag, center + halfDiag);
}
// Validates a (center, extents) bounds pair: both must be finite, and the extents
// must either be all non-negative (a regular box) or all equal to
// -PX_MAX_BOUNDS_EXTENTS (the canonical "empty bounds" sentinel).
PX_FORCE_INLINE bool isValid(const PxVec3& c, const PxVec3& e)
{
	if(!c.isFinite() || !e.isFinite())
		return false;

	if(e.x >= 0.0f && e.y >= 0.0f && e.z >= 0.0f)
		return true;	// regular, non-empty box

	// Otherwise only the exact empty-bounds encoding is accepted.
	return (e.x == -PX_MAX_BOUNDS_EXTENTS) &&
		   (e.y == -PX_MAX_BOUNDS_EXTENTS) &&
		   (e.z == -PX_MAX_BOUNDS_EXTENTS);
}
// Empty bounds are encoded with negative extents (see isValid); the center is
// only consulted by the validity assert.
PX_FORCE_INLINE bool isEmpty(const PxVec3& c, const PxVec3& e)
{
	PX_ASSERT(isValid(c, e));
	PX_UNUSED(c);
	return e.x < 0.0f;
}
// Array with externally managed storage.
// Allocation and resize policy are managed by the owner,
// Very minimal functionality right now, just POD types
//
// The owner supplies a 'realloc' member function that (re)allocates mData and
// updates mCapacity; this class never allocates or frees memory itself.
template <typename T,
		  typename Owner,
		  typename IndexType,
		  void (Owner::*realloc)(T*& currentMem, IndexType& currentCapacity, IndexType size, IndexType requiredMinCapacity)>
class OwnedArray
{
public:
	OwnedArray()
		: mData(0)
		, mCapacity(0)
		, mSize(0)
	{}

	~OwnedArray()		// owner must call releaseMem before destruction
	{
		PX_ASSERT(mCapacity==0);
	}

	// Appends a copy of 'element', growing via the owner's realloc when full.
	void pushBack(T& element, Owner& owner)
	{
		// there's a failure case if here if we push an existing element which causes a resize -
		// a rare case not worth coding around; if you need it, copy the element then push it.
		PX_ASSERT(&element<mData || &element>=mData+mSize);
		if(mSize==mCapacity)
			(owner.*realloc)(mData, mCapacity, mSize, IndexType(mSize+1));

		PX_ASSERT(mData && mSize<mCapacity);
		mData[mSize++] = element;
	}

	IndexType size() const
	{
		return mSize;
	}

	// Unordered removal: overwrites 'index' with the last element and shrinks by one.
	void replaceWithLast(IndexType index)
	{
		PX_ASSERT(index<mSize);
		mData[index] = mData[--mSize];
	}

	T* begin() const
	{
		return mData;
	}

	T* end() const
	{
		return mData+mSize;
	}

	T& operator [](IndexType index)
	{
		PX_ASSERT(index<mSize);
		return mData[index];
	}

	const T& operator [](IndexType index) const
	{
		PX_ASSERT(index<mSize);
		return mData[index];
	}

	// Asks the owner to grow storage to at least 'capacity' elements.
	void reserve(IndexType capacity, Owner &owner)
	{
		if(capacity>=mCapacity)
			(owner.*realloc)(mData, mCapacity, mSize, capacity);
	}

	// Empties the array and asks the owner to free the storage; must be called
	// before destruction (the destructor asserts mCapacity==0).
	void releaseMem(Owner &owner)
	{
		mSize = 0;
		(owner.*realloc)(mData, mCapacity, 0, 0);
	}

private:
	T*			mData;
	IndexType	mCapacity;
	IndexType	mSize;

	// just in case someone tries to use a non-POD in here
	union FailIfNonPod
	{
		T t;
		int x;
	};
};
/**
Any object deriving from PxBase needs to call this function instead of 'delete object;'.

We don't want to implement 'operator delete' in PxBase because that would impose how
memory of derived classes is allocated. Even though most or all of the time derived classes will
be user allocated, we don't want to put UserAllocatable into the API and derive from that.
*/
template<typename T>
PX_INLINE void deletePxBase(T* object)
{
	// Free the memory only when the object owns it; otherwise (e.g. constructed
	// in-place inside an externally owned buffer) just run the destructor.
	if(object->getBaseFlags() & PxBaseFlag::eOWNS_MEMORY)
	{
		PX_DELETE(object);
	}
	else
		object->~T();
}
// Padding fill patterns (repeating 0xcd byte) for 8/16/32-bit fields.
#define PX_PADDING_8		0xcd
#define PX_PADDING_16		0xcdcd
#define PX_PADDING_32		0xcdcdcdcd

/**
Macro to instantiate a type for serialization testing.
Note: Only use PX_NEW_SERIALIZED once in a scope.
*/
// NOTE(review): in checked builds this expands to multiple statements and declares
// a local '_buf', so it must not be used as the sole statement of an if/else.
#if PX_CHECKED
#define PX_NEW_SERIALIZED(v,T)	\
	void* _buf = physx::PxReflectionAllocator<T>().allocate(sizeof(T), PX_FL);	\
	PxMarkSerializedMemory(_buf, sizeof(T));	\
	v = PX_PLACEMENT_NEW(_buf, T)
#else
#define PX_NEW_SERIALIZED(v,T)	v = PX_NEW(T)
#endif
// Helper exposing PxArray's protected internals (mData, mSize, capacity) for
// serialization. Used only via casts of existing PxArray references, so it must
// not add any data members.
template<typename T, class Alloc>
struct ArrayAccess: public PxArray<T, Alloc>
{
	// Writes the array's whole backing buffer (full capacity, not just size) to the stream.
	void store(PxSerializationContext& context) const
	{
		if(this->mData && (this->mSize || this->capacity()))
			context.writeData(this->mData, this->capacity()*sizeof(T));
	}

	// Repoints mData at the matching extra-data block read back from the stream.
	void load(PxDeserializationContext& context)
	{
		if(this->mData && (this->mSize || this->capacity()))
			this->mData = context.readExtraData<T>(this->capacity());
	}
};
// Serializes a PxArray's backing buffer via ArrayAccess::store.
template<typename T, typename Alloc>
void exportArray(const PxArray<T, Alloc>& a, PxSerializationContext& context)
{
	static_cast<const ArrayAccess<T, Alloc>&>(a).store(context);
}
// Restores a PxArray's backing buffer via ArrayAccess::load.
template<typename T, typename Alloc>
void importArray(PxArray<T, Alloc>& a, PxDeserializationContext& context)
{
	static_cast<ArrayAccess<T, Alloc>&>(a).load(context);
}
// Serializes a PxInlineArray: only touches the stream when the data has spilled
// out of the inline storage onto the heap.
template<typename T, PxU32 N, typename Alloc>
void exportInlineArray(const PxInlineArray<T, N, Alloc>& a, PxSerializationContext& context)
{
	if(!a.isInlined())
		Cm::exportArray(a, context);
}
// Restores a PxInlineArray: only reads extra data when the array was serialized
// with heap (non-inline) storage.
template<typename T, PxU32 N, typename Alloc>
void importInlineArray(PxInlineArray<T, N, Alloc>& a, PxDeserializationContext& context)
{
	if(!a.isInlined())
		Cm::importArray(a, context);
}
// Makes room for 'nb' additional elements (growing with amortized doubling),
// bumps the array's size, and returns a pointer to the first of the new slots.
// The new slots are not initialized here; the caller is expected to fill them.
template<class T>
static PX_INLINE T* reserveContainerMemory(PxArray<T>& container, PxU32 nb)
{
	const PxU32 capacity = container.capacity();
	const PxU32 targetSize = container.size() + nb;

	if(targetSize > capacity)
	{
		// Grow by at least doubling to keep pushes amortized O(1).
		const PxU32 doubled = capacity ? capacity*2 : 2;
		container.reserve(PxMax(targetSize, doubled));
	}

	T* writePtr = container.end();
	container.forceSize_Unsafe(targetSize);
	return writePtr;
}
} // namespace Cm
}
#endif

View File

@@ -0,0 +1,154 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmVisualization.h"
using namespace physx;
using namespace Cm;
// Color shared by all joint-limit debug visualization below.
static const PxU32 gLimitColor = PxU32(PxDebugColor::eARGB_YELLOW);
// Draws both joint frames: the parent frame 1.5x larger in dark axis colors, the
// child frame at normal size with the default basis colors. scale==0 disables drawing.
void Cm::visualizeJointFrames(PxRenderOutput& out, PxReal scale, const PxTransform& parent, const PxTransform& child)
{
	if(scale==0.0f)
		return;

	out << parent << PxDebugBasis(PxVec3(scale, scale, scale) * 1.5f,
		PxU32(PxDebugColor::eARGB_DARKRED), PxU32(PxDebugColor::eARGB_DARKGREEN), PxU32(PxDebugColor::eARGB_DARKBLUE));
	out << child << PxDebugBasis(PxVec3(scale, scale, scale));
}
// Draws a linear limit along the x-axis of frame t0: an arrow from the frame
// origin to the limit position at distance 'value', plus a circle at the limit
// plane. t1 is unused. scale==0 disables drawing.
void Cm::visualizeLinearLimit(PxRenderOutput& out, PxReal scale, const PxTransform& t0, const PxTransform& /*t1*/, PxReal value)
{
	if(scale==0.0f)
		return;

	// debug circle is around z-axis, and we want it around x-axis
	PxTransform r(t0.p+value*t0.q.getBasisVector0(), t0.q*PxQuat(PxPi/2,PxVec3(0,1.f,0)));

	out << gLimitColor;
	out << PxTransform(PxIdentity);
	out << PxDebugArrow(t0.p,r.p-t0.p);

	out << r << PxDebugCircle(20, scale*0.3f);
}
// Draws an angular limit in the y-z plane of frame t: the two boundary rays at
// 'lower' and 'upper' (radians), plus a 20-segment arc spanning the range.
// scale==0 disables drawing.
void Cm::visualizeAngularLimit(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal lower, PxReal upper)
{
	if(scale==0.0f)
		return;

	out << t << gLimitColor;

	// Boundary rays from the origin to each limit angle.
	out << PxRenderOutput::LINES
		<< PxVec3(0) << PxVec3(0, PxCos(lower), PxSin(lower)) * scale
		<< PxVec3(0) << PxVec3(0, PxCos(upper), PxSin(upper)) * scale;

	// Arc between the two limits.
	out << PxRenderOutput::LINESTRIP;
	PxReal angle = lower, step = (upper-lower)/20;
	for(PxU32 i=0; i<=20; i++, angle += step)
		out << PxVec3(0, PxCos(angle), PxSin(angle)) * scale;
}
// Draws the boundary of a swing-limit cone around the x-axis of frame t, as 32 rim
// segments plus a spoke from the apex to each sample. tanQSwingY/tanQSwingZ are the
// swing limits in tan-quarter-angle form (PhysX swing parameterization) —
// NOTE(review): confirm against the joint swing-limit math.
void Cm::visualizeLimitCone(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ)
{
	if(scale==0.0f)
		return;

	out << t << gLimitColor;
	out << PxRenderOutput::LINES;

	PxVec3 prev(0,0,0);

	const PxU32 LINES = 32;
	for(PxU32 i=0;i<=LINES;i++)
	{
		PxReal angle = 2*PxPi/LINES*i;
		PxReal c = PxCos(angle), s = PxSin(angle);

		// Build the swing rotation for this sample from the rational quaternion
		// q = (2*rv, 1-|rv|^2) / (1+|rv|^2), then rotate the x-axis onto the cone.
		PxVec3 rv(0,-tanQSwingZ*s, tanQSwingY*c);
		PxReal rv2 = rv.magnitudeSquared();
		PxQuat q = PxQuat(0,2*rv.y,2*rv.z,1-rv2) * (1/(1+rv2));
		PxVec3 a = q.rotate(PxVec3(1.0f,0,0)) * scale;

		// One segment along the rim, one spoke from the apex.
		out << prev << a << PxVec3(0) << a;
		prev = a;
	}
}
// Draws a double cone around the x-axis of frame t, opening by 'angle' (radians)
// on each side: the two circular rims at x = +/-tan(angle), plus spokes from the
// shared apex at the origin to every rim sample. scale==0 disables drawing.
//
// Note: the previous version still carried dead reassignments of 'angle'
// ("angle = 0" and "angle += PxPi*2/LINES" loop increments) left over from a
// refactor to "step * i" — they were never read and have been removed. The
// emitted vertex sequence is unchanged.
void Cm::visualizeDoubleCone(PxRenderOutput& out, PxReal scale, const PxTransform& t, PxReal angle)
{
	if(scale==0.0f)
		return;

	out << t << gLimitColor;

	const PxReal height = PxTan(angle);

	const PxU32 LINES = 32;
	const PxReal step = PxPi*2/LINES;

	// Rim of the +x cone.
	out << PxRenderOutput::LINESTRIP;
	for(PxU32 i=0; i<=LINES; i++)
		out << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;

	// Rim of the -x cone.
	out << PxRenderOutput::LINESTRIP;
	for(PxU32 i=0; i<=LINES; i++)
		out << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;

	// Spokes from the apex to both rims.
	out << PxRenderOutput::LINES;
	for(PxU32 i=0; i<LINES; i++)
	{
		out << PxVec3(0) << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;
		out << PxVec3(0) << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;
	}
}
// Out-of-line wrapper over PxRenderOutput's force-inlined stream operator (reduces code size at call sites).
void Cm::renderOutputDebugBox(PxRenderOutput& out, const PxBounds3& box)
{
	out << PxDebugBox(box, true);
}
// Out-of-line wrapper: draws a debug circle with 's' segments and radius 'r'.
void Cm::renderOutputDebugCircle(PxRenderOutput& out, PxU32 s, PxReal r)
{
	out << PxDebugCircle(s, r);
}
// Out-of-line wrapper: draws a coordinate-basis gizmo.
void Cm::renderOutputDebugBasis(PxRenderOutput& out, const PxDebugBasis& basis)
{
	out << basis;
}
// Out-of-line wrapper: draws a debug arrow.
void Cm::renderOutputDebugArrow(PxRenderOutput& out, const PxDebugArrow& arrow)
{
	out << arrow;
}

View File

@@ -0,0 +1,125 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_VISUALIZATION_H
#define CM_VISUALIZATION_H
#include "foundation/PxTransform.h"
#include "common/PxRenderOutput.h"
#include "PxConstraintDesc.h"
namespace physx
{
namespace Cm
{
// PT: the force-inlined functions in PxRenderOutput generate a lot of code. Use these non-inlined functions instead.
PX_PHYSX_COMMON_API void renderOutputDebugBox(PxRenderOutput& out, const PxBounds3& box);
PX_PHYSX_COMMON_API void renderOutputDebugCircle(PxRenderOutput& out, PxU32 s, PxReal r);
PX_PHYSX_COMMON_API void renderOutputDebugBasis(PxRenderOutput& out, const PxDebugBasis& basis);
PX_PHYSX_COMMON_API void renderOutputDebugArrow(PxRenderOutput& out, const PxDebugArrow& arrow);
PX_PHYSX_COMMON_API void visualizeJointFrames(PxRenderOutput& out,
PxReal scale,
const PxTransform& parent,
const PxTransform& child);
PX_PHYSX_COMMON_API void visualizeLinearLimit(PxRenderOutput& out,
PxReal scale,
const PxTransform& t0,
const PxTransform& t1,
PxReal value);
PX_PHYSX_COMMON_API void visualizeAngularLimit(PxRenderOutput& out,
PxReal scale,
const PxTransform& t0,
PxReal lower,
PxReal upper);
PX_PHYSX_COMMON_API void visualizeLimitCone(PxRenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal ySwing,
PxReal zSwing);
PX_PHYSX_COMMON_API void visualizeDoubleCone(PxRenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal angle);
// PxConstraintVisualizer implementation that forwards every callback to the
// corresponding Cm:: helper above, applying the stored frame/limit scales and
// writing into the wrapped PxRenderOutput.
struct ConstraintImmediateVisualizer : public PxConstraintVisualizer
{
	PxF32			mFrameScale;	// scale applied to joint-frame gizmos
	PxF32			mLimitScale;	// scale applied to limit visualizations
	PxRenderOutput&	mCmOutput;		// destination stream for all debug geometry

	//Not possible to implement
	ConstraintImmediateVisualizer& operator=( const ConstraintImmediateVisualizer& );

	ConstraintImmediateVisualizer(PxF32 frameScale, PxF32 limitScale, PxRenderOutput& output) :
		mFrameScale	(frameScale),
		mLimitScale	(limitScale),
		mCmOutput	(output)
	{
	}

	virtual void visualizeJointFrames(const PxTransform& parent, const PxTransform& child)	PX_OVERRIDE
	{
		Cm::visualizeJointFrames(mCmOutput, mFrameScale, parent, child);
	}

	virtual void visualizeLinearLimit(const PxTransform& t0, const PxTransform& t1, PxReal value)	PX_OVERRIDE
	{
		Cm::visualizeLinearLimit(mCmOutput, mLimitScale, t0, t1, value);
	}

	virtual void visualizeAngularLimit(const PxTransform& t0, PxReal lower, PxReal upper)	PX_OVERRIDE
	{
		Cm::visualizeAngularLimit(mCmOutput, mLimitScale, t0, lower, upper);
	}

	virtual void visualizeLimitCone(const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ)	PX_OVERRIDE
	{
		Cm::visualizeLimitCone(mCmOutput, mLimitScale, t, tanQSwingY, tanQSwingZ);
	}

	virtual void visualizeDoubleCone(const PxTransform& t, PxReal angle)	PX_OVERRIDE
	{
		Cm::visualizeDoubleCone(mCmOutput, mLimitScale, t, angle);
	}

	// Draws a single colored segment directly, no scaling applied.
	virtual void visualizeLine( const PxVec3& p0, const PxVec3& p1, PxU32 color)	PX_OVERRIDE
	{
		mCmOutput << color;
		mCmOutput.outputSegment(p0, p1);
	}
};
}
}
#endif

View File

@@ -0,0 +1,87 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/windows/PxWindowsDelayLoadHook.h"
#include "foundation/windows/PxWindowsInclude.h"
#include "windows/CmWindowsLoadLibrary.h"
// User-installed hook consulted during delay-loading of dependent PhysX DLLs; NULL by default.
static const physx::PxDelayLoadHook* gCommonDelayLoadHook = NULL;

// Installs (or clears, with NULL) the delay-load hook used by commonDelayHook below.
void physx::PxSetPhysXCommonDelayLoadHook(const physx::PxDelayLoadHook* hook)
{
	gCommonDelayLoadHook = hook;
}
// delay loading is enabled only for non static configuration
#if !defined PX_PHYSX_STATIC_LIB
// Prior to Visual Studio 2015 Update 3, these hooks were non-const.
#define DELAYIMP_INSECURE_WRITABLE_HOOKS
#include <delayimp.h>
using namespace physx;
#pragma comment(lib, "delayimp")
// MSVC delay-load notification hook. Only the pre-load notification is acted on:
// it lets the user-installed PxDelayLoadHook redirect the DLL being loaded.
// Every other notification (start/end of processing, GetProcAddress, failures)
// falls through to the default behavior by returning NULL.
FARPROC WINAPI commonDelayHook(unsigned dliNotify, PDelayLoadInfo pdli)
{
	if(dliNotify == dliNotePreLoadLibrary)
		return Cm::physXCommonDliNotePreLoadLibrary(pdli->szDll, gCommonDelayLoadHook);

	return NULL;
}
// Registers our hook in the delay-load helper's notification slot.
PfnDliHook __pfnDliNotifyHook2 = commonDelayHook;
#endif

View File

@@ -0,0 +1,134 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
#define NX_USE_SDK_DLLS
#include "PhysXUpdateLoader.h"
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
#include "windows/CmWindowsModuleUpdateLoader.h"
#include "windows/CmWindowsLoadLibrary.h"
#include "stdio.h"
namespace physx { namespace Cm {
#if PX_VC
#pragma warning(disable: 4191) //'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
#endif
typedef HMODULE (*GetUpdatedModule_FUNC)(const char*, const char*);
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
typedef void (*setLogging_FUNC)(PXUL_ErrorCode, pt2LogFunc);
// Callback handed to the update-loader DLL: routes its log messages into the
// PhysX foundation error stream at the matching severity. (PX_WARN/PX_INFO are
// presumably severity+location macros — see their definitions.)
static void LogMessage(PXUL_ErrorCode messageType, char* message)
{
	switch(messageType)
	{
	case PXUL_ERROR_MESSAGES:
		getFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL,
			"PhysX Update Loader Error: %s.", message);
		break;
	case PXUL_WARNING_MESSAGES:
		getFoundation().error(PX_WARN, "PhysX Update Loader Warning: %s.", message);
		break;
	case PXUL_INFO_MESSAGES:
		getFoundation().error(PX_INFO, "PhysX Update Loader Information: %s.", message);
		break;
	default:
		getFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL,
			"Unknown message type from update loader.");
		break;
	}
}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
// Attempts to load the PhysX update-loader DLL and resolve its "GetUpdatedModule"
// entry point. On any failure the members stay NULL and LoadModule() falls back
// to loading requested DLLs directly.
CmModuleUpdateLoader::CmModuleUpdateLoader(const char* updateLoaderDllName)
	: mGetUpdatedModuleFunc(NULL)
{
	mUpdateLoaderDllHandle = loadLibrary(updateLoaderDllName);

	if (mUpdateLoaderDllHandle != NULL)
	{
		mGetUpdatedModuleFunc = GetProcAddress(mUpdateLoaderDllHandle, "GetUpdatedModule");

#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
		// Optionally wire our LogMessage callback into the update loader.
		setLogging_FUNC setLoggingFunc;
		setLoggingFunc = (setLogging_FUNC)GetProcAddress(mUpdateLoaderDllHandle, "setLoggingFunction");
		if(setLoggingFunc != NULL)
		{
			setLoggingFunc(PXUL_ERROR_MESSAGES, LogMessage);
		}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
	}
}
// Releases the update-loader DLL handle acquired in the constructor, if any.
CmModuleUpdateLoader::~CmModuleUpdateLoader()
{
	if (mUpdateLoaderDllHandle == NULL)
		return;

	FreeLibrary(mUpdateLoaderDllHandle);
	mUpdateLoaderDllHandle = NULL;
}
// Loads 'moduleName': preferably through the update loader's GetUpdatedModule
// entry point (resolved in the constructor), otherwise directly via loadLibrary.
// Returns NULL on failure; the direct-load failure path logs the Win32 error code.
HMODULE CmModuleUpdateLoader::LoadModule(const char* moduleName, const char* appGUID)
{
	// Preferred path: ask the PhysXUpdateLoader for a (possibly updated) module.
	if (mGetUpdatedModuleFunc != NULL)
	{
		GetUpdatedModule_FUNC fetchUpdatedModule = (GetUpdatedModule_FUNC)mGetUpdatedModuleFunc;
		return fetchUpdatedModule(moduleName, appGUID);
	}

	// No PhysXUpdateLoader: load the DLL directly.
	HMODULE handle = loadLibrary(moduleName);
	if (handle == NULL)
	{
		const DWORD err = GetLastError();
		printf("%s:%i: loadLibrary error when loading %s: %lu\n", PX_FL, moduleName, err);
	}
	return handle;
}
}; // end of namespace
}; // end of namespace