feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,94 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ALIGNED_MALLOC_H
#define PX_ALIGNED_MALLOC_H
#include "PxUserAllocated.h"
/*!
Allocate aligned memory.
Alignment must be a power of 2!
-- should be templated by a base allocator
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
Allocator, which is used to access the global PxAllocatorCallback instance
(used for dynamic data types template instantiation), which can align memory
*/
// SCS: AlignedMalloc with 3 params not found, seems not used on PC either
// disabled for now to avoid GCC error
template <uint32_t N, typename BaseAllocator = PxAllocator>
class PxAlignedAllocator : public BaseAllocator
{
public:
PxAlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base)
{
}
// Over-allocates by N-1 + sizeof(size_t) bytes, rounds the returned address
// up to a multiple of N, and stashes the (ptr - base) offset in the size_t
// slot immediately preceding the returned pointer so deallocate() can
// recover the original base address. N must be a power of two.
void* allocate(size_t size, const char* file, int line, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
size_t pad = N - 1 + sizeof(size_t); // store offset for delete.
uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line));
if (!base)
return NULL;
uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N
// is a size_t
// wide mask
reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset
return ptr;
}
// Reads the offset stored just before 'ptr' to recover the base address of
// the padded allocation, then frees via the base allocator. NULL is a no-op.
void deallocate(void* ptr, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
if (ptr == NULL)
return;
uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1];
BaseAllocator::deallocate(base);
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,90 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ALLOCA_H
#define PX_ALLOCA_H
#include "foundation/PxTempAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// RAII holder used by the PX_ALLOCA macro below. It owns its pointer only
// when the allocation fell back to the temp allocator (mOwned == true), not
// when it came from the stack via alloca. Members are public because the
// macro fills them in directly; both must be initialized before destruction.
template <typename T, typename Alloc = PxTempAllocator>
class PxScopedPointer : private Alloc
{
public:
~PxScopedPointer()
{
// Stack (alloca) memory must not be deallocated; only heap fallbacks are.
if(mOwned)
Alloc::deallocate(mPointer);
}
operator T*() const
{
return mPointer;
}
T* mPointer; // the allocation (stack memory or temp-allocator memory)
bool mOwned; // true when mPointer must be returned to Alloc on destruction
};
#if !PX_DOXYGEN
} // namespace physx
#endif
// Don't use inline for alloca !!!
// (alloca must expand directly in the caller's frame -- wrapping it in a
// function would release the memory as soon as that function returned.)
#if PX_WINDOWS_FAMILY
#include <malloc.h>
#define PxAlloca(x) _alloca(x)
#elif PX_LINUX
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#elif PX_APPLE_FAMILY
#include <alloca.h>
#define PxAlloca(x) alloca(x)
#elif PX_SWITCH
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#endif
// Stack-allocates x + alignment bytes and rounds the address up to the next
// multiple of 'alignment' (must be a power of two). The result is a size_t
// value that the caller casts back to a pointer.
#define PxAllocaAligned(x, alignment) ((size_t(PxAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1))
/*! Stack allocation for \c count instances of \c type. Falling back to temp allocator if using more than 4kB. */
#define PX_ALLOCA(var, type, count) \
physx::PxScopedPointer<type> var; \
{ \
const uint32_t size = sizeof(type) * (count); \
var.mOwned = size > 4096; \
if(var.mOwned) \
var.mPointer = reinterpret_cast<type*>(physx::PxTempAllocator().allocate(size, PX_FL)); \
else \
var.mPointer = reinterpret_cast<type*>(PxAlloca(size)); \
}
#endif

View File

@@ -0,0 +1,257 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ALLOCATOR_H
#define PX_ALLOCATOR_H
#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxAssert.h"
#include "foundation/PxFoundation.h"
#include "foundation/PxIO.h"
#include <stdlib.h>
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4577)
#endif
#if PX_WINDOWS_FAMILY
#if(_MSC_VER >= 1923)
#include <typeinfo>
#else
#include <typeinfo.h>
#endif
#endif
#if(PX_APPLE_FAMILY)
#include <typeinfo>
#endif
#include <new>
#if PX_VC
#pragma warning(pop)
#endif
// PT: the rules are simple:
// - PX_ALLOC/PX_ALLOCATE/PX_FREE is similar to malloc/free. Use that for POD/anything that doesn't need ctor/dtor.
// - PX_NEW/PX_DELETE is similar to new/delete. Use that for anything that needs a ctor/dtor.
// - Everything goes through the user allocator.
// - Inherit from PxUserAllocated to PX_NEW something. Do it even on small classes, it's free.
// - You cannot PX_NEW a POD. Use PX_ALLOC.
// NOTE: the 'name' argument is currently ignored here; allocations are tagged
// with file/line (PX_FL) only.
#define PX_ALLOC(n, name) physx::PxAllocator().allocate(n, PX_FL)
// PT: use this one to reduce the amount of visible reinterpret_cast
#define PX_ALLOCATE(type, count, name) reinterpret_cast<type*>(PX_ALLOC(count*sizeof(type), name))
// Frees the pointer (if non-NULL) and NULLs it out.
// NOTE(review): PX_FREE/PX_DELETE/PX_DELETE_ARRAY/PX_RELEASE expand to a bare
// if-statement, not a do { } while(0) block -- avoid using them as the lone
// statement of an if/else branch (dangling-else hazard).
#define PX_FREE(x) \
if(x) \
{ \
physx::PxAllocator().deallocate(x); \
x = NULL; \
}
#define PX_FREE_THIS physx::PxAllocator().deallocate(this)
// PT: placement new is only needed when you control where the object is created (i.e. you already have an address for it before creating the object).
// So there are basically 2 legitimate placement new usages in PhysX:
// - binary deserialization
// - arrays/pools
// If you end up writing "PX_PLACEMENT_NEW(PX_ALLOC(sizeof(X), "X")", consider deriving X from PxUserAllocated and using PX_NEW instead.
#define PX_PLACEMENT_NEW(p, T) new (p) T
// PX_NEW routes operator new through PxReflectionAllocator so the block is
// tagged with T's type name.
#define PX_NEW(T) new (physx::PxReflectionAllocator<T>(), PX_FL) T
#define PX_DELETE_THIS delete this
#define PX_DELETE(x) if(x) { delete x; x = NULL; }
#define PX_DELETE_ARRAY(x) if(x) { delete []x; x = NULL; }
#define PX_RELEASE(x) if(x) { x->release(); x = NULL; }
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Allocator used to access the global PxAllocatorCallback instance without providing additional information.
*/
class PxAllocator
{
  public:
	PX_FORCE_INLINE PxAllocator(const char* = NULL)
	{
	}

	//! Route an allocation through the global broadcast allocator callback.
	//! Zero-sized requests return NULL without touching the callback.
	static PX_FORCE_INLINE void* allocate(size_t size, const char* file, int line, uint32_t* cookie=NULL)
	{
		PX_UNUSED(cookie);
		if(!size)
			return NULL;
		return PxGetBroadcastAllocator()->allocate(size, "", file, line);
	}

	//! Return memory to the broadcast allocator callback; NULL is a no-op.
	static PX_FORCE_INLINE void deallocate(void* ptr, uint32_t* cookie=NULL)
	{
		PX_UNUSED(cookie);
		if(ptr != NULL)
			PxGetBroadcastAllocator()->deallocate(ptr);
	}
};
/**
* \brief Bootstrap allocator using malloc/free.
* Don't use unless your objects get allocated before foundation is initialized.
*/
class PxRawAllocator
{
  public:
	PxRawAllocator(const char* = 0)
	{
	}

	//! Plain ::malloc pass-through; the name/file/line tags are ignored.
	//! ::malloc already accepts size == 0, so no explicit check is needed.
	static PX_FORCE_INLINE void* allocate(size_t size, const char*, int, uint32_t* cookie=NULL)
	{
		PX_UNUSED(cookie);
		return ::malloc(size);
	}

	//! Plain ::free pass-through; ::free(NULL) is defined to do nothing.
	static PX_FORCE_INLINE void deallocate(void* ptr, uint32_t* cookie=NULL)
	{
		PX_UNUSED(cookie);
		::free(ptr);
	}
};
/**
\brief Virtual allocator callback used to provide run-time defined allocators to foundation types like Array or Bitmap.
This is used by VirtualAllocator
*/
class PxVirtualAllocatorCallback
{
public:
PxVirtualAllocatorCallback() {}
virtual ~PxVirtualAllocatorCallback() {}
// \param group user-defined allocation category, forwarded verbatim by PxVirtualAllocator.
virtual void* allocate(size_t size, int group, const char* file, int line) = 0;
virtual void deallocate(void* ptr) = 0;
};
/**
\brief Virtual allocator to be used by foundation types to provide run-time defined allocators.
Due to the fact that Array extends its allocator, rather than contains a reference/pointer to it, the VirtualAllocator must
be a concrete type containing a pointer to a virtual callback. The callback may not be available at instantiation time,
therefore methods are provided to set the callback later.
*/
class PxVirtualAllocator
{
public:
// The callback may be NULL at construction and provided later via setCallback();
// allocate/deallocate assert that it has been set by then.
PxVirtualAllocator(PxVirtualAllocatorCallback* callback = NULL, int group = 0) : mCallback(callback), mGroup(group) {}
PX_FORCE_INLINE void* allocate(size_t size, const char* file, int line, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
PX_ASSERT(mCallback);
// Zero-sized requests short-circuit to NULL without calling the callback.
if (size)
return mCallback->allocate(size, mGroup, file, line);
return NULL;
}
PX_FORCE_INLINE void deallocate(void* ptr, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
PX_ASSERT(mCallback);
if (ptr)
mCallback->deallocate(ptr);
}
PX_FORCE_INLINE void setCallback(PxVirtualAllocatorCallback* callback)
{
mCallback = callback;
}
PX_FORCE_INLINE PxVirtualAllocatorCallback* getCallback()
{
return mCallback;
}
private:
PxVirtualAllocatorCallback* mCallback;
const int mGroup; // const group tag makes the allocator non-assignable by design
// Declared but not defined: copy assignment is intentionally forbidden (pre-C++11 idiom).
PxVirtualAllocator& operator=(const PxVirtualAllocator&);
};
/**
\brief Allocator used to access the global PxAllocatorCallback instance using a static name derived from T.
*/
template <typename T>
class PxReflectionAllocator
{
// Returns a human-readable allocation name derived from T, or a fixed
// placeholder when name reporting is disabled in the broadcast allocator.
static const char* getName(bool reportAllocationNames)
{
if(!reportAllocationNames)
return "<allocation names disabled>";
#if PX_GCC_FAMILY
return __PRETTY_FUNCTION__;
#else
// name() calls malloc(), raw_name() wouldn't
return typeid(T).name();
#endif
}
public:
PxReflectionAllocator(const PxEMPTY) {}
PxReflectionAllocator(const char* = 0) {}
inline PxReflectionAllocator(const PxReflectionAllocator&) {}
// Allocates through the global broadcast allocator, tagging the block with
// T's name. Zero-sized requests return NULL without calling the allocator.
static PX_FORCE_INLINE void* allocate(size_t size, const char* filename, int line, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
if(!size)
return NULL;
bool reportAllocationNames;
PxAllocatorCallback* cb = PxGetBroadcastAllocator(&reportAllocationNames);
return cb->allocate(size, getName(reportAllocationNames), filename, line);
}
static PX_FORCE_INLINE void deallocate(void* ptr, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
if(ptr)
PxGetBroadcastAllocator()->deallocate(ptr);
}
};
// Maps a type T to its default allocator: the reflection allocator, so that
// allocations made for T are tagged with T's type name.
template <typename T>
struct PxAllocatorTraits
{
typedef PxReflectionAllocator<T> Type;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,90 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ALLOCATOR_CALLBACK_H
#define PX_ALLOCATOR_CALLBACK_H
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Abstract base class for an application defined memory allocator that can be used by the Nv library.
\note The SDK state should not be modified from within any allocation/free function.
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s).
*/
class PxAllocatorCallback
{
public:
// Virtual destructor so implementations can be destroyed through this interface.
virtual ~PxAllocatorCallback()
{
}
/**
\brief Allocates size bytes of memory, which must be 16-byte aligned.
This method should never return NULL. If you run out of memory, then
you should terminate the app or take some other appropriate action.
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param size Number of bytes to allocate.
\param typeName Name of the datatype that is being allocated
\param filename The source file which allocated the memory
\param line The source line which allocated the memory
\return The allocated block of memory.
*/
virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0;
/**
\brief Frees memory previously allocated by allocate().
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param ptr Memory to free.
*/
virtual void deallocate(void* ptr) = 0;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,41 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_AOS_H
#define PX_AOS_H
// Dispatches to the platform-specific SIMD AoS implementation.
// NOTE(review): PX_WINDOWS with PX_NEON set falls through to the #error
// branch -- presumably Windows-on-ARM is unsupported here; confirm before porting.
#if PX_WINDOWS && !PX_NEON
#include "windows/PxWindowsAoS.h"
#elif(PX_UNIX_FAMILY || PX_SWITCH)
#include "unix/PxUnixAoS.h"
#else
#error "Platform not supported!"
#endif
#endif

View File

@@ -0,0 +1,724 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ARRAY_H
#define PX_ARRAY_H
#include "foundation/PxAssert.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxMemory.h"
#include "foundation/PxIO.h"
namespace physx
{
/*!
An array is a sequential container.
Implementation note
* entries between 0 and size are valid objects
* we use inheritance to build this because the array is included inline in a lot
of objects and we want the allocator to take no space if it's not stateful, which
aggregation doesn't allow. Also, we want the metadata at the front for the inline
case where the allocator contains some inline storage space
*/
template <class T, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxArray : protected Alloc
{
public:
typedef T* Iterator;
typedef const T* ConstIterator;
// Deserialization constructor: members are assumed to already point into a
// user-provided memory block, so set the capacity sign bit to mark the
// storage as not owned (see isInUserMemory()).
explicit PxArray(const PxEMPTY v) : Alloc(v)
{
if(mData)
mCapacity |= PX_SIGN_BITMASK;
}
/*!
Default array constructor. Initialize an empty array
*/
// Constructs an empty array with no storage allocated.
PX_INLINE explicit PxArray(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
}
/*!
Initialize array with given capacity
*/
// Constructs an array holding 'size' copies of 'a'.
PX_INLINE explicit PxArray(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc())
: Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
resize(size, a);
}
/*!
Copy-constructor. Copy all entries from other array
*/
// Converting copy constructor from an array using a different allocator type.
template <class A>
PX_INLINE explicit PxArray(const PxArray<T, A>& other, const Alloc& alloc = Alloc())
: Alloc(alloc)
{
copy(other);
}
// This is necessary else the basic default copy constructor is used in the case of both arrays being of the same
// template instance
// The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words,
// the presence of a template constructor does not suppress the implicit declaration of the copy constructor.
// Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because
// 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/)
// *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ
// True copy constructor; required because the template version above never
// suppresses the implicitly declared one (see comment above).
PX_INLINE PxArray(const PxArray& other, const Alloc& alloc = Alloc()) : Alloc(alloc)
{
copy(other);
}
/*!
Initialize array with given length
*/
// Constructs from the range [first, last); an inverted range yields an empty array.
PX_INLINE explicit PxArray(const T* first, const T* last, const Alloc& alloc = Alloc())
: Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize)
{
mData = allocate(mSize);
copy(mData, mData + mSize, first);
}
/*!
Destructor
*/
PX_INLINE ~PxArray()
{
destroy(mData, mData + mSize);
// Storage deserialized from a user memory block is not ours to free.
if(capacity() && !isInUserMemory())
deallocate(mData);
}
/*!
Assignment operator. Copy content (deep-copy)
*/
// Deep-copy assignment from an array with a (possibly) different allocator type.
template <class A>
PX_INLINE PxArray& operator=(const PxArray<T, A>& rhs)
{
// Self-assignment guard: the non-template operator= below delegates here.
if(&rhs == this)
return *this;
clear();
reserve(rhs.mSize);
copy(mData, mData + rhs.mSize, rhs.mData);
mSize = rhs.mSize;
return *this;
}
PX_INLINE PxArray& operator=(const PxArray& t) // Needs to be declared, see comment at copy-constructor
{
// Delegates to the template overload with matching allocator type.
return operator=<Alloc>(t);
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
// Unchecked in release builds; the bounds assert only fires in debug.
PX_FORCE_INLINE const T& operator[](uint32_t i) const
{
PX_ASSERT(i < mSize);
return mData[i];
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
// Unchecked in release builds; the bounds assert only fires in debug.
PX_FORCE_INLINE T& operator[](uint32_t i)
{
PX_ASSERT(i < mSize);
return mData[i];
}
/*!
Returns a pointer to the initial element of the array.
\return
a pointer to the initial element of the array.
*/
PX_FORCE_INLINE ConstIterator begin() const
{
return mData;
}
PX_FORCE_INLINE Iterator begin()
{
return mData;
}
/*!
Returns an iterator beyond the last element of the array. Do not dereference.
\return
a pointer to the element beyond the last element of the array.
*/
// One-past-the-end iterator; do not dereference.
PX_FORCE_INLINE ConstIterator end() const
{
return mData + mSize;
}
// One-past-the-end iterator; do not dereference.
PX_FORCE_INLINE Iterator end()
{
return mData + mSize;
}
/*!
Returns a reference to the first element of the array. Undefined if the array is empty.
\return a reference to the first element of the array
*/
// Undefined on an empty array (debug assert only).
PX_FORCE_INLINE const T& front() const
{
PX_ASSERT(mSize);
return mData[0];
}
// Undefined on an empty array (debug assert only).
PX_FORCE_INLINE T& front()
{
PX_ASSERT(mSize);
return mData[0];
}
/*!
Returns a reference to the last element of the array. Undefined if the array is empty
\return a reference to the last element of the array
*/
// Undefined on an empty array (debug assert only).
PX_FORCE_INLINE const T& back() const
{
PX_ASSERT(mSize);
return mData[mSize - 1];
}
// Undefined on an empty array (debug assert only).
PX_FORCE_INLINE T& back()
{
PX_ASSERT(mSize);
return mData[mSize - 1];
}
/*!
Returns the number of entries in the array. This can, and probably will,
differ from the array capacity.
\return
The number of of entries in the array.
*/
PX_FORCE_INLINE uint32_t size() const
{
return mSize;
}
/*!
Clears the array.
*/
// Destroys all elements but keeps the allocated capacity (unlike reset()).
PX_INLINE void clear()
{
destroy(mData, mData + mSize);
mSize = 0;
}
/*!
Returns whether the array is empty (i.e. whether its size is 0).
\return
true if the array is empty
*/
PX_FORCE_INLINE bool empty() const
{
return mSize == 0;
}
/*!
Finds the first occurrence of an element in the array.
\param a
The element to find.
*/
PX_INLINE Iterator find(const T& a)
{
	// Linear scan; yields end() when no element compares equal to 'a'.
	uint32_t i = 0;
	while(i < mSize && mData[i] != a)
		++i;
	return mData + i;
}
PX_INLINE ConstIterator find(const T& a) const
{
	// Linear scan; yields end() when no element compares equal to 'a'.
	uint32_t i = 0;
	while(i < mSize && mData[i] != a)
		++i;
	return mData + i;
}
/////////////////////////////////////////////////////////////////////////
/*!
Adds one element to the end of the array. Operation is O(1).
\param a
The element that will be added to this array.
*/
/////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE T& pushBack(const T& a)
{
// Slow path: out-of-line growth (presumably also safe when 'a' aliases an
// existing element -- handled inside growAndPushBack; confirm there).
if(capacity() <= mSize)
return growAndPushBack(a);
// Fast path: copy-construct in place at the end of the existing storage.
PX_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a);
return mData[mSize++];
}
/////////////////////////////////////////////////////////////////////////
/*!
Returns the element at the end of the array. Only legal if the array is non-empty.
*/
/////////////////////////////////////////////////////////////////////////
// Returns a copy of the last element, made before the stored one is destroyed.
// Undefined on an empty array (debug assert only).
PX_INLINE T popBack()
{
PX_ASSERT(mSize);
T t = mData[mSize - 1];
mData[--mSize].~T();
return t;
}
/////////////////////////////////////////////////////////////////////////
/*!
Construct one element at the end of the array. Operation is O(1).
*/
/////////////////////////////////////////////////////////////////////////
// Appends one element without initializing PODs and returns a reference to it.
PX_INLINE T& insert()
{
if(capacity() <= mSize)
grow(capacityIncrement());
T* ptr = mData + mSize++;
PX_PLACEMENT_NEW(ptr, T); // not 'T()' because PODs should not get default-initialized.
return *ptr;
}
/////////////////////////////////////////////////////////////////////////
/*!
Subtracts the element on position i from the array and replace it with
the last element.
Operation is O(1)
\param i
The position of the element that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
// O(1) unordered removal: overwrite slot i with the last element, then
// destroy the vacated last slot. Does not preserve element order.
PX_INLINE void replaceWithLast(uint32_t i)
{
PX_ASSERT(i < mSize);
mData[i] = mData[--mSize];
mData[mSize].~T();
}
// Iterator convenience overload; converts to an index and delegates.
PX_INLINE void replaceWithLast(Iterator i)
{
replaceWithLast(static_cast<uint32_t>(i - mData));
}
/////////////////////////////////////////////////////////////////////////
/*!
Replaces the first occurrence of the element a with the last element
Operation is O(n)
\param a
The position of the element that will be subtracted from this array.
\return true if the element has been removed.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE bool findAndReplaceWithLast(const T& a)
{
	// Locate the first match, then remove it in O(1) by moving the last
	// element into its slot (element order is not preserved).
	for(uint32_t i = 0; i < mSize; ++i)
	{
		if(mData[i] == a)
		{
			replaceWithLast(i);
			return true;
		}
	}
	return false;
}
/////////////////////////////////////////////////////////////////////////
/*!
Subtracts the element on position i from the array. Shift the entire
array one step.
Operation is O(n)
\param i
The position of the element that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
// Order-preserving removal: destroy element i, then shift the tail left one
// slot by copy-constructing each element into the hole and destroying its
// old location.
PX_INLINE void remove(uint32_t i)
{
PX_ASSERT(i < mSize);
T* it = mData + i;
it->~T();
while (++i < mSize)
{
PX_PLACEMENT_NEW(it, T(mData[i]));
++it;
it->~T();
}
--mSize;
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes a range from the array. Shifts the array so order is maintained.
Operation is O(n)
\param begin
The starting position of the element that will be subtracted from this array.
\param count
The number of elments that will be subtracted from this array.
*/
/////////////////////////////////////////////////////////////////////////
// Order-preserving removal of [begin, begin+count); destroys the range, then
// shifts the tail down by copy-construct + destroy per element.
PX_INLINE void removeRange(uint32_t begin, uint32_t count)
{
PX_ASSERT(begin < mSize);
PX_ASSERT((begin + count) <= mSize);
for(uint32_t i = 0; i < count; i++)
mData[begin + i].~T(); // call the destructor on the ones being removed first.
T* dest = &mData[begin]; // location we are copying the tail end objects to
T* src = &mData[begin + count]; // start of tail objects
uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down
for(uint32_t i = 0; i < move_count; i++)
{
PX_PLACEMENT_NEW(dest, T(*src)); // copy the old one to the new location
src->~T(); // call the destructor on the old location
dest++;
src++;
}
mSize -= count;
}
//////////////////////////////////////////////////////////////////////////
/*!
Resize array
*/
//////////////////////////////////////////////////////////////////////////
// Out-of-line (defined below/elsewhere): resize with fill value, and a
// variant that leaves new elements uninitialized.
PX_NOINLINE void resize(const uint32_t size, const T& a = T());
PX_NOINLINE void resizeUninitialized(const uint32_t size);
//////////////////////////////////////////////////////////////////////////
/*!
Resize array such that only as much memory is allocated to hold the
existing elements
*/
//////////////////////////////////////////////////////////////////////////
// Reallocates so capacity matches the current size exactly.
PX_INLINE void shrink()
{
recreate(mSize);
}
//////////////////////////////////////////////////////////////////////////
/*!
Deletes all array elements and frees memory.
*/
//////////////////////////////////////////////////////////////////////////
// Destroys all elements AND releases the storage (unlike clear()).
PX_INLINE void reset()
{
resize(0);
shrink();
}
//////////////////////////////////////////////////////////////////////////
/*!
Resets or clears the array depending on occupancy.
*/
//////////////////////////////////////////////////////////////////////////
// Heuristic: keep the buffer (clear) when at least half the capacity was in
// use -- it will likely be refilled -- otherwise free it (reset).
PX_INLINE void resetOrClear()
{
const PxU32 c = capacity();
const PxU32 s = size();
if(s>=c/2)
clear();
else
reset();
}
//////////////////////////////////////////////////////////////////////////
/*!
Ensure that the array has at least size capacity.
*/
//////////////////////////////////////////////////////////////////////////
// Grows storage to at least 'capacity' elements; never shrinks.
PX_INLINE void reserve(const uint32_t capacity)
{
if(capacity > this->capacity())
grow(capacity);
}
//////////////////////////////////////////////////////////////////////////
/*!
Query the capacity(allocated mem) for the array.
*/
//////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE uint32_t capacity() const
{
	// The top bit of mCapacity is the "in user memory" flag (see
	// isInUserMemory()); mask it off to get the real element capacity.
	return mCapacity & ~PX_SIGN_BITMASK;
}
//////////////////////////////////////////////////////////////////////////
/*!
Unsafe function to force the size of the array
*/
//////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE void forceSize_Unsafe(uint32_t size)
{
	// Compare against capacity() rather than raw mCapacity: the top bit of
	// mCapacity is the "in user memory" flag (see isInUserMemory()), so a raw
	// comparison is trivially true for deserialized arrays and the assert
	// would never fire there.
	PX_ASSERT(size <= capacity());
	mSize = size;
}
//////////////////////////////////////////////////////////////////////////
/*!
Swap contents of an array without allocating temporary storage
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void swap(PxArray<T, Alloc>& other)
{
	// Exchange the three members directly; no element copies or allocations.
	PxSwap(mCapacity, other.mCapacity);
	PxSwap(mSize, other.mSize);
	PxSwap(mData, other.mData);
}
//////////////////////////////////////////////////////////////////////////
/*!
Assign a range of values to this vector (resizes to length of range)
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void assign(const T* first, const T* last)
{
	// Size to the range length without constructing, then copy-construct
	// each slot from [first, last).
	resizeUninitialized(uint32_t(last - first));
	copy(begin(), end(), first);
}
// We need one bit to mark arrays that have been deserialized from a user-provided memory block.
// For alignment & memory saving purpose we store that bit in the rarely used capacity member.
PX_FORCE_INLINE uint32_t isInUserMemory() const
{
	// Non-zero when the buffer came from a user-provided memory block
	// (deserialization) and therefore must not be deallocated by this array.
	return mCapacity & PX_SIGN_BITMASK;
}
/// return reference to allocator
PX_INLINE Alloc& getAllocator()
{
	// Alloc is a base class of PxArray (see the protected constructor's
	// init list), so the array object itself is the allocator.
	return *this;
}
protected:
// constructor for where we don't own the memory
PxArray(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc())
: Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | PX_SIGN_BITMASK)
{
	// The sign bit tags the buffer as user-owned so it is never freed by
	// this array (see isInUserMemory()).
}
template <class A>
PX_NOINLINE void copy(const PxArray<T, A>& other);
// Raw buffer allocation for 'size' elements; returns NULL for size 0.
// 'cookie' is an opaque out-value paired with the matching deallocate() call.
PX_INLINE T* allocate(uint32_t size, uint32_t* cookie=NULL)
{
	if(size > 0)
	{
		T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, PX_FL, cookie));
		// NOTE(review): PxMarkSerializedMemory presumably tags the block for
		// serialization/debug tracking - confirm against its definition.
		PxMarkSerializedMemory(p, sizeof(T) * size);
		return p;
	}
	return NULL;
}
PX_INLINE void deallocate(void* mem, uint32_t* cookie=NULL)
{
	// Forward to the allocator base; 'cookie' is the value produced by the
	// matching allocate() call.
	Alloc::deallocate(mem, cookie);
}
// Copy-construct every element of [first, last) in place from 'a'.
static PX_INLINE void create(T* first, T* last, const T& a)
{
	while(first < last)
	{
		::PX_PLACEMENT_NEW(first, T(a));
		++first;
	}
}
// Copy-construct [first, last) in place from the parallel range at 'src'.
static PX_INLINE void copy(T* first, T* last, const T* src)
{
	if(first >= last)
		return;
	while(first < last)
	{
		::PX_PLACEMENT_NEW(first, T(*src));
		++first;
		++src;
	}
}
// Run the destructor on every element of [first, last).
static PX_INLINE void destroy(T* first, T* last)
{
	while(first < last)
	{
		first->~T();
		++first;
	}
}
/*!
Called when pushBack() needs to grow the array.
\param a The element that will be added to this array.
*/
PX_NOINLINE T& growAndPushBack(const T& a);
/*!
Resizes the available memory for the array.
\param capacity
The number of entries that the set should be able to hold.
*/
PX_INLINE void grow(uint32_t capacity)
{
	// Callers must only grow; recreate() handles the copy/destroy/free cycle.
	PX_ASSERT(this->capacity() < capacity);
	recreate(capacity);
}
/*!
Creates a new memory block, copies all entries to the new block and destroys old entries.
\param capacity
The number of entries that the set should be able to hold.
*/
PX_NOINLINE void recreate(uint32_t capacity);
// The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately
// it interacts badly with InlineArrays with smaller inline allocations.
// TODO(dsequeira): policy template arg, this is exactly what they're for.
PX_INLINE uint32_t capacityIncrement() const
{
	// Doubling growth policy; an empty array grows to a single element.
	const uint32_t current = this->capacity();
	if(current == 0)
		return 1;
	return current * 2;
}
T* mData;
uint32_t mSize;
uint32_t mCapacity;
};
template <class T, class Alloc>
PX_NOINLINE void PxArray<T, Alloc>::resize(const uint32_t size, const T& a)
{
	// Grow storage if needed, copy-construct any new tail elements from 'a',
	// and destroy elements beyond the new size. For a given call only one of
	// the create/destroy loops does work (the other gets an empty range).
	reserve(size);
	create(mData + mSize, mData + size, a);
	destroy(mData + size, mData + mSize);
	mSize = size;
}
template <class T, class Alloc>
template <class A>
PX_NOINLINE void PxArray<T, Alloc>::copy(const PxArray<T, A>& other)
{
	// NOTE(review): overwrites mData/mSize/mCapacity without destroying or
	// deallocating any previous contents - looks intended only for
	// freshly-constructed arrays (e.g. copy constructor); verify call sites.
	if(!other.empty())
	{
		mData = allocate(mSize = mCapacity = other.size());
		copy(mData, mData + mSize, other.begin());
	}
	else
	{
		mData = NULL;
		mSize = 0;
		mCapacity = 0;
	}
	// mData = allocate(other.mSize);
	// mSize = other.mSize;
	// mCapacity = other.mSize;
	// copy(mData, mData + mSize, other.mData);
}
template <class T, class Alloc>
PX_NOINLINE void PxArray<T, Alloc>::resizeUninitialized(const uint32_t size)
{
	// Changes the logical size only: new slots are NOT constructed and
	// shrunk-away elements are NOT destroyed - the caller is responsible
	// for initializing/cleaning up the affected range.
	reserve(size);
	mSize = size;
}
// Slow path of pushBack(): allocate a larger buffer, move the existing
// elements over, append 'a', then release the old buffer.
template <class T, class Alloc>
PX_NOINLINE T& PxArray<T, Alloc>::growAndPushBack(const T& a)
{
	const uint32_t capacity = capacityIncrement();

	uint32_t cookie;
	T* newData = allocate(capacity, &cookie);
	PX_ASSERT((!capacity) || (newData && (newData != mData)));
	copy(newData, newData + mSize, mData);

	// inserting element before destroying old array
	// avoids referencing destroyed object when duplicating array element.
	PX_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a);

	destroy(mData, mData + mSize);
	// User-provided (deserialized) buffers are never freed by the array.
	if(!isInUserMemory())
		deallocate(mData, &cookie);
	mData = newData;
	mCapacity = capacity; // plain store: also clears the user-memory flag bit
	return mData[mSize++];
}
// Reallocate to exactly 'capacity' entries: copy-construct the existing
// elements into the new block, destroy the old ones, and free the old block
// unless it lives in user-provided memory.
template <class T, class Alloc>
PX_NOINLINE void PxArray<T, Alloc>::recreate(uint32_t capacity)
{
	uint32_t cookie;
	T* newData = allocate(capacity, &cookie);
	PX_ASSERT((!capacity) || (newData && (newData != mData)));
	copy(newData, newData + mSize, mData);
	destroy(mData, mData + mSize);
	if(!isInUserMemory())
		deallocate(mData, &cookie);
	mData = newData;
	mCapacity = capacity; // plain store: also clears the user-memory flag bit
}
// Non-member convenience overload forwarding to the member swap.
template <class T, class Alloc>
PX_INLINE void swap(PxArray<T, Alloc>& a, PxArray<T, Alloc>& b)
{
	a.swap(b);
}
} // namespace physx
#endif

View File

@@ -0,0 +1,85 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ASSERT_H
#define PX_ASSERT_H
#include <stdint.h>
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
 * \brief Built-in assert function
 *
 * Invoked when an enabled assert macro fires. 'ignore' is the per-assert-site
 * flag declared inside each macro below; the handler may set it to true to
 * suppress further reports from that site.
 */
PX_FOUNDATION_API void PxAssert(const char* exp, const char* file, int line, bool& ignore);
#if !PX_ENABLE_ASSERTS
// Asserts compiled out: all three macros expand to a no-op expression.
#define PX_ASSERT(exp) ((void)0)
#define PX_ALWAYS_ASSERT_MESSAGE(exp) ((void)0)
#define PX_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
#else
#if PX_VC
#define PX_CODE_ANALYSIS_ASSUME(exp) \
__analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a PX_ASSERT is used
// to "guard" illegal mem access, for example.
#else
#define PX_CODE_ANALYSIS_ASSUME(exp)
#endif
// Evaluates 'exp'; on failure calls PxAssert() unless the static per-site
// _ignore flag was set by a previous report from the same site.
#define PX_ASSERT(exp) \
{ \
static bool _ignore = false; \
((void)((!!(exp)) || (!_ignore && (physx::PxAssert(#exp, PX_FL, _ignore), false)))); \
PX_CODE_ANALYSIS_ASSUME(exp); \
}
// Unconditionally reports; 'exp' here is a message string, not a condition.
#define PX_ALWAYS_ASSERT_MESSAGE(exp) \
{ \
static bool _ignore = false; \
if(!_ignore) \
physx::PxAssert(exp, PX_FL, _ignore); \
}
// Like PX_ASSERT but reports 'message' instead of the stringized expression.
#define PX_ASSERT_WITH_MESSAGE(exp, message) \
{ \
static bool _ignore = false; \
((void)((!!(exp)) || (!_ignore && (physx::PxAssert(message, PX_FL, _ignore), false)))); \
PX_CODE_ANALYSIS_ASSUME(exp); \
}
#endif // !PX_ENABLE_ASSERTS
#define PX_ALWAYS_ASSERT() PX_ASSERT(0)
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,79 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ATOMIC_H
#define PX_ATOMIC_H
#include "foundation/PxFoundationConfig.h"
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/* set *dest equal to val. Return the old value of *dest */
PX_FOUNDATION_API PxI32 PxAtomicExchange(volatile PxI32* dest, PxI32 val);
PX_FOUNDATION_API PxI64 PxAtomicExchange(volatile PxI64* dest, PxI64 val);

/* if *dest == comp, replace with exch. Return original value of *dest */
PX_FOUNDATION_API PxI32 PxAtomicCompareExchange(volatile PxI32* dest, PxI32 exch, PxI32 comp);
PX_FOUNDATION_API PxI64 PxAtomicCompareExchange(volatile PxI64* dest, PxI64 exch, PxI64 comp);

/* if *dest == comp, replace with exch. Return original value of *dest */
PX_FOUNDATION_API void* PxAtomicCompareExchangePointer(volatile void** dest, void* exch, void* comp);

/* increment the specified location. Return the incremented value */
PX_FOUNDATION_API PxI32 PxAtomicIncrement(volatile PxI32* val);
PX_FOUNDATION_API PxI64 PxAtomicIncrement(volatile PxI64* val);

/* decrement the specified location. Return the decremented value */
PX_FOUNDATION_API PxI32 PxAtomicDecrement(volatile PxI32* val);
PX_FOUNDATION_API PxI64 PxAtomicDecrement(volatile PxI64* val);

/* add delta to *val. Return the new value */
PX_FOUNDATION_API PxI32 PxAtomicAdd(volatile PxI32* val, PxI32 delta);
PX_FOUNDATION_API PxI64 PxAtomicAdd(volatile PxI64* val, PxI64 delta);

/* store the maximum of *val and val2 back into *val. Return the new value */
PX_FOUNDATION_API PxI32 PxAtomicMax(volatile PxI32* val, PxI32 val2);
PX_FOUNDATION_API PxI64 PxAtomicMax(volatile PxI64* val, PxI64 val2);

/* OR mask into *val. Return the new value */
PX_FOUNDATION_API PxI32 PxAtomicOr(volatile PxI32* val, PxI32 mask);
PX_FOUNDATION_API PxI64 PxAtomicOr(volatile PxI64* val, PxI64 mask);

/* AND mask into *val. Return the new value */
PX_FOUNDATION_API PxI32 PxAtomicAnd(volatile PxI32* val, PxI32 mask);
PX_FOUNDATION_API PxI64 PxAtomicAnd(volatile PxI64* val, PxI64 mask);
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,145 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BASIC_TEMPLATES_H
#define PX_BASIC_TEMPLATES_H
#include "foundation/PxPreprocessor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Binary predicate: true when the two operands compare equal via operator==.
template <typename A>
struct PxEqual
{
	bool operator()(const A& lhs, const A& rhs) const
	{
		return lhs == rhs;
	}
};
// Binary predicate: strict-weak ordering via operator<.
template <typename A>
struct PxLess
{
	bool operator()(const A& lhs, const A& rhs) const
	{
		return lhs < rhs;
	}
};
// Binary predicate: descending ordering via operator>.
template <typename A>
struct PxGreater
{
	bool operator()(const A& lhs, const A& rhs) const
	{
		return lhs > rhs;
	}
};
// Simple aggregate of two values, analogous to std::pair.
template <class F, class S>
class PxPair
{
  public:
	F first;
	S second;

	// Value-initialize both members.
	PX_CUDA_CALLABLE PX_INLINE PxPair() : first(F()), second(S())
	{
	}

	PX_CUDA_CALLABLE PX_INLINE PxPair(const F& f, const S& s) : first(f), second(s)
	{
	}

	PX_CUDA_CALLABLE PX_INLINE PxPair(const PxPair& other) : first(other.first), second(other.second)
	{
	}

	PX_CUDA_CALLABLE PX_INLINE PxPair& operator=(const PxPair& other)
	{
		first = other.first;
		second = other.second;
		return *this;
	}

	// Equal when both members compare equal.
	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPair& other) const
	{
		return first == other.first && second == other.second;
	}

	// Lexicographic ordering: 'first' dominates, ties broken by 'second'.
	// Only operator< of the members is used, mirroring std::pair.
	PX_CUDA_CALLABLE PX_INLINE bool operator<(const PxPair& other) const
	{
		return (first < other.first) || (!(other.first < first) && (second < other.second));
	}
};
// Compile-time floor(log2(A)); recursion terminates at the A == 1 specialization.
template <unsigned int A>
struct PxLogTwo
{
	static const unsigned int value = 1 + PxLogTwo<(A >> 1)>::value;
};
template <>
struct PxLogTwo<1>
{
	static const unsigned int value = 0;
};
// Strip a top-level const qualifier from a type; non-const types pass
// through unchanged via the primary template.
template <typename T>
struct PxUnConst
{
	typedef T Type;
};
// Partial specialization that peels the const off.
template <typename T>
struct PxUnConst<const T>
{
	typedef T Type;
};
// Advance a pointer by a byte offset and reinterpret it as T.
template <typename T>
T PxPointerOffset(void* p, ptrdiff_t offset)
{
	char* bytes = reinterpret_cast<char*>(p);
	return reinterpret_cast<T>(bytes + offset);
}
// Const overload: preserves const-ness of the underlying bytes.
template <typename T>
T PxPointerOffset(const void* p, ptrdiff_t offset)
{
	const char* bytes = reinterpret_cast<const char*>(p);
	return reinterpret_cast<T>(bytes + offset);
}
// Exchange two values through a temporary copy.
template <class T>
PX_CUDA_CALLABLE PX_INLINE void PxSwap(T& x, T& y)
{
	const T held = x;
	x = y;
	y = held;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BIT_AND_DATA_H
#define PX_BIT_AND_DATA_H
#include "foundation/PxIO.h"
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Packs a boolean flag into an otherwise-unused bit of an integer value.
// The flag bit is selected by 'bitMask'; reading the value masks the flag out.
template <typename storageType, storageType bitMask>
class PxBitAndDataT
{
  public:
	// Serialization constructor: leaves mData untouched (filled in externally).
	PX_FORCE_INLINE PxBitAndDataT(const PxEMPTY)
	{
	}

	PX_FORCE_INLINE PxBitAndDataT() : mData(0)
	{
	}

	// Store 'data', optionally with the flag bit set.
	PX_FORCE_INLINE PxBitAndDataT(storageType data, bool bit = false)
	{
		if(bit)
			mData = storageType(data | bitMask);
		else
			mData = data;
	}

	// Implicit read of the payload with the flag bit masked out.
	PX_CUDA_CALLABLE PX_FORCE_INLINE operator storageType() const
	{
		return storageType(mData & ~bitMask);
	}

	PX_CUDA_CALLABLE PX_FORCE_INLINE void setBit()
	{
		mData |= bitMask;
	}

	PX_CUDA_CALLABLE PX_FORCE_INLINE void clearBit()
	{
		mData &= ~bitMask;
	}

	// Non-zero iff the flag bit is set.
	PX_CUDA_CALLABLE PX_FORCE_INLINE storageType isBitSet() const
	{
		return storageType(mData & bitMask);
	}

  protected:
	storageType mData;
};
typedef PxBitAndDataT<PxU8, 0x80> PxBitAndByte;
typedef PxBitAndDataT<PxU16, 0x8000> PxBitAndWord;
typedef PxBitAndDataT<PxU32, 0x80000000> PxBitAndDword;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,511 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BITMAP_H
#define PX_BITMAP_H
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxMemory.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxConstructor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
Hold a bitmap with operations to set,reset or test given bit.
We inhibit copy to prevent unintentional copies. If a copy is desired copy() should be used or
alternatively a copy constructor implemented.
*/
template<class PxAllocator>
class PxBitMapBase : public PxUserAllocated
{
	PX_NOCOPY(PxBitMapBase)

  public:
	// PX_SERIALIZATION
	// Deserialization constructor: the members already hold data loaded in
	// place, so nothing is initialized here. If a map pointer is present the
	// word count is tagged with the "in user memory" bit so release() will
	// not try to free user-provided storage.
	/* todo: explicit */ PxBitMapBase(const PxEMPTY)
	{
		if(mMap)
			mWordCount |= PX_SIGN_BITMASK;
	}
	//~PX_SERIALIZATION

	PX_INLINE PxBitMapBase(const PxAllocator& allocator) : mMap(0), mWordCount(0), mAllocator(allocator) {}

	PX_INLINE PxBitMapBase() : mMap(0), mWordCount(0) {}

	PX_INLINE ~PxBitMapBase()
	{
		release();
	}

	// Free the word storage (unless it lives in user memory) and drop the pointer.
	PX_INLINE void release()
	{
		if(mMap && !isInUserMemory())
			mAllocator.deallocate(mMap);
		mMap = NULL;
	}

	PX_FORCE_INLINE PxAllocator& getAllocator() { return mAllocator; }

	// Set bit 'index', growing the map as needed.
	PX_INLINE void growAndSet(PxU32 index)
	{
		extend(index + 1);
		mMap[index >> 5] |= 1 << (index & 31);
	}

	// Clear bit 'index', growing the map as needed.
	PX_INLINE void growAndReset(PxU32 index)
	{
		extend(index + 1);
		mMap[index >> 5] &= ~(1 << (index & 31));
	}

	// Test bit 'index'; out-of-range indices read as false.
	PX_INLINE PxIntBool boundedTest(PxU32 index) const
	{
		return PxIntBool(index >> 5 >= getWordCount() ? PxIntFalse : (mMap[index >> 5] & (1 << (index & 31))));
	}

	// Clear bit 'index' if it is in range; no-op otherwise.
	PX_INLINE void boundedReset(PxU32 index)
	{
		if((index >> 5) < getWordCount())
			mMap[index >> 5] &= ~(1 << (index & 31));
	}

	// Special optimized versions, when you _know_ your index is in range
	PX_INLINE void set(PxU32 index)
	{
		PX_ASSERT(index<getWordCount() * 32);
		mMap[index >> 5] |= 1 << (index & 31);
	}

	PX_INLINE void reset(PxU32 index)
	{
		PX_ASSERT(index<getWordCount() * 32);
		mMap[index >> 5] &= ~(1 << (index & 31));
	}

	PX_INLINE PxIntBool test(PxU32 index) const
	{
		PX_ASSERT(index<getWordCount() * 32);
		return PxIntBool(mMap[index >> 5] & (1 << (index & 31)));
	}

	// nibble == 4 bits
	PX_INLINE PxU32 getNibbleFast(PxU32 nibIndex) const
	{
		const PxU32 bitIndex = nibIndex << 2;
		PX_ASSERT(bitIndex < getWordCount() * 32);
		return (mMap[bitIndex >> 5] >> (bitIndex & 31)) & 0xf;
	}

	// AND the 4-bit group 'nibIndex' with the low 4 bits of 'mask'.
	PX_INLINE void andNibbleFast(PxU32 nibIndex, PxU32 mask)
	{
		//TODO: there has to be a faster way...
		const PxU32 bitIndex = nibIndex << 2;
		const PxU32 shift = (bitIndex & 31);
		const PxU32 nibMask = (0xfu << shift);
		PX_ASSERT(bitIndex < getWordCount() * 32);
		mMap[bitIndex >> 5] &= ((mask << shift) | ~nibMask);
	}

	// OR the 4-bit group 'nibIndex' with 'mask' (must fit in 4 bits).
	PX_INLINE void orNibbleFast(PxU32 nibIndex, PxU32 mask)
	{
		PX_ASSERT(!(mask & ~0xfu)); //check extra bits are not set
		const PxU32 bitIndex = nibIndex << 2;
		const PxU32 shift = bitIndex & 31;
		PX_ASSERT(bitIndex < getWordCount() * 32);
		mMap[bitIndex >> 5] |= (mask << shift);
	}

	// Zero every word; the allocation is kept.
	void clear()
	{
		PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32));
	}

	// Ensure room for 'newBitCount' bits, then zero all words.
	void resizeAndClear(PxU32 newBitCount)
	{
		extendUninitialized(newBitCount);
		PxMemSet(mMap, 0, getWordCount() * sizeof(PxU32));
	}

	// Forget the storage without freeing it (caller owns the memory).
	void setEmpty()
	{
		mMap = NULL;
		mWordCount = 0;
	}

	// Adopt user-provided words; tagged so release() will not free them.
	void setWords(PxU32* map, PxU32 wordCount)
	{
		mMap = map;
		mWordCount = wordCount | PX_SIGN_BITMASK;
	}

	// !!! only sets /last/ bit to value
	void resize(PxU32 newBitCount, bool value = false)
	{
		PX_ASSERT(!value); // only new class supports this
		PX_UNUSED(value);
		extend(newBitCount);
	}

	PX_FORCE_INLINE PxU32 size() const { return getWordCount() * 32; }

	// Copy another map's words; any extra tail words here are zeroed.
	void copy(const PxBitMapBase& a)
	{
		extendUninitialized(a.getWordCount() << 5);
		PxMemCopy(mMap, a.mMap, a.getWordCount() * sizeof(PxU32));
		if(getWordCount() > a.getWordCount())
			PxMemSet(mMap + a.getWordCount(), 0, (getWordCount() - a.getWordCount()) * sizeof(PxU32));
	}

	// Total number of set bits.
	PX_INLINE PxU32 count() const
	{
		// NOTE: we can probably do this faster, since the last steps in PxBitCount can be defered to
		// the end of the seq. + 64/128bits at a time + native bit counting instructions(360 is fast non micro code).
		PxU32 count = 0;
		const PxU32 wordCount = getWordCount();
		for(PxU32 i = 0; i<wordCount; i++)
			count += PxBitCount(mMap[i]);
		return count;
	}

	// Number of set bits in [start, start + length), clamped to the map size.
	PX_INLINE PxU32 count(PxU32 start, PxU32 length) const
	{
		const PxU32 end = PxMin(getWordCount() << 5, start + length);
		PxU32 count = 0;
		for(PxU32 i = start; i<end; i++)
			count += (test(i) != 0);
		return count;
	}

	//! returns 0 if no bits set (!!!)
	PxU32 findLast() const
	{
		const PxU32 wordCount = getWordCount();
		for(PxU32 i = wordCount; i-- > 0;)
		{
			if(mMap[i])
				return (i << 5) + PxHighestSetBit(mMap[i]);
		}
		return PxU32(0);
	}

	bool hasAnyBitSet() const
	{
		const PxU32 wordCount = getWordCount();
		for(PxU32 i = 0; i<wordCount; i++)
		{
			if (mMap[i])
				return true;
		}
		return false;
	}

	// the obvious combiners and some used in the SDK
	struct OR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a | b; } };
	struct AND { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a&b; } };
	struct XOR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a^b; } };

	// we use auxiliary functions here so as not to generate combiners for every combination
	// of allocators
	template<class Combiner, class _>
	PX_INLINE void combineInPlace(const PxBitMapBase<_>& b)
	{
		combine1<Combiner>(b.mMap, b.getWordCount());
	}

	template<class Combiner, class _1, class _2>
	PX_INLINE void combine(const PxBitMapBase<_1>& a, const PxBitMapBase<_2>& b)
	{
		combine2<Combiner>(a.mMap, a.getWordCount(), b.mMap, b.getWordCount());
	}

	PX_FORCE_INLINE const PxU32* getWords() const { return mMap; }
	PX_FORCE_INLINE PxU32* getWords() { return mMap; }

	// PX_SERIALIZATION
	PX_FORCE_INLINE PxU32 getWordCount() const { return mWordCount & ~PX_SIGN_BITMASK; }

	// We need one bit to mark arrays that have been deserialized from a user-provided memory block.
	PX_FORCE_INLINE PxU32 isInUserMemory() const { return mWordCount & PX_SIGN_BITMASK; }
	//~PX_SERIALIZATION

	/*!
	Iterate over indices in a bitmap

	This iterator is good because it finds the set bit without looping over the cached bits upto 31 times.
	However it does require a variable shift.
	*/
	class Iterator
	{
	  public:
		static const PxU32 DONE = 0xffffffff;

		PX_INLINE Iterator(const PxBitMapBase &map) : mBitMap(map)
		{
			reset();
		}

		PX_INLINE Iterator& operator=(const Iterator& other)
		{
			PX_ASSERT(&mBitMap == &other.mBitMap);
			mBlock = other.mBlock;
			mIndex = other.mIndex;
			return *this;
		}

		// Returns the index of the next set bit, or DONE when exhausted.
		PX_INLINE PxU32 getNext()
		{
			if(mBlock)
			{
				PxU32 block = mBlock;
				PxU32 index = mIndex;
				const PxU32 bitIndex = index << 5 | PxLowestSetBit(block);
				block &= block - 1; // clear the lowest set bit
				PxU32 wordCount = mBitMap.getWordCount();
				while(!block && ++index < wordCount)
					block = mBitMap.mMap[index];
				mBlock = block;
				mIndex = index;
				return bitIndex;
			}
			return DONE;
		}

		// Restart the iteration at the first set bit.
		PX_INLINE void reset()
		{
			PxU32 index = 0;
			PxU32 block = 0;
			PxU32 wordCount = mBitMap.getWordCount();
			while(index < wordCount && ((block = mBitMap.mMap[index]) == 0))
				++index;
			mBlock = block;
			mIndex = index;
		}

	  private:
		PxU32 mBlock, mIndex;
		const PxBitMapBase& mBitMap;
	};

	// DS: faster but less general: hasBits() must be true or getNext() is illegal so it is the calling code's responsibility to ensure that getNext() is not called illegally.
	class PxLoopIterator
	{
		PX_NOCOPY(PxLoopIterator)

	  public:
		PX_FORCE_INLINE PxLoopIterator(const PxBitMapBase &map) : mMap(map.getWords()), mBlock(0), mIndex(-1), mWordCount(PxI32(map.getWordCount())) {}

		// Advance to the next non-zero word if needed; false when exhausted.
		PX_FORCE_INLINE bool hasBits()
		{
			PX_ASSERT(mIndex<mWordCount);
			while (mBlock == 0)
			{
				if (++mIndex == mWordCount)
					return false;
				mBlock = mMap[mIndex];
			}
			return true;
		}

		PX_FORCE_INLINE PxU32 getNext()
		{
			PX_ASSERT(mIndex<mWordCount && mBlock != 0);
			PxU32 result = PxU32(mIndex) << 5 | PxLowestSetBit(mBlock); // will assert if mask is zero
			mBlock &= (mBlock - 1);
			return result;
		}

	  private:
		const PxU32*const mMap;
		PxU32 mBlock; // the word we're currently scanning
		PxI32 mIndex; // the index of the word we're currently looking at
		PxI32 mWordCount;
	};

	//Class to iterate over the bitmap from a particular start location rather than the beginning of the list
	class PxCircularIterator
	{
	  public:
		static const PxU32 DONE = 0xffffffff;

		PX_INLINE PxCircularIterator(const PxBitMapBase &map, PxU32 index) : mBitMap(map)
		{
			PxU32 localIndex = 0;
			PxU32 startIndex = 0;
			const PxU32 wordCount = mBitMap.getWordCount();
			// NOTE(review): 'localIndex' is used as a *word* index below, so
			// converting the bit index 'index' looks like it should be
			// 'index >> 5' rather than 'index << 5' - confirm against callers.
			if((index << 5) < wordCount)
			{
				localIndex = index << 5;
				startIndex = localIndex;
			}
			PxU32 block = 0;
			if(localIndex < wordCount)
			{
				block = mBitMap.mMap[localIndex];
				// Starting word is empty: scan forward (wrapping) for the
				// first non-zero word, stopping if we come back around.
				if(block == 0)
				{
					localIndex = (localIndex + 1) % wordCount;
					while(localIndex != startIndex && (block = mBitMap.mMap[localIndex]) == 0)
						localIndex = (localIndex + 1) % wordCount;
				}
			}
			mIndex = localIndex;
			mBlock = block;
			mStartIndex = startIndex;
		}

		// Returns the index of the next set bit (wrapping around), or DONE.
		PX_INLINE PxU32 getNext()
		{
			if(mBlock)
			{
				PxU32 index = mIndex;
				PxU32 block = mBlock;
				const PxU32 startIndex = mStartIndex;
				PxU32 bitIndex = index << 5 | PxLowestSetBit(block);
				block &= block - 1; // clear the lowest set bit
				PxU32 wordCount = mBitMap.getWordCount();
				while (!block && (index = ((index + 1) % wordCount)) != startIndex)
					block = mBitMap.mMap[index];
				mIndex = index;
				mBlock = block;
				return bitIndex;
			}
			return DONE;
		}

	  private:
		PxU32 mBlock, mIndex;
		PxU32 mStartIndex;
		const PxBitMapBase& mBitMap;

		PX_NOCOPY(PxCircularIterator)
	};

  protected:
	PxU32* mMap; //one bit per index
	PxU32 mWordCount;
	PxAllocator mAllocator;
	PxU8 mPadding[3]; // PT: "mAllocator" is empty but consumes 1 byte

	// Grow to hold at least 'size' bits; existing bits are preserved and the
	// new tail words are zeroed. No-op when already large enough.
	void extend(PxU32 size)
	{
		const PxU32 newWordCount = (size + 31) >> 5;
		if (newWordCount > getWordCount())
		{
			PxU32* newMap = reinterpret_cast<PxU32*>(mAllocator.allocate(newWordCount * sizeof(PxU32), PX_FL));
			if (mMap)
			{
				PxMemCopy(newMap, mMap, getWordCount() * sizeof(PxU32));
				if (!isInUserMemory())
					mAllocator.deallocate(mMap);
			}
			PxMemSet(newMap + getWordCount(), 0, (newWordCount - getWordCount()) * sizeof(PxU32));
			mMap = newMap;
			// also resets the isInUserMemory bit
			mWordCount = newWordCount;
		}
	}

	// Grow to hold at least 'size' bits WITHOUT preserving existing contents;
	// the new buffer is left uninitialized.
	void extendUninitialized(PxU32 size)
	{
		PxU32 newWordCount = (size + 31) >> 5;
		if (newWordCount > getWordCount())
		{
			if (mMap && !isInUserMemory())
				mAllocator.deallocate(mMap);
			// also resets the isInUserMemory bit
			mWordCount = newWordCount;
			mMap = reinterpret_cast<PxU32*>(mAllocator.allocate(mWordCount * sizeof(PxU32), PX_FL));
		}
	}

	// Word-wise combine with a raw word range (backing for combineInPlace).
	template<class Combiner>
	void combine1(const PxU32* words, PxU32 length)
	{
		extend(length << 5);
		PxU32 combineLength = PxMin(getWordCount(), length);
		for (PxU32 i = 0; i<combineLength; i++)
			mMap[i] = Combiner()(mMap[i], words[i]);
	}

	// Word-wise combine of two raw word ranges into this map, padding the
	// shorter range with zero words (backing for combine).
	template<class Combiner>
	void combine2(const PxU32* words1, PxU32 length1,
		const PxU32* words2, PxU32 length2)
	{
		extendUninitialized(PxMax(length1, length2) << 5);
		PxU32 commonSize = PxMin(length1, length2);
		for (PxU32 i = 0; i<commonSize; i++)
			mMap[i] = Combiner()(words1[i], words2[i]);
		for (PxU32 i = commonSize; i<length1; i++)
			mMap[i] = Combiner()(words1[i], 0);
		for (PxU32 i = commonSize; i<length2; i++)
			mMap[i] = Combiner()(0, words2[i]);
	}

	friend class Iterator;
};
typedef PxBitMapBase<PxAllocator> PxBitMap;
typedef PxBitMapBase<PxVirtualAllocator> PxBitMapPinned;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,128 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BIT_UTILS_H
#define PX_BIT_UTILS_H
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxMathIntrinsics.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Population count: number of set bits in v.
// SWAR parallel reduction, see
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
PX_INLINE uint32_t PxBitCount(uint32_t v)
{
uint32_t const pairSums = v - ((v >> 1) & 0x55555555);                              // sums of adjacent bit pairs
uint32_t const nibbleSums = (pairSums & 0x33333333) + ((pairSums >> 2) & 0x33333333); // sums per 4-bit nibble
return (((nibbleSums + (nibbleSums >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;          // fold bytes, take the top one
}
// True iff exactly one bit of x is set (so 0 is not a power of two).
PX_INLINE bool PxIsPowerOfTwo(uint32_t x)
{
// clearing the lowest set bit of a power of two yields zero
return (x != 0) && ((x & (x - 1)) == 0);
}
// "Next Largest Power of 2
// Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm
// that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with
// the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next
// largest power of 2. For a 32-bit value:"
// Returns the smallest power of two strictly greater than the highest set bit's
// value pattern: smear the top bit into all lower positions, then add one.
// Note: PxNextPowerOfTwo(0) == 1, and a power-of-two input yields the NEXT one.
PX_INLINE uint32_t PxNextPowerOfTwo(uint32_t x)
{
for (uint32_t shift = 1; shift < 32; shift <<= 1)	// shifts 1,2,4,8,16
x |= (x >> shift);
return x + 1;
}
/*!
Return the index of the lowest set bit. Not valid for zero arg.
*/
// Checked wrapper: index of the lowest set bit of a 32-bit value.
PX_INLINE uint32_t PxLowestSetBit(uint32_t x)
{
PX_ASSERT(x);	// zero has no set bit; the unsafe variant's result is undefined for 0
return PxLowestSetBitUnsafe(x);
}
/*!
Return the index of the lowest set bit. Not valid for zero arg.
*/
// Checked wrapper: index of the lowest set bit of a 64-bit value.
PX_INLINE uint32_t PxLowestSetBit(uint64_t x)
{
PX_ASSERT(x);	// zero has no set bit; the unsafe variant's result is undefined for 0
return PxLowestSetBitUnsafe(x);
}
/*!
Return the index of the highest set bit. Not valid for zero arg.
*/
// Checked wrapper: index of the highest set bit of a 32-bit value.
PX_INLINE uint32_t PxHighestSetBit(uint32_t x)
{
PX_ASSERT(x);	// zero has no set bit; the unsafe variant's result is undefined for 0
return PxHighestSetBitUnsafe(x);
}
/*!
Return the index of the highest set bit. Not valid for zero arg.
*/
// Checked wrapper: index of the highest set bit of a 64-bit value.
PX_INLINE uint32_t PxHighestSetBit(uint64_t x)
{
PX_ASSERT(x);	// zero has no set bit; the unsafe variant's result is undefined for 0
return PxHighestSetBitUnsafe(x);
}
// Helper function to approximate log2 of an integer value
// assumes that the input is actually power of two.
// Integer floor(log2(num)): index of the highest set bit, counted by shifting.
// Intended for power-of-two inputs; PxILog2(0) returns 0 like the original loop.
PX_INLINE uint32_t PxILog2(uint32_t num)
{
uint32_t bit = 0;
while (num >>= 1)	// every input reaches zero within 32 shifts
bit++;
return bit;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,496 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BOUNDS3_H
#define PX_BOUNDS3_H
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// maximum extents defined such that floating point exceptions are avoided for standard use cases
#define PX_MAX_BOUNDS_EXTENTS (PX_MAX_REAL * 0.25f)
/**
\brief Class representing 3D range or axis aligned bounding box.
Stored as minimum and maximum extent corners. Alternate representation
would be center and dimensions.
May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes.
Empty bounds have to be represented as minimum = PX_MAX_BOUNDS_EXTENTS and maximum = -PX_MAX_BOUNDS_EXTENTS for all
axes.
All other representations are invalid and the behavior is undefined.
*/
class PxBounds3
{
public:
/**
\brief Default constructor, not performing any initialization for performance reason.
\remark Use empty() function below to construct empty bounds.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3()
{
}
/**
\brief Construct from two bounding points
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxVec3& minimum, const PxVec3& maximum);
/**
\brief Copy assignment. Note that, unlike the canonical form, it returns void.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxBounds3& other)
{
minimum = other.minimum;
maximum = other.maximum;
}
/**
\brief Copy constructor.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3(const PxBounds3& other)
{
minimum = other.minimum;
maximum = other.maximum;
}
/**
\brief Return empty bounds.
*/
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 empty();
/**
\brief returns the AABB containing v0 and v1.
\param v0 first point included in the AABB.
\param v1 second point included in the AABB.
*/
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 boundsOfPoints(const PxVec3& v0, const PxVec3& v1);
/**
\brief returns the AABB from center and extents vectors.
\param center Center vector
\param extent Extents vector
*/
static PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 centerExtents(const PxVec3& center, const PxVec3& extent);
/**
\brief Construct from center, extent, and (not necessarily orthogonal) basis
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent);
/**
\brief Construct from pose and extent
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 poseExtent(const PxTransform& pose, const PxVec3& extent);
/**
\brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
This version is safe to call for empty bounds.
\param[in] matrix Transform to apply, can contain scaling as well
\param[in] bounds The bounds to transform.
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxMat33& matrix, const PxBounds3& bounds);
/**
\brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
\param[in] matrix Transform to apply, can contain scaling as well
\param[in] bounds The bounds to transform.
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxMat33& matrix, const PxBounds3& bounds);
/**
\brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
This version is safe to call for empty bounds.
\param[in] transform Transform to apply, can contain scaling as well
\param[in] bounds The bounds to transform.
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformSafe(const PxTransform& transform, const PxBounds3& bounds);
/**
\brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
\param[in] transform Transform to apply, can contain scaling as well
\param[in] bounds The bounds to transform.
*/
static PX_CUDA_CALLABLE PX_INLINE PxBounds3 transformFast(const PxTransform& transform, const PxBounds3& bounds);
/**
\brief Sets empty to true
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void setEmpty();
/**
\brief Sets the bounds to maximum size [-PX_MAX_BOUNDS_EXTENTS, PX_MAX_BOUNDS_EXTENTS].
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void setMaximal();
/**
\brief expands the volume to include v
\param v Point to expand to.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxVec3& v);
/**
\brief expands the volume to include b.
\param b Bounds to perform union with.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void include(const PxBounds3& b);
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isEmpty() const;
/**
\brief indicates whether the intersection of this and b is empty or not.
\param b Bounds to test for intersection.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects(const PxBounds3& b) const;
/**
\brief computes the 1D-intersection between two AABBs, on a given axis.
\param a the other AABB
\param axis the axis (0, 1, 2)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool intersects1D(const PxBounds3& a, uint32_t axis) const;
/**
\brief indicates if these bounds contain v.
\param v Point to test against bounds.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& v) const;
/**
\brief checks a box is inside another box.
\param box the other AABB
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isInside(const PxBounds3& box) const;
/**
\brief returns the center of this axis aligned box.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getCenter() const;
/**
\brief get component of the box's center along a given axis
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float getCenter(uint32_t axis) const;
/**
\brief get component of the box's extents along a given axis
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float getExtents(uint32_t axis) const;
/**
\brief returns the dimensions (width/height/depth) of this axis aligned box.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getDimensions() const;
/**
\brief returns the extents, which are half of the width/height/depth.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getExtents() const;
/**
\brief scales the AABB.
This version is safe to call for empty bounds.
\param scale Factor to scale AABB by.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleSafe(float scale);
/**
\brief scales the AABB.
Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead.
\param scale Factor to scale AABB by.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void scaleFast(float scale);
/**
fattens the AABB in all 3 dimensions by the given distance.
This version is safe to call for empty bounds.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenSafe(float distance);
/**
fattens the AABB in all 3 dimensions by the given distance.
Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void fattenFast(float distance);
/**
checks that the AABB values are not NaN
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const;
/**
checks that the AABB values describe a valid configuration.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid() const;
/**
Finds the closest point in the box to the point p. If p is contained, this will be p, otherwise it
will be the closest point on the surface of the box.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 closestPoint(const PxVec3& p) const;
// corner storage; for nonempty bounds minimum <= maximum holds componentwise
PxVec3 minimum, maximum;
};
// Corner constructor: the caller guarantees minimum_ <= maximum_ (or the empty-bounds sentinel).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3::PxBounds3(const PxVec3& minimum_, const PxVec3& maximum_)
: minimum(minimum_), maximum(maximum_)
{
}
// Canonical empty bounds: inverted extreme corners, as required by isEmpty()/isValid().
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::empty()
{
const PxVec3 lo(PX_MAX_BOUNDS_EXTENTS);
const PxVec3 hi(-PX_MAX_BOUNDS_EXTENTS);
return PxBounds3(lo, hi);
}
// True iff neither corner contains NaN/inf components.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isFinite() const
{
return minimum.isFinite() && maximum.isFinite();
}
// Tight AABB of two points: componentwise min/max give the corners.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::boundsOfPoints(const PxVec3& v0, const PxVec3& v1)
{
const PxVec3 lo = v0.minimum(v1);
const PxVec3 hi = v0.maximum(v1);
return PxBounds3(lo, hi);
}
// Build an AABB from its center and half-extents; corners are symmetric about the center.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxBounds3 PxBounds3::centerExtents(const PxVec3& center, const PxVec3& extent)
{
const PxVec3 lo = center - extent;
const PxVec3 hi = center + extent;
return PxBounds3(lo, hi);
}
// AABB of an oriented box given by center, basis columns and per-axis extents.
PX_CUDA_CALLABLE PX_INLINE PxBounds3
PxBounds3::basisExtent(const PxVec3& center, const PxMat33& basis, const PxVec3& extent)
{
// scale each basis vector by the corresponding half-extent
const PxVec3 axisX = basis.column0 * extent.x;
const PxVec3 axisY = basis.column1 * extent.y;
const PxVec3 axisZ = basis.column2 * extent.z;
// worst-case reach per world axis is the sum of absolute contributions
const PxVec3 reach( PxAbs(axisX.x) + PxAbs(axisY.x) + PxAbs(axisZ.x),
PxAbs(axisX.y) + PxAbs(axisY.y) + PxAbs(axisZ.y),
PxAbs(axisX.z) + PxAbs(axisY.z) + PxAbs(axisZ.z));
return PxBounds3(center - reach, center + reach);
}
// AABB of an oriented box given as a rigid pose plus half-extents;
// delegates to basisExtent with the pose's rotation expanded to a matrix.
PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::poseExtent(const PxTransform& pose, const PxVec3& extent)
{
return basisExtent(pose.p, PxMat33(pose.q), extent);
}
// Reset to the canonical empty-bounds sentinel (inverted extreme corners).
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setEmpty()
{
*this = empty();
}
// Expand to the largest representable bounds that still avoid float overflow
// in downstream arithmetic (see PX_MAX_BOUNDS_EXTENTS).
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::setMaximal()
{
minimum = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
maximum = PxVec3(PX_MAX_BOUNDS_EXTENTS);
}
// Grow the bounds to contain point v; including into empty bounds yields a point box.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxVec3& v)
{
PX_ASSERT(isValid());
minimum = minimum.minimum(v);
maximum = maximum.maximum(v);
}
// Grow the bounds to the union with b; union with empty bounds is a no-op
// thanks to the inverted-corner empty representation.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::include(const PxBounds3& b)
{
PX_ASSERT(isValid());
minimum = minimum.minimum(b.minimum);
maximum = maximum.maximum(b.maximum);
}
// Checking only x suffices: valid empty bounds are inverted on all three axes.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isEmpty() const
{
PX_ASSERT(isValid());
return minimum.x > maximum.x;
}
// Separating-axis test for AABBs: no overlap iff the boxes are disjoint on some axis.
// Touching boxes (shared face/edge) count as intersecting.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects(const PxBounds3& b) const
{
PX_ASSERT(isValid() && b.isValid());
return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y ||
b.minimum.z > maximum.z || minimum.z > b.maximum.z);
}
// Interval-overlap test restricted to one axis (0=x, 1=y, 2=z).
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::intersects1D(const PxBounds3& a, uint32_t axis) const
{
PX_ASSERT(isValid() && a.isValid());
return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis];
}
// Point-in-box test; points on the boundary count as contained.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::contains(const PxVec3& v) const
{
PX_ASSERT(isValid());
return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z ||
v.z > maximum.z);
}
// True iff this box lies entirely within `box` (boundary contact allowed):
// on every axis, box's slab must contain ours.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isInside(const PxBounds3& box) const
{
PX_ASSERT(isValid() && box.isValid());
for(uint32_t axis = 0; axis < 3; axis++)
{
if(box.minimum[axis] > minimum[axis])
return false;
if(box.maximum[axis] < maximum[axis])
return false;
}
return true;
}
// Midpoint of the two corners.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getCenter() const
{
PX_ASSERT(isValid());
return (minimum + maximum) * 0.5f;
}
// Center coordinate along a single axis (0=x, 1=y, 2=z).
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getCenter(uint32_t axis) const
{
PX_ASSERT(isValid());
return (minimum[axis] + maximum[axis]) * 0.5f;
}
// Half-size along a single axis (0=x, 1=y, 2=z).
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxBounds3::getExtents(uint32_t axis) const
{
PX_ASSERT(isValid());
return (maximum[axis] - minimum[axis]) * 0.5f;
}
// Full size (width/height/depth) of the box.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getDimensions() const
{
PX_ASSERT(isValid());
return maximum - minimum;
}
// Half-size vector, i.e. half of getDimensions().
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::getExtents() const
{
PX_ASSERT(isValid());
return getDimensions() * 0.5f;
}
// Scale about the center; empty bounds are left untouched (scaleFast would
// corrupt the empty sentinel because its "center" is meaningless).
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleSafe(float scale)
{
PX_ASSERT(isValid());
if(!isEmpty())
scaleFast(scale);
}
// Scale about the center without the empty-bounds guard; caller guarantees nonempty.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::scaleFast(float scale)
{
PX_ASSERT(isValid());
*this = centerExtents(getCenter(), getExtents() * scale);
}
// Inflate every face outward by `distance`; empty bounds are left untouched.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenSafe(float distance)
{
PX_ASSERT(isValid());
if(!isEmpty())
fattenFast(distance);
}
// Inflate every face outward by `distance` without the empty-bounds guard;
// caller guarantees nonempty bounds.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxBounds3::fattenFast(float distance)
{
PX_ASSERT(isValid());
minimum.x -= distance;
minimum.y -= distance;
minimum.z -= distance;
maximum.x += distance;
maximum.y += distance;
maximum.z += distance;
}
// Transform by a 3x3 matrix; empty bounds pass through unchanged.
PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxMat33& matrix, const PxBounds3& bounds)
{
PX_ASSERT(bounds.isValid());
return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds;
}
// Transform by a 3x3 matrix via basisExtent on the transformed center;
// undefined for empty bounds (use transformSafe).
PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxMat33& matrix, const PxBounds3& bounds)
{
PX_ASSERT(bounds.isValid());
return PxBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents());
}
// Transform by a rigid transform; empty bounds pass through unchanged.
PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformSafe(const PxTransform& transform, const PxBounds3& bounds)
{
PX_ASSERT(bounds.isValid());
return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds;
}
// Transform by a rigid transform via basisExtent on the transformed center;
// undefined for empty bounds (use transformSafe).
PX_CUDA_CALLABLE PX_INLINE PxBounds3 PxBounds3::transformFast(const PxTransform& transform, const PxBounds3& bounds)
{
PX_ASSERT(bounds.isValid());
return PxBounds3::basisExtent(transform.transform(bounds.getCenter()), PxMat33(transform.q), bounds.getExtents());
}
// Valid means: finite, and either nonempty (min <= max on all axes) or exactly
// the canonical empty sentinel on all axes. Partially-inverted bounds are invalid.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxBounds3::isValid() const
{
return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) ||
((minimum.x == PX_MAX_BOUNDS_EXTENTS) && (minimum.y == PX_MAX_BOUNDS_EXTENTS) &&
(minimum.z == PX_MAX_BOUNDS_EXTENTS) && (maximum.x == -PX_MAX_BOUNDS_EXTENTS) &&
(maximum.y == -PX_MAX_BOUNDS_EXTENTS) && (maximum.z == -PX_MAX_BOUNDS_EXTENTS))));
}
// Componentwise clamp of p to the box: an interior point maps to itself,
// an exterior point to the nearest point on the surface.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxBounds3::closestPoint(const PxVec3& p) const
{
const PxVec3 clampedHigh = maximum.minimum(p);	// pull p below the upper corner
return minimum.maximum(clampedHigh);	// then above the lower corner
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,276 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BROADCAST_H
#define PX_BROADCAST_H
#include "foundation/PxInlineArray.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxErrorCallback.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Abstract listener class that listens to allocation and deallocation events from the
foundation memory system.
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s).
*/
class PxAllocationListener
{
public:
/**
\brief callback when memory is allocated.
\param size Size of the allocation in bytes.
\param typeName Type this data is being allocated for.
\param filename File the allocation came from.
\param line Line number the allocation came from.
\param allocatedMemory Memory that will be returned from the allocation.
*/
virtual void onAllocation(size_t size, const char* typeName, const char* filename, int line,
void* allocatedMemory) = 0;
/**
\brief callback when memory is deallocated.
\param allocatedMemory Memory that is about to be freed; matches a pointer
previously reported via onAllocation.
*/
virtual void onDeallocation(void* allocatedMemory) = 0;
protected:
// non-virtual deletion through this interface is not supported; destruction
// is expected via the concrete subclass
virtual ~PxAllocationListener()
{
}
};
/**
\brief Broadcast class implementation, registering listeners.
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s). There is not internal locking
*/
/**
\brief Broadcast class implementation, registering listeners.

Fans calls out to up to MAX_NB_LISTENERS registered listeners.

<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s). There is no internal locking, so registration and
deregistration must not race with dispatch.
*/
template <class Listener, class Base>
class PxBroadcast : public Base
{
public:
// fixed capacity of the inline listener array; registrations beyond this are ignored
static const uint32_t MAX_NB_LISTENERS = 16;
/**
\brief The default constructor.
*/
PxBroadcast()
{
}
/**
\brief Register new listener.
\note It is NOT SAFE to register and deregister listeners while allocations may be taking place.
Moreover, there is no thread safety to registration/deregistration.
\note Silently ignored when MAX_NB_LISTENERS listeners are already registered.
\param listener Listener to register.
*/
void registerListener(Listener& listener)
{
if(mListeners.size() < MAX_NB_LISTENERS)
mListeners.pushBack(&listener);
}
/**
\brief Deregister an existing listener.
\note It is NOT SAFE to register and deregister listeners while allocations may be taking place.
Moreover, there is no thread safety to registration/deregistration.
\param listener Listener to deregister.
*/
void deregisterListener(Listener& listener)
{
mListeners.findAndReplaceWithLast(&listener);
}
/**
\brief Get number of registered listeners.
\return Number of listeners.
*/
uint32_t getNbListeners() const
{
return mListeners.size();
}
/**
\brief Get an existing listener from given index.
\param index Index of the listener, must be < getNbListeners().
\return Listener on given index.
*/
Listener& getListener(uint32_t index)
{
PX_ASSERT(index < mListeners.size());	// was <=, which admitted the out-of-bounds index == size()
return *mListeners[index];
}
protected:
virtual ~PxBroadcast()
{
}
// registered listeners, stored inline to avoid heap traffic
physx::PxInlineArray<Listener*, MAX_NB_LISTENERS, physx::PxAllocator> mListeners;
};
/**
\brief Abstract base class for an application defined memory allocator that allows an external listener
to audit the memory allocations.
*/
class PxBroadcastingAllocator : public PxBroadcast<PxAllocationListener, PxAllocatorCallback>
{
PX_NOCOPY(PxBroadcastingAllocator)
public:
/**
\brief Constructor taking the wrapped allocator and the error reporting sink.
*/
PxBroadcastingAllocator(PxAllocatorCallback& allocator, PxErrorCallback& error) : mAllocator(allocator), mError(error)
{
mListeners.clear();
}
/**
\brief Destructor. Drops all registered listeners.
*/
virtual ~PxBroadcastingAllocator()
{
mListeners.clear();
}
/**
\brief Allocates size bytes of memory, which must be 16-byte aligned.
This method should never return NULL. If you run out of memory, then
you should terminate the app or take some other appropriate action.
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param size Number of bytes to allocate.
\param typeName Name of the datatype that is being allocated
\param filename The source file which allocated the memory
\param line The source line which allocated the memory
\return The allocated block of memory.
*/
void* allocate(size_t size, const char* typeName, const char* filename, int line)
{
void* mem = mAllocator.allocate(size, typeName, filename, line);
if(!mem)
{
mError.reportError(PxErrorCode::eABORT, "User allocator returned NULL.", filename, line);
return NULL;
}
if((size_t(mem) & 15))
{
// NOTE(review): the misaligned block is not returned to mAllocator here, so it
// leaks; presumably acceptable because eABORT means execution should halt -- confirm.
mError.reportError(PxErrorCode::eABORT, "Allocations must be 16-byte aligned.", filename, line);
return NULL;
}
// listeners are only notified for allocations that pass both checks above
for(uint32_t i = 0; i < mListeners.size(); i++)
mListeners[i]->onAllocation(size, typeName, filename, line, mem);
return mem;
}
/**
\brief Frees memory previously allocated by allocate().
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param ptr Memory to free.
*/
void deallocate(void* ptr)
{
// notify listeners before the memory is actually released
for(uint32_t i = 0; i < mListeners.size(); i++)
{
mListeners[i]->onDeallocation(ptr);
}
mAllocator.deallocate(ptr);
}
private:
PxAllocatorCallback& mAllocator;	// the real allocator being wrapped
PxErrorCallback& mError;	// sink for alignment/NULL failures
};
/**
\brief Abstract base class for an application defined error callback that allows an external listener
to report errors.
*/
class PxBroadcastingErrorCallback : public PxBroadcast<PxErrorCallback, PxErrorCallback>
{
PX_NOCOPY(PxBroadcastingErrorCallback)
public:
/**
\brief Constructor registering the given callback as the initial listener.
*/
PxBroadcastingErrorCallback(PxErrorCallback& errorCallback)
{
registerListener(errorCallback);
}
/**
\brief The default destructor. Drops all registered listeners.
*/
virtual ~PxBroadcastingErrorCallback()
{
mListeners.clear();
}
/**
\brief Reports an error code to every registered listener.
\param code Error code, see #PxErrorCode
\param message Message to display.
\param file File the error occurred in.
\param line Line number the error occurred on.
*/
void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line)
{
for(uint32_t i = 0; i < mListeners.size(); i++)
mListeners[i]->reportError(code, message, file, line);
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,55 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONSTRUCTOR_H
#define PX_CONSTRUCTOR_H
#if !PX_DOXYGEN
namespace physx
{
#endif
/** Tag type selecting the zero-initializing constructor of vectors and matrices. */
enum PxZERO
{
PxZero
};
/** Tag type selecting the identity constructor of quaternions, transforms, and matrices. */
enum PxIDENTITY
{
PxIdentity
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif //PX_CONSTRUCTOR_H

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ERROR_CALLBACK_H
#define PX_ERROR_CALLBACK_H
#include "foundation/PxErrors.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief User defined interface class. Used by the library to emit debug information.
\note The SDK state should not be modified from within any error reporting functions.
<b>Threading:</b> The SDK sequences its calls to the output stream using a mutex, so the class need not
be implemented in a thread-safe manner if the SDK is the only client.
*/
class PxErrorCallback
{
public:
virtual ~PxErrorCallback()
{
}
/**
\brief Reports an error code.
\param code Error code, see #PxErrorCode
\param message Message to display.
\param file File the error occurred in.
\param line Line number the error occurred on.
*/
virtual void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line) = 0;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,128 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_ERRORS_H
#define PX_ERRORS_H
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Error codes
These error codes are passed to #PxErrorCallback
\see PxErrorCallback
*/
// Scoped container for the SDK error codes. The individual codes are
// distinct powers of two so they can be combined into a mask
// (see eMASK_ALL and PxFoundation::setErrorLevel).
struct PxErrorCode
{
enum Enum
{
//! \brief No error; the neutral value.
eNO_ERROR = 0,
//! \brief An informational message.
eDEBUG_INFO = 1,
//! \brief a warning message for the user to help with debugging
eDEBUG_WARNING = 2,
//! \brief method called with invalid parameter(s)
eINVALID_PARAMETER = 4,
//! \brief method was called at a time when an operation is not possible
eINVALID_OPERATION = 8,
//! \brief method failed to allocate some memory
eOUT_OF_MEMORY = 16,
/** \brief The library failed for some reason.
Possibly you have passed invalid values like NaNs, which are not checked for.
*/
eINTERNAL_ERROR = 32,
//! \brief An unrecoverable error, execution should be halted and log output flushed
eABORT = 64,
//! \brief The SDK has determined that an operation may result in poor performance.
ePERF_WARNING = 128,
//! \brief A bit mask for including all errors
eMASK_ALL = -1
};
};
// Parameter-validation helpers: in checked builds they report
// eINVALID_PARAMETER through the foundation's error callback when the
// expression is false; in other builds they compile to nothing.
#if PX_CHECKED
// Reports msg when exp is false; usable inside an expression (yields 0/1).
#define PX_CHECK_MSG(exp, msg) (!!(exp) || (PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg), 0) )
// Reports msg and returns from a void function when exp is false.
#define PX_CHECK_AND_RETURN(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return; } }
// Reports msg and returns 0 (null) when exp is false.
#define PX_CHECK_AND_RETURN_NULL(exp, msg) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return 0; } }
// Reports msg and returns r when exp is false.
#define PX_CHECK_AND_RETURN_VAL(exp, msg, r) { if(!(exp)) { PxGetFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, PX_FL, msg); return r; } }
#else
// No-op versions for non-checked builds.
#define PX_CHECK_MSG(exp, msg)
#define PX_CHECK_AND_RETURN(exp, msg)
#define PX_CHECK_AND_RETURN_NULL(exp, msg)
#define PX_CHECK_AND_RETURN_VAL(exp, msg, r)
#endif
// shortcut macros:
// usage: PxGetFoundation().error(PX_WARN, "static friction %f is lower than dynamic friction %f", sfr, dfr);
// Each expands to an error code plus the current file/line (PX_FL), matching
// the leading parameters of PxFoundation::error().
#define PX_WARN ::physx::PxErrorCode::eDEBUG_WARNING, PX_FL
#define PX_INFO ::physx::PxErrorCode::eDEBUG_INFO, PX_FL
#if PX_DEBUG || PX_CHECKED
// Emits the warning at most once per warn-once epoch: the message is
// reported again only after PxGetWarnOnceTimeStamp() changes.
#define PX_WARN_ONCE(string) \
{ \
static PxU32 timestamp = 0; \
const PxU32 ts = PxGetWarnOnceTimeStamp(); \
if(timestamp != ts) \
{ \
timestamp = ts; \
PxGetFoundation().error(PX_WARN, string); \
} \
}
// Conditional variant of PX_WARN_ONCE.
#define PX_WARN_ONCE_IF(condition, string) \
{ \
if(condition) \
{ \
PX_WARN_ONCE(string) \
} \
}
#else
// Warnings compile out entirely in profile/release builds.
#define PX_WARN_ONCE(string) ((void)0)
#define PX_WARN_ONCE_IF(condition, string) ((void)0)
#endif
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,100 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FPU_H
#define PX_FPU_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxAssert.h"
#include "foundation/PxFoundationConfig.h"
// Bit-level reinterpretation helpers (type-punning through reference casts).
#define PX_IR(x) ((PxU32&)(x)) // integer representation of a floating-point value.
#define PX_SIR(x) ((PxI32&)(x)) // signed integer representation of a floating-point value.
#define PX_FR(x) ((PxReal&)(x)) // floating-point representation of an integer value.
// Scoped guards: declare one at the top of a scope to save/restore FP state.
#define PX_FPU_GUARD PxFPUGuard scopedFpGuard;
#define PX_SIMD_GUARD PxSIMDGuard scopedFpGuard;
#define PX_SIMD_GUARD_CNDT(x) PxSIMDGuard scopedFpGuard(x);
#if !PX_DOXYGEN
namespace physx
{
#endif
// sets the default SDK state for scalar and SIMD units
// RAII guard: the constructor saves the current control words and installs
// the settings PhysX expects; the destructor restores the saved state.
class PX_FOUNDATION_API PxFPUGuard
{
public:
PxFPUGuard(); // set fpu control word for PhysX
~PxFPUGuard(); // restore fpu control word
private:
PxU32 mControlWords[8]; // saved control words, restored by the destructor
};
// sets default SDK state for simd unit only, lighter weight than FPUGuard
class PxSIMDGuard
{
public:
PX_INLINE PxSIMDGuard(bool enable = true); // set simd control word for PhysX; pass false to make the guard a no-op
PX_INLINE ~PxSIMDGuard(); // restore simd control word
private:
#if !(PX_LINUX || PX_OSX) || (!PX_EMSCRIPTEN && PX_INTEL_FAMILY)
// State is only needed on platforms where the guard actually modifies the
// SIMD control word (see the per-platform implementations included below).
PxU32 mControlWord; // saved SIMD control word
bool mEnabled; // remembers 'enable' so the destructor knows whether to restore
#endif
};
/**
\brief Enables floating point exceptions for the scalar and SIMD unit
*/
PX_FOUNDATION_API void PxEnableFPExceptions();
/**
\brief Disables floating point exceptions for the scalar and SIMD unit
*/
PX_FOUNDATION_API void PxDisableFPExceptions();
#if !PX_DOXYGEN
} // namespace physx
#endif
#if PX_WINDOWS_FAMILY
#include "foundation/windows/PxWindowsFPU.h"
#elif (PX_LINUX && PX_SSE2) || PX_OSX
#include "foundation/unix/PxUnixFPU.h"
#else
// Fallback for platforms without a dedicated SIMD control word to manage:
// the guard intentionally does nothing.
PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool)
{
}
PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard()
{
}
#endif
#endif

View File

@@ -0,0 +1,381 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FLAGS_H
#define PX_FLAGS_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxIO.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Container for bitfield flag variables associated with a specific enum type.
This allows for type safe manipulation for bitfields.
<h3>Example</h3>
// enum that defines each bit...
struct MyEnum
{
enum Enum
{
eMAN = 1,
eBEAR = 2,
ePIG = 4,
};
};
// implements some convenient global operators.
PX_FLAGS_OPERATORS(MyEnum::Enum, PxU8);
PxFlags<MyEnum::Enum, PxU8> myFlags;
myFlags |= MyEnum::eMAN;
myFlags |= MyEnum::eBEAR | MyEnum::ePIG;
if(myFlags & MyEnum::eBEAR)
{
doSomething();
}
*/
template <typename enumtype, typename storagetype = PxU32>
class PxFlags
{
public:
// Underlying integer type that stores the bits.
typedef storagetype InternalType;
// Intentionally leaves mBits uninitialized (PxEMPTY tag constructor).
PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(const PxEMPTY)
{
}
// Default construction clears all bits.
PX_CUDA_CALLABLE PX_INLINE PxFlags();
// Implicit conversion from a single enum value.
PX_CUDA_CALLABLE PX_INLINE PxFlags(enumtype e);
PX_CUDA_CALLABLE PX_INLINE PxFlags(const PxFlags<enumtype, storagetype>& f);
// Explicit construction from a raw bit pattern.
PX_CUDA_CALLABLE PX_INLINE explicit PxFlags(storagetype b);
// Comparisons against a single flag value, another flag set, or bool(*this).
PX_CUDA_CALLABLE PX_INLINE bool operator==(enumtype e) const;
PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxFlags<enumtype, storagetype>& f) const;
PX_CUDA_CALLABLE PX_INLINE bool operator==(bool b) const;
PX_CUDA_CALLABLE PX_INLINE bool operator!=(enumtype e) const;
PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxFlags<enumtype, storagetype>& f) const;
// Assignment and bitwise operators mirror built-in integer semantics.
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(const PxFlags<enumtype, storagetype>& f);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator=(enumtype e);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(enumtype e);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator|=(const PxFlags<enumtype, storagetype>& f);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype e) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(const PxFlags<enumtype, storagetype>& f) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(enumtype e);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator&=(const PxFlags<enumtype, storagetype>& f);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype e) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(const PxFlags<enumtype, storagetype>& f) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(enumtype e);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& operator^=(const PxFlags<enumtype, storagetype>& f);
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(enumtype e) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator^(const PxFlags<enumtype, storagetype>& f) const;
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~() const;
// True when any bit is set.
PX_CUDA_CALLABLE PX_INLINE operator bool() const;
// Truncating conversions to the common unsigned widths.
PX_CUDA_CALLABLE PX_INLINE operator PxU8() const;
PX_CUDA_CALLABLE PX_INLINE operator PxU16() const;
PX_CUDA_CALLABLE PX_INLINE operator PxU32() const;
// clear/raise remove or add one flag; isSet tests that all bits of e are present.
PX_CUDA_CALLABLE PX_INLINE void clear(enumtype e);
PX_CUDA_CALLABLE PX_INLINE void raise(enumtype e);
PX_CUDA_CALLABLE PX_INLINE bool isSet(enumtype e) const;
// Overwrites the whole bitfield with e.
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& setAll(enumtype e);
public:
// Allows 'enum & flags' with the enum value on the left-hand side.
friend PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, PxFlags<enumtype, storagetype>& b)
{
PxFlags<enumtype, storagetype> out;
out.mBits = a & b.mBits;
return out;
}
private:
storagetype mBits;
};
#if !PX_DOXYGEN
// Defines the free-standing |, & and ~ operators for a flag enum so that
// expressions such as 'eA | eB' produce a PxFlags value instead of an int.
#define PX_FLAGS_OPERATORS(enumtype, storagetype) \
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator|(enumtype a, enumtype b) \
{ \
PxFlags<enumtype, storagetype> r(a); \
r |= b; \
return r; \
} \
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator&(enumtype a, enumtype b) \
{ \
PxFlags<enumtype, storagetype> r(a); \
r &= b; \
return r; \
} \
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> operator~(enumtype a) \
{ \
return ~PxFlags<enumtype, storagetype>(a); \
}
// Declares the canonical typedef for a flag enum (x##s) plus its operators.
#define PX_FLAGS_TYPEDEF(x, y) \
typedef PxFlags<x::Enum, y> x##s; \
PX_FLAGS_OPERATORS(x::Enum, y)
// Default constructor: starts with no flags raised.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags()
{
mBits = 0;
}
// Construct from a single enum value.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(enumtype e)
{
mBits = static_cast<storagetype>(e);
}
// Copy constructor.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(const PxFlags<enumtype, storagetype>& f)
{
mBits = f.mBits;
}
// Construct from a raw bit pattern (explicit; see the class declaration).
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::PxFlags(storagetype b)
{
mBits = b;
}
// Exact comparison against a single enum value (not a subset test; see isSet).
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(enumtype e) const
{
return mBits == static_cast<storagetype>(e);
}
// Bitwise equality of two flag sets.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(const PxFlags<enumtype, storagetype>& f) const
{
return mBits == f.mBits;
}
// Compares the any-bit-set state against b.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator==(bool b) const
{
return bool(*this) == b;
}
// Negations of the equality operators above.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(enumtype e) const
{
return mBits != static_cast<storagetype>(e);
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::operator!=(const PxFlags<enumtype, storagetype>& f) const
{
return mBits != f.mBits;
}
// Assign from a single enum value (replaces all bits).
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(enumtype e)
{
mBits = static_cast<storagetype>(e);
return *this;
}
// Copy assignment.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator=(const PxFlags<enumtype, storagetype>& f)
{
mBits = f.mBits;
return *this;
}
// Raise the bits of a single flag.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator|=(enumtype e)
{
mBits |= static_cast<storagetype>(e);
return *this;
}
// Raise every bit present in another flag set.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator|=(const PxFlags<enumtype, storagetype>& f)
{
mBits |= f.mBits;
return *this;
}
// Non-mutating unions, implemented on top of operator|=.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator|(enumtype e) const
{
PxFlags<enumtype, storagetype> out(*this);
out |= e;
return out;
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator|(const PxFlags<enumtype, storagetype>& f) const
{
PxFlags<enumtype, storagetype> out(*this);
out |= f;
return out;
}
// Keep only the bits also present in e.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator&=(enumtype e)
{
mBits &= static_cast<storagetype>(e);
return *this;
}
// Keep only the bits also present in f.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator&=(const PxFlags<enumtype, storagetype>& f)
{
mBits &= f.mBits;
return *this;
}
// Non-mutating intersections.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator&(enumtype e) const
{
PxFlags<enumtype, storagetype> out = *this;
out.mBits &= static_cast<storagetype>(e);
return out;
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator&(const PxFlags<enumtype, storagetype>& f) const
{
PxFlags<enumtype, storagetype> out = *this;
out.mBits &= f.mBits;
return out;
}
// Toggle the bits of a single flag.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::operator^=(enumtype e)
{
mBits ^= static_cast<storagetype>(e);
return *this;
}
// Toggle every bit present in f.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::
operator^=(const PxFlags<enumtype, storagetype>& f)
{
mBits ^= f.mBits;
return *this;
}
// Non-mutating symmetric differences.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator^(enumtype e) const
{
PxFlags<enumtype, storagetype> out = *this;
out.mBits ^= static_cast<storagetype>(e);
return out;
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::
operator^(const PxFlags<enumtype, storagetype>& f) const
{
PxFlags<enumtype, storagetype> out = *this;
out.mBits ^= f.mBits;
return out;
}
// Bitwise complement over the full storage width (may set bits outside the enum).
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype> PxFlags<enumtype, storagetype>::operator~() const
{
PxFlags<enumtype, storagetype> out;
out.mBits = storagetype(~mBits);
return out;
}
// True when any flag is raised.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator bool() const
{
return mBits ? true : false;
}
// Truncating conversions to the fixed-width unsigned integer types.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator PxU8() const
{
return static_cast<PxU8>(mBits);
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator PxU16() const
{
return static_cast<PxU16>(mBits);
}
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>::operator PxU32() const
{
return static_cast<PxU32>(mBits);
}
// Remove the bits of a single flag.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::clear(enumtype e)
{
mBits &= ~static_cast<storagetype>(e);
}
// Raise the bits of a single flag.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE void PxFlags<enumtype, storagetype>::raise(enumtype e)
{
mBits |= static_cast<storagetype>(e);
}
// True only if every bit of e is raised in this set.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE bool PxFlags<enumtype, storagetype>::isSet(enumtype e) const
{
return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e);
}
// Replace the whole bitfield with the bits of e.
template <typename enumtype, typename storagetype>
PX_CUDA_CALLABLE PX_INLINE PxFlags<enumtype, storagetype>& PxFlags<enumtype, storagetype>::setAll(enumtype e)
{
mBits = static_cast<storagetype>(e);
return *this;
}
} // namespace physx
#endif //!PX_DOXYGEN
#endif

View File

@@ -0,0 +1,233 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_H
#define PX_FOUNDATION_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundationConfig.h"
#include <stdarg.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxAllocationListener;
class PxErrorCallback;
class PxAllocatorCallback;
/**
\brief Foundation SDK singleton class.
You need to have an instance of this class to instance the higher level SDKs.
*/
class PX_FOUNDATION_API PxFoundation
{
public:
/**
\brief Destroys the instance it is called on.
The operation will fail, if there are still modules referencing the foundation object. Release all dependent modules
prior to calling this method.
\see PxCreateFoundation()
*/
virtual void release() = 0;
/**
retrieves error callback
*/
virtual PxErrorCallback& getErrorCallback() = 0;
/**
Sets mask of errors to report.
*/
virtual void setErrorLevel(PxErrorCode::Enum mask = PxErrorCode::eMASK_ALL) = 0;
/**
Retrieves mask of errors to be reported.
*/
virtual PxErrorCode::Enum getErrorLevel() const = 0;
/**
Retrieves the allocator this object was created with.
*/
virtual PxAllocatorCallback& getAllocatorCallback() = 0;
/**
Retrieves if allocation names are being passed to allocator callback.
*/
virtual bool getReportAllocationNames() const = 0;
/**
Set if allocation names are being passed to allocator callback.
\details Enabled by default in debug and checked build, disabled by default in profile and release build.
*/
virtual void setReportAllocationNames(bool value) = 0;
// Add/remove listeners that are notified about allocations.
virtual void registerAllocationListener(PxAllocationListener& listener) = 0;
virtual void deregisterAllocationListener(PxAllocationListener& listener) = 0;
// Add/remove additional error callbacks that also receive reported errors.
virtual void registerErrorCallback(PxErrorCallback& callback) = 0;
virtual void deregisterErrorCallback(PxErrorCallback& callback) = 0;
// Reports an error with printf-style formatting; the second overload takes
// an already-started va_list.
virtual bool error(PxErrorCode::Enum c, const char* file, int line, const char* messageFmt, ...) = 0;
virtual bool error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, va_list) = 0;
protected:
// Non-public: instances are destroyed through release(), not delete.
virtual ~PxFoundation()
{
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
// PT: use this to make generated code shorter (e.g. from 52 to 24 bytes of assembly (10 to 4 instructions))
// We must use a macro here to let __FILE__ expand to the proper filename (it doesn't work with an inlined function).
#define PX_IMPLEMENT_OUTPUT_ERROR \
template<const int errorCode> \
static PX_NOINLINE bool outputError(int line, const char* message) \
{ \
return PxGetFoundation().error(PxErrorCode::Enum(errorCode), __FILE__, line, message); \
}
/**
\brief Creates an instance of the foundation class
The foundation class is needed to initialize higher level SDKs. There may be only one instance per process.
Calling this method after an instance has been created already will result in an error message and NULL will be
returned.
\param version Version number we are expecting (should be #PX_PHYSICS_VERSION)
\param allocator User supplied interface for allocating memory(see #PxAllocatorCallback)
\param errorCallback User supplied interface for reporting errors and displaying messages(see #PxErrorCallback)
\return Foundation instance on success, NULL if operation failed
\see PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxCreateFoundation(physx::PxU32 version, physx::PxAllocatorCallback& allocator, physx::PxErrorCallback& errorCallback);
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetFoundationInstance(physx::PxFoundation& foundation);
/**
\brief Retrieves the Foundation SDK after it has been created.
\note The behavior of this method is undefined if the foundation instance has not been created already.
\see PxCreateFoundation(), PxIsFoundationValid()
*/
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-type-c-linkage"
#endif // PX_LINUX
#endif // PX_CLANG
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation& PX_CALL_CONV PxGetFoundation();
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif // PX_LINUX
#endif // PX_CLANG
/**
\brief Similar to PxGetFoundation() except it handles the case if the foundation was not created already.
\return Pointer to the foundation if an instance is currently available, otherwise null.
\see PxCreateFoundation(), PxGetFoundation()
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxFoundation* PX_CALL_CONV PxIsFoundationValid();
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxProfilerCallback;
class PxAllocatorCallback;
class PxErrorCallback;
#if !PX_DOXYGEN
} // namespace physx
#endif
/**
\brief Get the callback that will be used for all profiling.
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxProfilerCallback* PX_CALL_CONV PxGetProfilerCallback();
/**
\brief Set the callback that will be used for all profiling.
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxSetProfilerCallback(physx::PxProfilerCallback* profiler);
/**
\brief Get the allocator callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetAllocatorCallback();
/**
\brief Get the broadcasting allocator callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxAllocatorCallback* PX_CALL_CONV PxGetBroadcastAllocator(bool* reportAllocationNames = NULL);
/**
\brief Get the error callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetErrorCallback();
/**
\brief Get the broadcasting error callback
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxErrorCallback* PX_CALL_CONV PxGetBroadcastError();
/**
\brief Get the warn once timestamp
*/
PX_C_EXPORT PX_FOUNDATION_API physx::PxU32 PX_CALL_CONV PxGetWarnOnceTimeStamp();
/**
\brief Decrement the ref count of PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxDecFoundationRefCount();
/**
\brief Increment the ref count of PxFoundation
*/
PX_C_EXPORT PX_FOUNDATION_API void PX_CALL_CONV PxIncFoundationRefCount();
#endif

View File

@@ -0,0 +1,52 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_CONFIG_H
#define PX_FOUNDATION_CONFIG_H
#include "foundation/PxPreprocessor.h"
// PX_FOUNDATION_API marks the public foundation entry points. It expands to
// the platform's shared-library export/import decoration, or to nothing for
// static-library builds.
#if defined PX_PHYSX_STATIC_LIB
#define PX_FOUNDATION_API
#else
#if PX_WINDOWS_FAMILY && !PX_CUDA_COMPILER
#if defined PX_PHYSX_FOUNDATION_EXPORTS
// Building the foundation DLL itself: export the symbols.
#define PX_FOUNDATION_API __declspec(dllexport)
#else
// Consuming the foundation DLL: import the symbols.
#define PX_FOUNDATION_API __declspec(dllimport)
#endif
#elif PX_UNIX_FAMILY
#define PX_FOUNDATION_API PX_UNIX_EXPORT
#else
#define PX_FOUNDATION_API
#endif
#endif
#endif

View File

@@ -0,0 +1,163 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HASH_H
#define PX_HASH_H
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxString.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4302)
#endif
#if PX_LINUX
#include "foundation/PxSimpleTypes.h"
#endif
/*!
Central definition of hash functions
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
// Hash functions
// Thomas Wang's 32 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
PX_FORCE_INLINE uint32_t PxComputeHash(const uint32_t key)
{
	// Thomas Wang's 32-bit integer mix: alternating add/xor-shift rounds
	// that diffuse every input bit across the whole word.
	uint32_t h = key;
	h = h + ~(h << 15);
	h = h ^ (h >> 10);
	h = h + (h << 3);
	h = h ^ (h >> 6);
	h = h + ~(h << 11);
	h = h ^ (h >> 16);
	return h;
}
// Signed 32-bit keys reuse the unsigned mix above (hashes the two's-complement bit pattern).
PX_FORCE_INLINE uint32_t PxComputeHash(const int32_t key)
{
return PxComputeHash(uint32_t(key));
}
// Thomas Wang's 64 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
PX_FORCE_INLINE uint32_t PxComputeHash(const uint64_t key)
{
	// Thomas Wang's 64-bit integer mix, folded down to 32 bits at the end.
	uint64_t h = key;
	h = h + ~(h << 32);
	h = h ^ (h >> 22);
	h = h + ~(h << 13);
	h = h ^ (h >> 8);
	h = h + (h << 3);
	h = h ^ (h >> 15);
	h = h + ~(h << 27);
	h = h ^ (h >> 31);
	// Keep only the low 32 bits of the mixed value.
	return uint32_t(h & UINT32_MAX);
}
#if PX_APPLE_FAMILY
// hash for size_t, to make gcc happy
PX_INLINE uint32_t PxComputeHash(const size_t key)
{
// Dispatch on pointer width: size_t matches uint64_t on 64-bit targets,
// uint32_t otherwise.
#if PX_P64_FAMILY
return PxComputeHash(uint64_t(key));
#else
return PxComputeHash(uint32_t(key));
#endif
}
#endif
// Hash function for pointers
PX_INLINE uint32_t PxComputeHash(const void* ptr)
{
// Hashes the pointer's numeric value at the platform's pointer width.
#if PX_P64_FAMILY
return PxComputeHash(uint64_t(ptr));
#else
return PxComputeHash(uint32_t(UINT32_MAX & size_t(ptr)));
#endif
}
// Hash function for pairs
template <typename F, typename S>
PX_INLINE uint32_t PxComputeHash(const PxPair<F, S>& p)
{
// Combine the two member hashes with multiplicative mixing so that the
// result depends on the order of the pair's elements.
uint32_t seed = 0x876543;
uint32_t m = 1000007;
return PxComputeHash(p.second) ^ (m * (PxComputeHash(p.first) ^ (m * seed)));
}
// Default hash functor for the hash map/set template parameter: forwards to
// the PxComputeHash overload for Key and compares keys with operator==.
template <class Key>
struct PxHash
{
	uint32_t operator()(const Key& key) const
	{
		return PxComputeHash(key);
	}
	bool equal(const Key& a, const Key& b) const
	{
		return a == b;
	}
};
// Specialization for C strings: hashes the character contents (not the
// pointer value) and compares by content via Pxstrcmp.
template <>
struct PxHash<const char*>
{
  public:
	uint32_t operator()(const char* s) const
	{
		// Daniel Bernstein's "DJB" string hash, XOR variant: h = h*33 ^ c.
		uint32_t h = 5381;
		for(const uint8_t* p = reinterpret_cast<const uint8_t*>(s); *p; ++p)
			h = ((h << 5) + h) ^ uint32_t(*p);
		return h;
	}
	bool equal(const char* a, const char* b) const
	{
		return !Pxstrcmp(a, b);
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#if PX_VC
#pragma warning(pop)
#endif
#endif

View File

@@ -0,0 +1,791 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HASH_INTERNALS_H
#define PX_HASH_INTERNALS_H
#include "foundation/PxAllocator.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxHash.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
// Chained open-hashing table underlying PxHashSet/PxHashMap.
// Storage is a single buffer holding three parallel arrays:
//   mHash[mHashSize]               bucket heads (index into mEntries, or EOL)
//   mEntriesNext[mEntriesCapacity] per-entry chain link (doubles as free list)
//   mEntries[mEntriesCapacity]     the entries themselves
// When 'compacting' is true, live entries always occupy the contiguous range
// [0, mEntriesCount): erase moves the last entry into the freed slot, which
// makes getEntries() iteration fast at the cost of a more expensive erase.
template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting>
class PxHashBase : private PxAllocator
{
	// Puts the table into a valid empty state; shared by all constructors.
	void init(uint32_t initialTableSize, float loadFactor)
	{
		mBuffer = NULL;
		mEntries = NULL;
		mEntriesNext = NULL;
		mHash = NULL;
		mEntriesCapacity = 0;
		mHashSize = 0;
		mLoadFactor = loadFactor;
		mFreeList = uint32_t(EOL);
		mTimestamp = 0;
		mEntriesCount = 0;
		if(initialTableSize)
			reserveInternal(initialTableSize);
	}

  public:
	typedef Entry EntryType;

	PxHashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : PxAllocator("hashBase")
	{
		init(initialTableSize, loadFactor);
	}
	PxHashBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc) : PxAllocator(alloc)
	{
		init(initialTableSize, loadFactor);
	}
	PxHashBase(const PxAllocator& alloc) : PxAllocator(alloc)
	{
		init(64, 0.75f);
	}
	~PxHashBase()
	{
		destroy(); // No need to clear()
		if(mBuffer)
			PxAllocator::deallocate(mBuffer);
	}

	// Sentinel terminating a bucket chain or the free list.
	static const uint32_t EOL = 0xffffffff;

	// Returns a slot for key k. If the key is already present, 'exists' is set
	// and the existing entry is returned; otherwise an *uninitialized* slot is
	// returned and the caller must placement-new the entry into it.
	PX_INLINE Entry* create(const Key& k, bool& exists)
	{
		uint32_t h = 0;
		if(mHashSize)
		{
			h = hash(k);
			uint32_t index = mHash[h];
			while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
				index = mEntriesNext[index];
			exists = index != EOL;
			if(exists)
				return mEntries + index;
		}
		else
			exists = false;
		if(freeListEmpty())
		{
			grow();
			// the table was resized, so the bucket index must be recomputed
			h = hash(k);
		}
		uint32_t entryIndex = freeListGetNext();
		// link the new entry at the head of its bucket chain
		mEntriesNext[entryIndex] = mHash[h];
		mHash[h] = entryIndex;
		mEntriesCount++;
		mTimestamp++;
		return mEntries + entryIndex;
	}

	// Returns the entry for k, or NULL if not present.
	PX_INLINE const Entry* find(const Key& k) const
	{
		if(!mEntriesCount)
			return NULL;
		const uint32_t h = hash(k);
		uint32_t index = mHash[h];
		while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
			index = mEntriesNext[index];
		return index != EOL ? mEntries + index : NULL;
	}

	// Erases k, copy-constructing the removed entry into e first.
	// Returns false if k was not present (e is left untouched).
	PX_INLINE bool erase(const Key& k, Entry& e)
	{
		if(!mEntriesCount)
			return false;
		const uint32_t h = hash(k);
		// walk the chain keeping a pointer to the link that points at the match,
		// so eraseInternal can unlink it in place
		uint32_t* ptr = mHash + h;
		while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k))
			ptr = mEntriesNext + *ptr;
		if(*ptr == EOL)
			return false;
		PX_PLACEMENT_NEW(&e, Entry)(mEntries[*ptr]);
		return eraseInternal(ptr);
	}

	// Erases k; returns false if it was not present.
	PX_INLINE bool erase(const Key& k)
	{
		if(!mEntriesCount)
			return false;
		const uint32_t h = hash(k);
		uint32_t* ptr = mHash + h;
		while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k))
			ptr = mEntriesNext + *ptr;
		if(*ptr == EOL)
			return false;
		return eraseInternal(ptr);
	}

	// Number of entries currently stored.
	PX_INLINE uint32_t size() const
	{
		return mEntriesCount;
	}

	// Current bucket count (not the entry capacity).
	PX_INLINE uint32_t capacity() const
	{
		return mHashSize;
	}

	// Destroys all entries and resets the table to empty; allocated storage is kept.
	void clear()
	{
		if(!mHashSize || mEntriesCount == 0)
			return;
		destroy();
		// EOL is 0xffffffff, so a bytewise 0xff fill sets every bucket to EOL
		intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t));
		// rebuild the free list as one chain over all slots
		const uint32_t sizeMinus1 = mEntriesCapacity - 1;
		for(uint32_t i = 0; i < sizeMinus1; i++)
		{
			PxPrefetchLine(mEntriesNext + i, 128);
			mEntriesNext[i] = i + 1;
		}
		mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL);
		mFreeList = 0;
		mEntriesCount = 0;
	}

	// Grows the table to at least 'size' buckets; never shrinks.
	void reserve(uint32_t size)
	{
		if(size > mHashSize)
			reserveInternal(size);
	}

	// Raw access to the entry array. For compacting tables the live entries
	// occupy the contiguous range [0, size()).
	PX_INLINE const Entry* getEntries() const
	{
		return mEntries;
	}

	// Inserts a key asserted to be absent. Note: unlike create(), this does not
	// grow the table - the caller must ensure free capacity (freeListGetNext
	// asserts the free list is non-empty). Returns an uninitialized slot.
	PX_INLINE Entry* insertUnique(const Key& k)
	{
		PX_ASSERT(find(k) == NULL);
		uint32_t h = hash(k);
		uint32_t entryIndex = freeListGetNext();
		mEntriesNext[entryIndex] = mHash[h];
		mHash[h] = entryIndex;
		mEntriesCount++;
		mTimestamp++;
		return mEntries + entryIndex;
	}

  private:
	// Destructs every live entry by walking all bucket chains; does not free
	// storage or reset any bookkeeping (callers do that themselves).
	void destroy()
	{
		for(uint32_t i = 0; i < mHashSize; i++)
		{
			for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j])
				mEntries[j].~Entry();
		}
	}

	template <typename HK, typename GK, class A, bool comp>
	PX_NOINLINE void copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other);

	// free list management - if we're coalescing, then we use mFreeList to hold
	// the top of the free list and it should always be equal to size(). Otherwise,
	// we build a free list in the next() pointers.
	PX_INLINE void freeListAdd(uint32_t index)
	{
		if(compacting)
		{
			mFreeList--;
			PX_ASSERT(mFreeList == mEntriesCount);
		}
		else
		{
			mEntriesNext[index] = mFreeList;
			mFreeList = index;
		}
	}
	// Adds the slot range [start, end) to the free list after a resize.
	PX_INLINE void freeListAdd(uint32_t start, uint32_t end)
	{
		if(!compacting)
		{
			for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list
				mEntriesNext[i] = i + 1;
			// link in old free list
			mEntriesNext[end - 1] = mFreeList;
			PX_ASSERT(mFreeList != end - 1);
			mFreeList = start;
		}
		else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty
			mFreeList = start;
	}
	// Pops and returns the next free slot index.
	PX_INLINE uint32_t freeListGetNext()
	{
		PX_ASSERT(!freeListEmpty());
		if(compacting)
		{
			PX_ASSERT(mFreeList == mEntriesCount);
			return mFreeList++;
		}
		else
		{
			uint32_t entryIndex = mFreeList;
			mFreeList = mEntriesNext[mFreeList];
			return entryIndex;
		}
	}
	PX_INLINE bool freeListEmpty() const
	{
		if(compacting)
			return mEntriesCount == mEntriesCapacity;
		else
			return mFreeList == EOL;
	}
	// Compacting erase helper, called after mEntriesCount was decremented:
	// moves the (old) last entry into the freed slot 'index' and patches the
	// one chain link that pointed at the moved entry.
	PX_INLINE void replaceWithLast(uint32_t index)
	{
		PX_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]);
		mEntries[mEntriesCount].~Entry();
		mEntriesNext[index] = mEntriesNext[mEntriesCount];
		// find the link referencing the moved entry and redirect it
		uint32_t h = hash(GetKey()(mEntries[index]));
		uint32_t* ptr;
		for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr)
			PX_ASSERT(*ptr != EOL);
		*ptr = index;
	}

	// hashSize is always a power of two, so masking replaces modulo.
	PX_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const
	{
		return HashFn()(k) & (hashSize - 1);
	}
	PX_INLINE uint32_t hash(const Key& k) const
	{
		return hash(k, mHashSize);
	}

	// Unlinks the entry referenced by chain link *ptr, destroys it and returns
	// its slot to the free list (moving the last entry down when compacting).
	PX_INLINE bool eraseInternal(uint32_t* ptr)
	{
		const uint32_t index = *ptr;
		*ptr = mEntriesNext[index];
		mEntries[index].~Entry();
		mEntriesCount--;
		mTimestamp++;
		if (compacting && index != mEntriesCount)
			replaceWithLast(index);
		freeListAdd(index);
		return true;
	}

	// Allocates a new buffer of 'size' buckets (rounded up to a power of two),
	// re-hashes all live entries into it and releases the old buffer.
	PX_NOINLINE void reserveInternal(uint32_t size)
	{
		if(!PxIsPowerOfTwo(size))
			size = PxNextPowerOfTwo(size);
		PX_ASSERT(!(size & (size - 1)));
		// decide whether iteration can be done on the entries directly
		bool resizeCompact = compacting || freeListEmpty();
		// define new table sizes
		uint32_t oldEntriesCapacity = mEntriesCapacity;
		uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor);
		uint32_t newHashSize = size;
		// allocate new common buffer and setup pointers to new tables
		uint8_t* newBuffer;
		uint32_t* newHash;
		uint32_t* newEntriesNext;
		Entry* newEntries;
		{
			const uint64_t newEntriesNextBytesOffset = newHashSize * sizeof(uint32_t);
			uint64_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t);
			// round the entry array start up to a 16-byte boundary
			newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15;
			const uint64_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry);
			newBuffer = reinterpret_cast<uint8_t*>(PxAllocator::allocate(newBufferByteSize, PX_FL));
			PX_ASSERT(newBuffer);
			newHash = reinterpret_cast<uint32_t*>(newBuffer);
			newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset);
			newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset);
		}
		// initialize new hash table
		intrinsics::memSet(newHash, int32_t(EOL), newHashSize * sizeof(uint32_t));
		// iterate over old entries, re-hash and create new entries
		if(resizeCompact)
		{
			// check that old free list is empty - we don't need to copy the next entries
			PX_ASSERT(compacting || mFreeList == EOL);
			for(uint32_t index = 0; index < mEntriesCount; ++index)
			{
				uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
				newEntriesNext[index] = newHash[h];
				newHash[h] = index;
				PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
				mEntries[index].~Entry();
			}
		}
		else
		{
			// copy old free list, only required for non compact resizing
			intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t));
			for(uint32_t bucket = 0; bucket < mHashSize; bucket++)
			{
				uint32_t index = mHash[bucket];
				while(index != EOL)
				{
					uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
					newEntriesNext[index] = newHash[h];
					PX_ASSERT(index != newHash[h]);
					newHash[h] = index;
					PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
					mEntries[index].~Entry();
					index = mEntriesNext[index];
				}
			}
		}
		// swap buffer and pointers
		PxAllocator::deallocate(mBuffer);
		mBuffer = newBuffer;
		mHash = newHash;
		mHashSize = newHashSize;
		mEntriesNext = newEntriesNext;
		mEntries = newEntries;
		mEntriesCapacity = newEntriesCapacity;
		freeListAdd(oldEntriesCapacity, newEntriesCapacity);
	}

	// Doubles the bucket count (16 buckets for an empty table).
	void grow()
	{
		PX_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity)));
		uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2;
		reserve(size);
	}

	uint8_t* mBuffer;            // single allocation backing the three arrays below
	Entry* mEntries;
	uint32_t* mEntriesNext;      // same size as mEntries
	uint32_t* mHash;
	uint32_t mEntriesCapacity;
	uint32_t mHashSize;
	float mLoadFactor;           // entry capacity = bucket count * load factor
	uint32_t mFreeList;          // head of free list (non-compacting) or == mEntriesCount (compacting)
	uint32_t mTimestamp;         // bumped on every insert/erase; used to detect stale iterators
	uint32_t mEntriesCount;      // number of entries

  public:
	// Forward iterator over all entries, walking the bucket chains in order.
	// Any insert or erase invalidates the iterator; debug builds assert this
	// by comparing against the table's timestamp.
	class Iter
	{
	  public:
		PX_INLINE Iter(PxHashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b)
		{
			if(mBase.mEntriesCapacity > 0)
			{
				mEntry = mBase.mHash[0];
				skip();
			}
		}

		PX_INLINE void check() const
		{
			PX_ASSERT(mTimestamp == mBase.mTimestamp);
		}
		PX_INLINE const Entry& operator*() const
		{
			check();
			return mBase.mEntries[mEntry];
		}
		PX_INLINE Entry& operator*()
		{
			check();
			return mBase.mEntries[mEntry];
		}
		PX_INLINE const Entry* operator->() const
		{
			check();
			return mBase.mEntries + mEntry;
		}
		PX_INLINE Entry* operator->()
		{
			check();
			return mBase.mEntries + mEntry;
		}
		PX_INLINE Iter operator++()
		{
			check();
			advance();
			return *this;
		}
		PX_INLINE Iter operator++(int)
		{
			check();
			Iter i = *this;
			advance();
			return i;
		}
		PX_INLINE bool done() const
		{
			check();
			return mEntry == mBase.EOL;
		}

	  private:
		PX_INLINE void advance()
		{
			mEntry = mBase.mEntriesNext[mEntry];
			skip();
		}
		// Moves to the next non-empty bucket when the current chain is exhausted.
		PX_INLINE void skip()
		{
			while(mEntry == mBase.EOL)
			{
				if(++mBucket == mBase.mHashSize)
					break;
				mEntry = mBase.mHash[mBucket];
			}
		}
		Iter& operator=(const Iter&);
		uint32_t mBucket;
		uint32_t mEntry;
		uint32_t mTimestamp;
		PxHashBase& mBase;
	};

	/*!
	Iterate over entries in a hash base and allow entry erase while iterating
	*/
	class PxEraseIterator
	{
	  public:
		PX_INLINE PxEraseIterator(PxHashBase& b): mBase(b)
		{
			reset();
		}

		// Advances to the next entry; if eraseCurrent is set, the current entry
		// is erased first. Returns NULL when iteration is complete.
		PX_INLINE Entry* eraseCurrentGetNext(bool eraseCurrent)
		{
			if(eraseCurrent && mCurrentEntryIndexPtr)
			{
				mBase.eraseInternal(mCurrentEntryIndexPtr);
				// if next was valid return the same ptr, if next was EOL search new hash entry
				if(*mCurrentEntryIndexPtr != mBase.EOL)
					return mBase.mEntries + *mCurrentEntryIndexPtr;
				else
					return traverseHashEntries();
			}
			// traverse mHash to find next entry
			if(mCurrentEntryIndexPtr == NULL)
				return traverseHashEntries();
			const uint32_t index = *mCurrentEntryIndexPtr;
			if(mBase.mEntriesNext[index] == mBase.EOL)
			{
				return traverseHashEntries();
			}
			else
			{
				mCurrentEntryIndexPtr = mBase.mEntriesNext + index;
				return mBase.mEntries + *mCurrentEntryIndexPtr;
			}
		}

		PX_INLINE void reset()
		{
			mCurrentHashIndex = 0;
			mCurrentEntryIndexPtr = NULL;
		}

	  private:
		// Scans forward through the bucket array for the next non-empty chain.
		PX_INLINE Entry* traverseHashEntries()
		{
			mCurrentEntryIndexPtr = NULL;
			while (mCurrentEntryIndexPtr == NULL && mCurrentHashIndex < mBase.mHashSize)
			{
				if (mBase.mHash[mCurrentHashIndex] != mBase.EOL)
				{
					mCurrentEntryIndexPtr = mBase.mHash + mCurrentHashIndex;
					mCurrentHashIndex++;
					return mBase.mEntries + *mCurrentEntryIndexPtr;
				}
				else
				{
					mCurrentHashIndex++;
				}
			}
			return NULL;
		}
		PxEraseIterator& operator=(const PxEraseIterator&);

	  private:
		uint32_t* mCurrentEntryIndexPtr;
		uint32_t mCurrentHashIndex;
		PxHashBase& mBase;
	};
};
// Copies all entries of 'other' into this table. Each entry lives in exactly
// one bucket chain of the source, so each is inserted exactly once.
template <class Entry, class Key, class HashFn, class GetKey, class PxAllocator, bool compacting>
template <typename HK, typename GK, class A, bool comp>
PX_NOINLINE void
PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, compacting>::copy(const PxHashBase<Entry, Key, HK, GK, A, comp>& other)
{
	reserve(other.mEntriesCount);
	// Walk every bucket of the source table. The previous code bounded this
	// loop by other.mEntriesCount, but mHash has mHashSize buckets and an entry
	// may hash into any of them; since the load factor keeps mEntriesCount
	// below mHashSize, buckets in [mEntriesCount, mHashSize) were never visited
	// and their entries were silently dropped.
	for(uint32_t i = 0; i < other.mHashSize; i++)
	{
		for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j])
		{
			const Entry& otherEntry = other.mEntries[j];
			bool exists;
			Entry* newEntry = create(GK()(otherEntry), exists);
			PX_ASSERT(!exists);
			PX_PLACEMENT_NEW(newEntry, Entry)(otherEntry);
		}
	}
}
// Common implementation shared by PxHashSet and PxCoalescedHashSet: adapts
// PxHashBase to set semantics, where the stored entry is the key itself.
template <class Key, class HashFn, class PxAllocator = typename PxAllocatorTraits<Key>::Type, bool Coalesced = false>
class PxHashSetBase
{
	PX_NOCOPY(PxHashSetBase)
  public:
	// Key extractor: a set entry is its own key.
	struct GetKey
	{
		PX_INLINE const Key& operator()(const Key& e)
		{
			return e;
		}
	};

	typedef PxHashBase<Key, Key, HashFn, GetKey, PxAllocator, Coalesced> BaseMap;
	typedef typename BaseMap::Iter Iterator;

	PxHashSetBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc)
	: mBase(initialTableSize, loadFactor, alloc)
	{
	}
	PxHashSetBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc)
	{
	}
	PxHashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
	{
	}

	// Adds k to the set; returns false (set unchanged) if already present.
	bool insert(const Key& k)
	{
		bool alreadyPresent;
		Key* slot = mBase.create(k, alreadyPresent);
		if(!alreadyPresent)
			PX_PLACEMENT_NEW(slot, Key)(k);
		return !alreadyPresent;
	}
	// Returns true if k is in the set.
	PX_INLINE bool contains(const Key& k) const
	{
		return mBase.find(k) != 0;
	}
	// Removes k; returns false if it was not present.
	PX_INLINE bool erase(const Key& k)
	{
		return mBase.erase(k);
	}
	// Number of elements currently stored.
	PX_INLINE uint32_t size() const
	{
		return mBase.size();
	}
	// Bucket count of the underlying table.
	PX_INLINE uint32_t capacity() const
	{
		return mBase.capacity();
	}
	// Grows the underlying table to at least 'size' buckets.
	PX_INLINE void reserve(uint32_t size)
	{
		mBase.reserve(size);
	}
	// Removes and destructs all elements; storage is retained.
	PX_INLINE void clear()
	{
		mBase.clear();
	}

  protected:
	BaseMap mBase;
};
template <class Key, class Value, class HashFn, class PxAllocator = typename PxAllocatorTraits<PxPair<const Key, Value> >::Type>
class PxHashMapBase
{
PX_NOCOPY(PxHashMapBase)
public:
typedef PxPair<const Key, Value> Entry;
struct GetKey
{
PX_INLINE const Key& operator()(const Entry& e)
{
return e.first;
}
};
typedef PxHashBase<Entry, Key, HashFn, GetKey, PxAllocator, true> BaseMap;
typedef typename BaseMap::Iter Iterator;
typedef typename BaseMap::PxEraseIterator EraseIterator;
PxHashMapBase(uint32_t initialTableSize, float loadFactor, const PxAllocator& alloc)
: mBase(initialTableSize, loadFactor, alloc)
{
}
PxHashMapBase(const PxAllocator& alloc) : mBase(64, 0.75f, alloc)
{
}
PxHashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
{
}
bool insert(const Key /*&*/ k, const Value /*&*/ v)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
PX_PLACEMENT_NEW(e, Entry)(k, v);
return !exists;
}
Value& operator[](const Key& k)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
PX_PLACEMENT_NEW(e, Entry)(k, Value());
return e->second;
}
PX_INLINE const Entry* find(const Key& k) const
{
return mBase.find(k);
}
PX_INLINE bool erase(const Key& k)
{
return mBase.erase(k);
}
PX_INLINE bool erase(const Key& k, Entry& e)
{
return mBase.erase(k, e);
}
PX_INLINE uint32_t size() const
{
return mBase.size();
}
PX_INLINE uint32_t capacity() const
{
return mBase.capacity();
}
PX_INLINE Iterator getIterator()
{
return Iterator(mBase);
}
PX_INLINE EraseIterator getEraseIterator()
{
return EraseIterator(mBase);
}
PX_INLINE void reserve(uint32_t size)
{
mBase.reserve(size);
}
PX_INLINE void clear()
{
mBase.clear();
}
protected:
BaseMap mBase;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#if PX_VC
#pragma warning(pop)
#endif
#endif

View File

@@ -0,0 +1,119 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HASHMAP_H
#define PX_HASHMAP_H
#include "foundation/PxHashInternals.h"
// TODO: make this doxy-format
//
// This header defines two hash maps. Hash maps
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
// * have O(1) contains, erase
//
// Maps have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of map: coalesced and uncoalesced. Coalesced maps keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashMap<T>:
// bool insert(const Key& k, const Value& v) O(1) amortized (exponential resize policy)
// Value & operator[](const Key& k) O(1) for existing objects, else O(1) amortized
// const Entry * find(const Key& k); O(1)
// bool erase(const T& k); O(1)
// uint32_t size(); constant
// void reserve(uint32_t size); O(MAX(currentOccupancy,size))
// void clear(); O(currentOccupancy) (with zero constant for objects
// without
// destructors)
// Iterator getIterator();
//
// operator[] creates an entry if one does not exist, initializing with the default constructor.
// CoalescedHashMap<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// Use of iterators:
//
// for(HashMap::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(iter->first, iter->second);
#if !PX_DOXYGEN
namespace physx
{
#endif
// Hash map exposing chain-walking iteration via getIterator(). Both map
// flavors share PxHashMapBase (which always uses the compacting PxHashBase);
// this one additionally exposes an Iterator.
template <class Key, class Value, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxHashMap : public physx::PxHashMapBase<Key, Value, HashFn, Allocator>
{
  public:
	typedef physx::PxHashMapBase<Key, Value, HashFn, Allocator> HashMapBase;
	typedef typename HashMapBase::Iterator Iterator;

	PxHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor)
	{
	}
	PxHashMap(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
	: HashMapBase(initialTableSize, loadFactor, alloc)
	{
	}
	PxHashMap(const Allocator& alloc) : HashMapBase(64, 0.75f, alloc)
	{
	}
	// Returns an iterator over all entries; invalidated by any insert/erase.
	Iterator getIterator()
	{
		return Iterator(HashMapBase::mBase);
	}
};
// Hash map exposing direct access to the contiguous entry array instead of an
// iterator; entries occupy [0, size()) because the base table is compacting.
template <class Key, class Value, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxCoalescedHashMap : public physx::PxHashMapBase<Key, Value, HashFn, Allocator>
{
  public:
	typedef physx::PxHashMapBase<Key, Value, HashFn, Allocator> HashMapBase;

	PxCoalescedHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
	: HashMapBase(initialTableSize, loadFactor)
	{
	}
	// Raw pointer to the entry array; the first size() entries are live.
	const PxPair<const Key, Value>* getEntries() const
	{
		return HashMapBase::mBase.getEntries();
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,128 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HASHSET_H
#define PX_HASHSET_H
#include "foundation/PxHashInternals.h"
// TODO: make this doxy-format
// This header defines two hash sets. Hash sets
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
//
// Sets have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashSet<T>:
// bool insert(const T& k) amortized O(1) (exponential resize policy)
// bool contains(const T& k) const; O(1)
// bool erase(const T& k); O(1)
// uint32_t size() const; constant
// void reserve(uint32_t size); O(MAX(size, currentOccupancy))
// void clear(); O(currentOccupancy) (with zero constant for objects without
// destructors)
// Iterator getIterator();
//
// Use of iterators:
//
// for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(*iter);
//
// CoalescedHashSet<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// insertion into a set already containing the element fails returning false, as does
// erasure of an element not in the set
//
#if !PX_DOXYGEN
namespace physx
{
#endif
// Non-coalesced hash set: supports chain-walking iteration via getIterator()
// (erase is cheaper than in PxCoalescedHashSet, but entries are not kept
// contiguous, so there is no getEntries()).
template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, false>
{
  public:
	typedef physx::PxHashSetBase<Key, HashFn, Allocator, false> HashSetBase;
	typedef typename HashSetBase::Iterator Iterator;

	PxHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor)
	{
	}
	PxHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
	: HashSetBase(initialTableSize, loadFactor, alloc)
	{
	}
	PxHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
	{
	}
	// Returns an iterator over all elements; invalidated by any insert/erase.
	Iterator getIterator()
	{
		return Iterator(HashSetBase::mBase);
	}
};
// Coalesced hash set: keeps elements contiguous in [0, size()) so they can be
// scanned directly via getEntries(); no getIterator(), and erase is costlier.
template <class Key, class HashFn = PxHash<Key>, class Allocator = PxAllocator>
class PxCoalescedHashSet : public physx::PxHashSetBase<Key, HashFn, Allocator, true>
{
  public:
	typedef typename physx::PxHashSetBase<Key, HashFn, Allocator, true> HashSetBase;

	PxCoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
	: HashSetBase(initialTableSize, loadFactor)
	{
	}
	PxCoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
	: HashSetBase(initialTableSize, loadFactor, alloc)
	{
	}
	PxCoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
	{
	}
	// Raw pointer to the element array; the first size() elements are live.
	const Key* getEntries() const
	{
		return HashSetBase::mBase.getEntries();
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,140 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_IO_H
#define PX_IO_H
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Tag type used to select "empty" constructor overloads.
NOTE(review): presumably such overloads deliberately skip member
initialization (see e.g. PxInlineAllocator(const PxEMPTY)) — confirm against
the users of this tag.
*/
enum PxEMPTY
{
	PxEmpty
};
/**
\brief Input stream class for I/O.
The user needs to supply a PxInputStream implementation to a number of methods to allow the SDK to read data.
*/
class PxInputStream
{
  public:
	/**
	\brief read from the stream. The number of bytes read may be less than the number requested.
	\param[out] dest the destination address to which the data will be read
	\param[in] count the number of bytes requested
	\return the number of bytes read from the stream.
	*/
	virtual uint32_t read(void* dest, uint32_t count) = 0;

	virtual ~PxInputStream()
	{
	}
};
/**
\brief Input data class for I/O which provides random read access.
The user needs to supply a PxInputData implementation to a number of methods to allow the SDK to read data.
*/
class PxInputData : public PxInputStream
{
  public:
	/**
	\brief return the length of the input data
	\return size in bytes of the input data
	*/
	virtual uint32_t getLength() const = 0;

	/**
	\brief seek to the given offset from the start of the data.
	\param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to
	seek(length);
	*/
	virtual void seek(uint32_t offset) = 0;

	/**
	\brief return the current offset from the start of the data
	\return the current read offset in bytes from the start of the data.
	*/
	virtual uint32_t tell() const = 0;

	virtual ~PxInputData()
	{
	}
};
/**
\brief Output stream class for I/O.
The user needs to supply a PxOutputStream implementation to a number of methods to allow the SDK to write data.
*/
class PxOutputStream
{
  public:
	/**
	\brief write to the stream. The number of bytes written may be less than the number sent.
	\param[in] src the source address from which the data will be written
	\param[in] count the number of bytes to be written
	\return the number of bytes written to the stream by this call.
	*/
	virtual uint32_t write(const void* src, uint32_t count) = 0;

	virtual ~PxOutputStream()
	{
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,94 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_INLINE_ALLOCATOR_H
#define PX_INLINE_ALLOCATOR_H
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Allocator used by the array class: it carries a small fixed-size buffer
// inline (next to the metadata) and only falls back to the base allocator
// once that inline storage is taken or too small for the request.
template <PxU32 N, typename BaseAllocator>
class PxInlineAllocator : private BaseAllocator
{
public:
// serialization constructor: intentionally leaves members untouched
PxInlineAllocator(const PxEMPTY v) : BaseAllocator(v)
{
}
PxInlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false)
{
}
// copying never transfers buffer ownership; the new instance starts with a free inline buffer
PxInlineAllocator(const PxInlineAllocator& other) : BaseAllocator(other), mBufferUsed(false)
{
}
// hand out the inline buffer when it is free and large enough, otherwise defer to the base allocator
void* allocate(PxU32 size, const char* filename, PxI32 line, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
const bool fitsInline = size <= N;
if(mBufferUsed || !fitsInline)
return BaseAllocator::allocate(size, filename, line);
mBufferUsed = true;
return mBuffer;
}
// release either the inline buffer (just clears the flag) or a base-allocator block
void deallocate(void* ptr, uint32_t* cookie=NULL)
{
PX_UNUSED(cookie);
if(ptr != mBuffer)
{
BaseAllocator::deallocate(ptr);
return;
}
mBufferUsed = false;
}
PX_FORCE_INLINE PxU8* getInlineBuffer()
{
return mBuffer;
}
PX_FORCE_INLINE bool isBufferUsed() const
{
return mBufferUsed;
}
protected:
PxU8 mBuffer[N];
bool mBufferUsed;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,45 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_INLINE_AOS_H
#define PX_INLINE_AOS_H
#include "foundation/PxPreprocessor.h"
#if PX_WINDOWS
#include "windows/PxWindowsTrigConstants.h"
#include "windows/PxWindowsInlineAoS.h"
#elif(PX_UNIX_FAMILY || PX_SWITCH)
#include "unix/PxUnixTrigConstants.h"
#include "unix/PxUnixInlineAoS.h"
#else
#error "Platform not supported!"
#endif
#endif

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_INLINE_ARRAY_H
#define PX_INLINE_ARRAY_H
#include "foundation/PxArray.h"
#include "foundation/PxInlineAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// array that pre-allocates for N elements
// (the first N elements live inside the object itself; the base allocator is
// only hit once the array grows beyond N — see PxInlineAllocator)
template <typename T, uint32_t N, typename Alloc = typename PxAllocatorTraits<T>::Type>
class PxInlineArray : public PxArray<T, PxInlineAllocator<N * sizeof(T), Alloc> >
{
typedef PxInlineAllocator<N * sizeof(T), Alloc> Allocator;
public:
// serialization constructor: after in-place deserialization, mData must be
// re-pointed at this object's own inline buffer if the data was stored inline
PxInlineArray(const PxEMPTY v) : PxArray<T, Allocator>(v)
{
if(isInlined())
this->mData = reinterpret_cast<T*>(PxArray<T, Allocator>::getInlineBuffer());
}
// true if the elements currently live in the inline buffer (rather than on the heap)
PX_INLINE bool isInlined() const
{
return Allocator::isBufferUsed();
}
// reserves the inline capacity up front; allocate(N) returns the inline
// buffer here because it is still free and N elements fit by construction
PX_INLINE explicit PxInlineArray(const Alloc& alloc = Alloc()) : PxArray<T, Allocator>(alloc)
{
this->mData = this->allocate(N);
this->mCapacity = N;
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,44 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_INTRINSICS_H
#define PX_INTRINSICS_H
#include "foundation/PxPreprocessor.h"
#if PX_WINDOWS_FAMILY
#include "windows/PxWindowsIntrinsics.h"
#elif(PX_LINUX || PX_APPLE_FAMILY)
#include "unix/PxUnixIntrinsics.h"
#elif PX_SWITCH
#include "switch/PxSwitchIntrinsics.h"
#else
#error "Platform not supported!"
#endif
#endif // #ifndef PX_INTRINSICS_H

View File

@@ -0,0 +1,508 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT33_H
#define PX_MAT33_H
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
\brief 3x3 matrix class
Some clarifications, as there has been much confusion about matrix formats etc. in the past.
Short:
- Matrices have base vectors in columns (vectors are column matrices, 3x1 matrices).
- Matrices are physically stored in column major format.
- Matrices are concatenated from the left.
Long:
Given three base vectors a, b and c the matrix is stored as
|a.x b.x c.x|
|a.y b.y c.y|
|a.z b.z c.z|
Vectors are treated as columns, so the vector v is
|x|
|y|
|z|
And matrices are applied _before_ the vector (pre-multiplication)
v' = M*v
|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z|
|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z|
|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z|
Physical storage and indexing:
To be compatible with popular 3d rendering APIs (read D3d and OpenGL)
the physical indexing is
|0 3 6|
|1 4 7|
|2 5 8|
index = column*3 + row
which in C++ translates to M[column][row]
The mathematical indexing is M_row,column and this is what is used for _-notation
so _12 is 1st row, second column and operator(row, column)!
*/
template<class Type>
class PxMat33T
{
public:
//! Default constructor (leaves the matrix contents uninitialized)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T()
{
}
//! identity constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxIDENTITY) :
column0(Type(1.0), Type(0.0), Type(0.0)),
column1(Type(0.0), Type(1.0), Type(0.0)),
column2(Type(0.0), Type(0.0), Type(1.0))
{
}
//! zero constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(PxZERO) :
column0(Type(0.0)),
column1(Type(0.0)),
column2(Type(0.0))
{
}
//! Construct from three base vectors
PX_CUDA_CALLABLE PxMat33T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2) :
column0(col0),
column1(col1),
column2(col2)
{
}
//! constructor from a scalar, which generates a multiple of the identity matrix
explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type r) :
column0(r, Type(0.0), Type(0.0)),
column1(Type(0.0), r, Type(0.0)),
column2(Type(0.0), Type(0.0), r)
{
}
//! Construct from Type[9], interpreted in column-major order (see storage notes above)
explicit PX_CUDA_CALLABLE PX_INLINE PxMat33T(Type values[]) :
column0(values[0], values[1], values[2]),
column1(values[3], values[4], values[5]),
column2(values[6], values[7], values[8])
{
}
//! Construct from a quaternion
//! NOTE(review): standard unit-quaternion-to-matrix expansion; presumably assumes q is normalized — confirm callers
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T(const PxQuatT<Type>& q)
{
// PT: TODO: PX-566
const Type x = q.x;
const Type y = q.y;
const Type z = q.z;
const Type w = q.w;
const Type x2 = x + x;
const Type y2 = y + y;
const Type z2 = z + z;
const Type xx = x2 * x;
const Type yy = y2 * y;
const Type zz = z2 * z;
const Type xy = x2 * y;
const Type xz = x2 * z;
const Type xw = x2 * w;
const Type yz = y2 * z;
const Type yw = y2 * w;
const Type zw = z2 * w;
column0 = PxVec3T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw);
column1 = PxVec3T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw);
column2 = PxVec3T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy);
}
//! Copy constructor
PX_CUDA_CALLABLE PX_INLINE PxMat33T(const PxMat33T& other) :
column0(other.column0),
column1(other.column1),
column2(other.column2)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33T& operator=(const PxMat33T& other)
{
column0 = other.column0;
column1 = other.column1;
column2 = other.column2;
return *this;
}
//! Construct from diagonal, off-diagonals are zero.
PX_CUDA_CALLABLE PX_INLINE static const PxMat33T createDiagonal(const PxVec3T<Type>& d)
{
return PxMat33T(PxVec3T<Type>(d.x, Type(0.0), Type(0.0)),
PxVec3T<Type>(Type(0.0), d.y, Type(0.0)),
PxVec3T<Type>(Type(0.0), Type(0.0), d.z));
}
//! Computes the outer product of two vectors (result column i is a * b[i])
PX_CUDA_CALLABLE PX_INLINE static const PxMat33T outer(const PxVec3T<Type>& a, const PxVec3T<Type>& b)
{
return PxMat33T(a * b.x, a * b.y, a * b.z);
}
/**
\brief returns true if the two matrices are exactly equal
*/
PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat33T& m) const
{
return column0 == m.column0 && column1 == m.column1 && column2 == m.column2;
}
//! Get transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T getTranspose() const
{
// rows of this matrix become the columns of the result
const PxVec3T<Type> v0(column0.x, column1.x, column2.x);
const PxVec3T<Type> v1(column0.y, column1.y, column2.y);
const PxVec3T<Type> v2(column0.z, column1.z, column2.z);
return PxMat33T(v0, v1, v2);
}
//! Get the real inverse (cofactor expansion); a singular matrix (det == 0) yields the identity
PX_CUDA_CALLABLE PX_INLINE const PxMat33T getInverse() const
{
const Type det = getDeterminant();
PxMat33T inverse;
if(det != Type(0.0))
{
const Type invDet = Type(1.0) / det;
inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z);
inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z);
inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y);
inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x);
inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x);
inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x);
inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x);
inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x);
inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y);
return inverse;
}
// fallback for non-invertible input: return identity rather than dividing by zero
else
{
return PxMat33T(PxIdentity);
}
}
//! Get determinant (scalar triple product of the base vectors)
PX_CUDA_CALLABLE PX_INLINE Type getDeterminant() const
{
return column0.dot(column1.cross(column2));
}
//! Unary minus
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-() const
{
return PxMat33T(-column0, -column1, -column2);
}
//! Add
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator+(const PxMat33T& other) const
{
return PxMat33T(column0 + other.column0, column1 + other.column1, column2 + other.column2);
}
//! Subtract
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator-(const PxMat33T& other) const
{
return PxMat33T(column0 - other.column0, column1 - other.column1, column2 - other.column2);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_INLINE const PxMat33T operator*(Type scalar) const
{
return PxMat33T(column0 * scalar, column1 * scalar, column2 * scalar);
}
//! friend declaration for scalar pre-multiplication (scalar * matrix), defined after the class
template<class Type2>
PX_CUDA_CALLABLE PX_INLINE friend PxMat33T<Type2> operator*(Type2, const PxMat33T<Type2>&);
//! Matrix vector multiplication (returns 'this->transform(vec)')
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> operator*(const PxVec3T<Type>& vec) const
{
return transform(vec);
}
// a <op>= b operators
//! Matrix multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat33T operator*(const PxMat33T& other) const
{
// Rows from this <dot> columns from other
// column0 = transform(other.column0) etc
return PxMat33T(transform(other.column0),
transform(other.column1),
transform(other.column2));
}
//! Equals-add
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator+=(const PxMat33T& other)
{
column0 += other.column0;
column1 += other.column1;
column2 += other.column2;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator-=(const PxMat33T& other)
{
column0 -= other.column0;
column1 -= other.column1;
column2 -= other.column2;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(Type scalar)
{
column0 *= scalar;
column1 *= scalar;
column2 *= scalar;
return *this;
}
//! Equals matrix multiplication
PX_CUDA_CALLABLE PX_INLINE PxMat33T& operator*=(const PxMat33T& other)
{
*this = *this * other;
return *this;
}
//! Element access, mathematical way! (row, col) maps to column-major storage [col][row]
PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const
{
return column0 * other.x + column1 * other.y + column2 * other.z;
}
//! Transform vector by matrix transpose, v' = M^t*v
//! (equals the inverse transform when the matrix is orthonormal)
PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const
{
return PxVec3T<Type>(column0.dot(other), column1.dot(other), column2.dot(other));
}
//! Pointer to the first element of the contiguous column-major storage
PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const
{
return &column0.x;
}
//! Column access by index (0..2)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num)
{
return (&column0)[num];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const
{
return (&column0)[num];
}
// Data, see above for format!
PxVec3T<Type> column0, column1, column2; // the three base vectors
};
//! Scalar pre-multiplication: scales every column of m by scalar
template<class Type>
PX_CUDA_CALLABLE PX_INLINE PxMat33T<Type> operator*(Type scalar, const PxMat33T<Type>& m)
{
const PxVec3T<Type> c0 = scalar * m.column0;
const PxVec3T<Type> c1 = scalar * m.column1;
const PxVec3T<Type> c2 = scalar * m.column2;
return PxMat33T<Type>(c0, c1, c2);
}
// implementation from PxQuat.h
// Converts a 3x3 matrix to a quaternion. Each branch computes a different
// quaternion component first (as t) and derives the rest from it; the branch
// conditions on the matrix diagonal keep t large, so the PxSqrt(t) divisor
// stays away from zero for numerical stability.
// NOTE(review): presumably assumes m is a pure rotation matrix — confirm callers
template<class Type>
PX_CUDA_CALLABLE PX_INLINE PxQuatT<Type>::PxQuatT(const PxMat33T<Type>& m)
{
if(m.column2.z < Type(0))
{
if(m.column0.x > m.column1.y)
{
// x is the dominant component
const Type t = Type(1.0) + m.column0.x - m.column1.y - m.column2.z;
*this = PxQuatT<Type>(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (Type(0.5) / PxSqrt(t));
}
else
{
// y is the dominant component
const Type t = Type(1.0) - m.column0.x + m.column1.y - m.column2.z;
*this = PxQuatT<Type>(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (Type(0.5) / PxSqrt(t));
}
}
else
{
if(m.column0.x < -m.column1.y)
{
// z is the dominant component
const Type t = Type(1.0) - m.column0.x - m.column1.y + m.column2.z;
*this = PxQuatT<Type>(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (Type(0.5) / PxSqrt(t));
}
else
{
// w is the dominant component
const Type t = Type(1.0) + m.column0.x + m.column1.y + m.column2.z;
*this = PxQuatT<Type>(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (Type(0.5) / PxSqrt(t));
}
}
}
typedef PxMat33T<float> PxMat33;
typedef PxMat33T<double> PxMat33d;
/**
\brief Sets a rotation matrix around the X axis.
Storage is column-major, so m[col][row] is written.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotX(PxMat33& m, PxReal angle)
{
PxReal s, c;
PxSinCos(angle, s, c);
m = PxMat33(PxIdentity);
m[1][1] = c;
m[2][2] = c;
m[1][2] = s;
m[2][1] = -s;
}
/**
\brief Sets a rotation matrix around the Y axis.
Storage is column-major, so m[col][row] is written.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotY(PxMat33& m, PxReal angle)
{
PxReal s, c;
PxSinCos(angle, s, c);
m = PxMat33(PxIdentity);
m[0][0] = c;
m[2][2] = c;
m[0][2] = -s;
m[2][0] = s;
}
/**
\brief Sets a rotation matrix around the Z axis.
Storage is column-major, so m[col][row] is written.
\param m [out] output rotation matrix
\param angle [in] desired angle
*/
PX_INLINE void PxSetRotZ(PxMat33& m, PxReal angle)
{
PxReal s, c;
PxSinCos(angle, s, c);
m = PxMat33(PxIdentity);
m[0][0] = c;
m[1][1] = c;
m[0][1] = s;
m[1][0] = -s;
}
/**
\brief Returns a rotation quaternion around the X axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotXQuat(float angle)
{
// build the equivalent rotation matrix, then convert it to a quaternion
PxMat33 rot;
PxSetRotX(rot, angle);
return PxQuat(rot);
}
/**
\brief Returns a rotation quaternion around the Y axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotYQuat(float angle)
{
// build the equivalent rotation matrix, then convert it to a quaternion
PxMat33 rot;
PxSetRotY(rot, angle);
return PxQuat(rot);
}
/**
\brief Returns a rotation quaternion around the Z axis.
\param angle [in] desired angle
\return Quaternion that rotates around the desired axis
*/
PX_INLINE PxQuat PxGetRotZQuat(float angle)
{
// build the equivalent rotation matrix, then convert it to a quaternion
PxMat33 rot;
PxSetRotZ(rot, angle);
return PxQuat(rot);
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,273 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT34_H
#define PX_MAT34_H
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
Basic mathematical 3x4 matrix, implemented as a 3x3 rotation matrix and a translation
See PxMat33 for the format of the rotation matrix.
*/
template<class Type>
class PxMat34T
{
public:
//! Default constructor (leaves the matrix contents uninitialized)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T()
{
}
//! Construct from four base vectors (b0..b2 are the rotation columns, b3 is the translation)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxVec3T<Type>& b0, const PxVec3T<Type>& b1, const PxVec3T<Type>& b2, const PxVec3T<Type>& b3)
: m(b0, b1, b2), p(b3)
{
}
//! Construct from Type[12]: the first 9 values are the column-major 3x3 part, the last 3 the translation
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(Type values[]) :
m(values), p(values[9], values[10], values[11])
{
}
//! Construct from a 3x3 matrix (translation is set to zero)
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other)
: m(other), p(PxZero)
{
}
//! Construct from a 3x3 matrix and a translation vector
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat33T<Type>& other, const PxVec3T<Type>& t)
: m(other), p(t)
{
}
//! Construct from a PxTransformT<Type> (quaternion is expanded into the 3x3 part)
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxTransformT<Type>& other)
: m(other.q), p(other.p)
{
}
//! Copy constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T(const PxMat34T& other) : m(other.m), p(other.p)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxMat34T& operator=(const PxMat34T& other)
{
m = other.m;
p = other.p;
return *this;
}
//! Set to identity matrix (identity rotation, zero translation)
PX_CUDA_CALLABLE PX_FORCE_INLINE void setIdentity()
{
m = PxMat33T<Type>(PxIdentity);
p = PxVec3T<Type>(0);
}
// Simpler operators
//! Equality operator (exact component-wise comparison)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxMat34T& other) const
{
return m == other.m && p == other.p;
}
//! Inequality operator
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxMat34T& other) const
{
return !operator==(other);
}
//! Unary minus
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-() const
{
return PxMat34T(-m, -p);
}
//! Add
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator+(const PxMat34T& other) const
{
return PxMat34T(m + other.m, p + other.p);
}
//! Subtract
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator-(const PxMat34T& other) const
{
return PxMat34T(m - other.m, p - other.p);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(Type scalar) const
{
return PxMat34T(m*scalar, p*scalar);
}
//! Matrix multiplication (both operands treated as 4x4 with an implicit [0 0 0 1] bottom row)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat34T& other) const
{
//Rows from this <dot> columns from other
//base0 = rotate(other.m.column0) etc
return PxMat34T(m*other.m, m*other.p + p);
}
//! Matrix multiplication, extend the second matrix (3x3 operand has zero translation)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T operator*(const PxMat33T<Type>& other) const
{
//Rows from this <dot> columns from other
//base0 = transform(other.m.column0) etc
return PxMat34T(m*other, p);
}
//! friend declaration for (3x3 * 3x4) multiplication, defined after the class
template<class Type2>
friend PxMat34T<Type2> operator*(const PxMat33T<Type2>& a, const PxMat34T<Type2>& b);
// a <op>= b operators
//! Equals-add
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator+=(const PxMat34T& other)
{
m += other.m;
p += other.p;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator-=(const PxMat34T& other)
{
m -= other.m;
p -= other.p;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T& operator*=(Type scalar)
{
m *= scalar;
p *= scalar;
return *this;
}
//! Element access, mathematical way! (row, col) maps to column-major storage [col][row]
PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v (translation is ignored)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& other) const
{
return m*other;
}
//! Transform vector by transpose of matrix, equal to v' = M^t*v
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateTranspose(const PxVec3T<Type>& other) const
{
return m.transformTranspose(other);
}
//! Transform point by matrix (rotation followed by translation)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& other) const
{
return m*other + p;
}
//! Transform point by transposed matrix: M^t*(v - p)
//! (equal to the inverse transform when m is orthonormal)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformTranspose(const PxVec3T<Type>& other) const
{
return m.transformTranspose(other - p);
}
//! Transform another 3x4 matrix by the transposed matrix, column by column
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T transformTranspose(const PxMat34T& other) const
{
return PxMat34T(m.transformTranspose(other.m.column0),
m.transformTranspose(other.m.column1),
m.transformTranspose(other.m.column2),
m.transformTranspose(other.p - p));
}
//! Invert matrix treating it as a rotation+translation matrix only
//! (only a true inverse when m is orthonormal, i.e. a rigid transform)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat34T getInverseRT() const
{
return PxMat34T(m.getTranspose(), m.transformTranspose(-p));
}
//! Column access by index: 0..2 address the rotation columns, 3 the translation
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type>& operator[](PxU32 num) { return (&m.column0)[num]; }
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type>& operator[](PxU32 num) const { return (&m.column0)[num]; }
//Data, see above for format!
PxMat33T<Type> m;
PxVec3T<Type> p;
};
//! Multiply a*b, a is extended to a 3x4 transform with zero translation
template<class Type>
PX_INLINE PxMat34T<Type> operator*(const PxMat33T<Type>& a, const PxMat34T<Type>& b)
{
const PxMat33T<Type> rotation = a * b.m;
const PxVec3T<Type> translation = a * b.p;
return PxMat34T<Type>(rotation, translation);
}
typedef PxMat34T<float> PxMat34;
typedef PxMat34T<double> PxMat34d;
//! A padded version of PxMat34, to safely load its data using SIMD
class PxMat34Padded : public PxMat34
{
public:
PX_FORCE_INLINE PxMat34Padded(const PxMat34& src) : PxMat34(src) {}
PX_FORCE_INLINE PxMat34Padded() {}
PX_FORCE_INLINE ~PxMat34Padded() {}
// extra trailing word — presumably so SIMD code can over-read past the last member; confirm consumers
PxU32 padding;
};
// NOTE(review): this only asserts that sizeof(PxMat34Padded) != 16, which is trivially true;
// it looks like a size/alignment check was intended — confirm the intended expression
PX_COMPILE_TIME_ASSERT(0==(sizeof(PxMat34Padded)==16));
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,387 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MAT44_H
#define PX_MAT44_H
#include "foundation/PxQuat.h"
#include "foundation/PxVec4.h"
#include "foundation/PxMat33.h"
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
\brief 4x4 matrix class
This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the PxMat33
\see PxMat33 PxTransform
*/
template<class Type>
class PxMat44T
{
  public:
	//! Default constructor: leaves all elements uninitialized.
	PX_CUDA_CALLABLE PX_INLINE PxMat44T()
	{
	}

	//! Identity constructor.
	PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxIDENTITY) :
		column0(Type(1.0), Type(0.0), Type(0.0), Type(0.0)),
		column1(Type(0.0), Type(1.0), Type(0.0), Type(0.0)),
		column2(Type(0.0), Type(0.0), Type(1.0), Type(0.0)),
		column3(Type(0.0), Type(0.0), Type(0.0), Type(1.0))
	{
	}

	//! Zero constructor.
	PX_CUDA_CALLABLE PX_INLINE PxMat44T(PxZERO) : column0(PxZero), column1(PxZero), column2(PxZero), column3(PxZero)
	{
	}

	//! Construct from four 4-vectors (the matrix columns).
	PX_CUDA_CALLABLE PxMat44T(const PxVec4T<Type>& col0, const PxVec4T<Type>& col1, const PxVec4T<Type>& col2, const PxVec4T<Type>& col3) :
		column0(col0),
		column1(col1),
		column2(col2),
		column3(col3)
	{
	}

	//! Constructor that generates a multiple of the identity matrix (r on the full diagonal, including element (3,3)).
	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type r) :
		column0(r, Type(0.0), Type(0.0), Type(0.0)),
		column1(Type(0.0), r, Type(0.0), Type(0.0)),
		column2(Type(0.0), Type(0.0), r, Type(0.0)),
		column3(Type(0.0), Type(0.0), Type(0.0), r)
	{
	}

	//! Construct from three base vectors and a translation; the bottom row becomes (0,0,0,1) (affine transform layout).
	PX_CUDA_CALLABLE PxMat44T(const PxVec3T<Type>& col0, const PxVec3T<Type>& col1, const PxVec3T<Type>& col2, const PxVec3T<Type>& col3) :
		column0(col0, Type(0.0)),
		column1(col1, Type(0.0)),
		column2(col2, Type(0.0)),
		column3(col3, Type(1.0))
	{
	}

	//! Construct from Type[16], interpreted column-major: values[0..3] form column0, values[4..7] column1, etc.
	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(Type values[]) :
		column0(values[0], values[1], values[2], values[3]),
		column1(values[4], values[5], values[6], values[7]),
		column2(values[8], values[9], values[10], values[11]),
		column3(values[12], values[13], values[14], values[15])
	{
	}

	//! Construct a rotation matrix from a quaternion; the translation column is (0,0,0,1).
	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxQuatT<Type>& q)
	{
		// PT: TODO: PX-566
		const Type x = q.x;
		const Type y = q.y;
		const Type z = q.z;
		const Type w = q.w;

		const Type x2 = x + x;
		const Type y2 = y + y;
		const Type z2 = z + z;

		const Type xx = x2 * x;
		const Type yy = y2 * y;
		const Type zz = z2 * z;

		const Type xy = x2 * y;
		const Type xz = x2 * z;
		const Type xw = x2 * w;

		const Type yz = y2 * z;
		const Type yw = y2 * w;
		const Type zw = z2 * w;

		column0 = PxVec4T<Type>(Type(1.0) - yy - zz, xy + zw, xz - yw, Type(0.0));
		column1 = PxVec4T<Type>(xy - zw, Type(1.0) - xx - zz, yz + xw, Type(0.0));
		column2 = PxVec4T<Type>(xz + yw, yz - xw, Type(1.0) - xx - yy, Type(0.0));
		column3 = PxVec4T<Type>(Type(0.0), Type(0.0), Type(0.0), Type(1.0));
	}

	//! Construct from a diagonal vector (diagonal.w lands in element (3,3)).
	explicit PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxVec4T<Type>& diagonal) :
		column0(diagonal.x, Type(0.0), Type(0.0), Type(0.0)),
		column1(Type(0.0), diagonal.y, Type(0.0), Type(0.0)),
		column2(Type(0.0), Type(0.0), diagonal.z, Type(0.0)),
		column3(Type(0.0), Type(0.0), Type(0.0), diagonal.w)
	{
	}

	//! Construct from Mat33 (the rotation/scale part) and a translation.
	PX_CUDA_CALLABLE PxMat44T(const PxMat33T<Type>& axes, const PxVec3T<Type>& position) :
		column0(axes.column0, Type(0.0)),
		column1(axes.column1, Type(0.0)),
		column2(axes.column2, Type(0.0)),
		column3(position, Type(1.0))
	{
	}

	//! Construct from a rigid transform (rotation quaternion + translation).
	//! Takes PxTransformT<Type> rather than the float-only PxTransform typedef so that the
	//! double-precision instantiation (PxMat44d) can also be built from a transform; for
	//! PxMat44T<float> this is identical to taking PxTransform.
	PX_CUDA_CALLABLE PxMat44T(const PxTransformT<Type>& t)
	{
		*this = PxMat44T(PxMat33T<Type>(t.q), t.p);
	}

	/**
	\brief returns true if the two matrices are exactly equal

	Element-wise exact comparison; no epsilon tolerance is applied.
	*/
	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxMat44T& m) const
	{
		return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3;
	}

	//! Copy constructor
	PX_CUDA_CALLABLE PX_INLINE PxMat44T(const PxMat44T& other) :
		column0(other.column0),
		column1(other.column1),
		column2(other.column2),
		column3(other.column3)
	{
	}

	//! Assignment operator
	PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator=(const PxMat44T& other)
	{
		column0 = other.column0;
		column1 = other.column1;
		column2 = other.column2;
		column3 = other.column3;
		return *this;
	}

	//! Get transposed matrix (returns a copy; *this is unchanged)
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T getTranspose() const
	{
		return PxMat44T(
			PxVec4T<Type>(column0.x, column1.x, column2.x, column3.x), PxVec4T<Type>(column0.y, column1.y, column2.y, column3.y),
			PxVec4T<Type>(column0.z, column1.z, column2.z, column3.z), PxVec4T<Type>(column0.w, column1.w, column2.w, column3.w));
	}

	//! Unary minus
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-() const
	{
		return PxMat44T(-column0, -column1, -column2, -column3);
	}

	//! Add
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator+(const PxMat44T& other) const
	{
		return PxMat44T(column0 + other.column0, column1 + other.column1, column2 + other.column2, column3 + other.column3);
	}

	//! Subtract
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator-(const PxMat44T& other) const
	{
		return PxMat44T(column0 - other.column0, column1 - other.column1, column2 - other.column2, column3 - other.column3);
	}

	//! Scalar multiplication
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(Type scalar) const
	{
		return PxMat44T(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar);
	}

	template<class Type2>
	friend PxMat44T<Type2> operator*(Type2, const PxMat44T<Type2>&);

	//! Matrix multiplication
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T operator*(const PxMat44T& other) const
	{
		// Rows from this <dot> columns from other
		// column0 = transform(other.column0) etc
		return PxMat44T(transform(other.column0), transform(other.column1), transform(other.column2), transform(other.column3));
	}

	// a <op>= b operators

	//! Equals-add
	PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator+=(const PxMat44T& other)
	{
		column0 += other.column0;
		column1 += other.column1;
		column2 += other.column2;
		column3 += other.column3;
		return *this;
	}

	//! Equals-sub
	PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator-=(const PxMat44T& other)
	{
		column0 -= other.column0;
		column1 -= other.column1;
		column2 -= other.column2;
		column3 -= other.column3;
		return *this;
	}

	//! Equals scalar multiplication
	PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(Type scalar)
	{
		column0 *= scalar;
		column1 *= scalar;
		column2 *= scalar;
		column3 *= scalar;
		return *this;
	}

	//! Equals matrix multiplication
	PX_CUDA_CALLABLE PX_INLINE PxMat44T& operator*=(const PxMat44T& other)
	{
		*this = *this * other;
		return *this;
	}

	//! Element access, mathematical way! (row, column) indexing into the column-major storage.
	PX_CUDA_CALLABLE PX_FORCE_INLINE Type operator()(PxU32 row, PxU32 col) const
	{
		return (*this)[col][row];
	}

	//! Element access, mathematical way! (row, column) indexing into the column-major storage.
	PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator()(PxU32 row, PxU32 col)
	{
		return (*this)[col][row];
	}

	//! Transform vector by matrix, equal to v' = M*v
	PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> transform(const PxVec4T<Type>& other) const
	{
		return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w;
	}

	//! Transform vector by matrix, equal to v' = M*v (treats 'other' as a point: w = 1, so translation applies)
	PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> transform(const PxVec3T<Type>& other) const
	{
		return transform(PxVec4T<Type>(other, Type(1.0))).getXYZ();
	}

	//! Rotate vector by matrix, equal to v' = M*v with the translation column ignored
	PX_CUDA_CALLABLE PX_INLINE const PxVec4T<Type> rotate(const PxVec4T<Type>& other) const
	{
		return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0;
	}

	//! Rotate vector by matrix, equal to v' = M*v (treats 'other' as a direction: translation does not apply)
	PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& other) const
	{
		return rotate(PxVec4T<Type>(other, Type(1.0))).getXYZ();
	}

	//! Return the xyz part of basis column 'num' (0..2); asserts that 'num' does not address the translation column.
	PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getBasis(PxU32 num) const
	{
		PX_ASSERT(num < 3);
		return (&column0)[num].getXYZ();
	}

	//! Return the translation part (xyz of column3).
	PX_CUDA_CALLABLE PX_INLINE const PxVec3T<Type> getPosition() const
	{
		return column3.getXYZ();
	}

	//! Set the translation part; column3.w is left untouched.
	PX_CUDA_CALLABLE PX_INLINE void setPosition(const PxVec3T<Type>& position)
	{
		column3.x = position.x;
		column3.y = position.y;
		column3.z = position.z;
	}

	//! Raw pointer to the 16 contiguous elements, column-major (suitable for D3D/OpenGL upload).
	PX_CUDA_CALLABLE PX_FORCE_INLINE const Type* front() const
	{
		return &column0.x;
	}

	//! Column access (0..3); no bounds checking.
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4T<Type>& operator[](PxU32 num)
	{
		return (&column0)[num];
	}

	//! Column access (0..3); no bounds checking.
	PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec4T<Type>& operator[](PxU32 num) const
	{
		return (&column0)[num];
	}

	//! Scale each column by the corresponding component of p (in place).
	PX_CUDA_CALLABLE PX_INLINE void scale(const PxVec4T<Type>& p)
	{
		column0 *= p.x;
		column1 *= p.y;
		column2 *= p.z;
		column3 *= p.w;
	}

	//! Inverse of a rotation+translation matrix: transposes the 3x3 part and back-rotates the
	//! negated translation. Only valid when the upper 3x3 block is orthonormal.
	PX_CUDA_CALLABLE PX_INLINE const PxMat44T inverseRT() const
	{
		const PxVec3T<Type> r0(column0.x, column1.x, column2.x);
		const PxVec3T<Type> r1(column0.y, column1.y, column2.y);
		const PxVec3T<Type> r2(column0.z, column1.z, column2.z);

		return PxMat44T(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z));
	}

	//! Returns true if every element of every column is a finite floating-point number.
	PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
	{
		return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite();
	}

	// Data, see above for format!
	PxVec4T<Type> column0, column1, column2, column3; // the four base vectors
};
// implementation from PxTransform.h
template<class Type>
PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT<Type>::PxTransformT(const PxMat44T<Type>& m)
{
	// The upper-left 3x3 block carries the rotation: convert it to a quaternion.
	// The fourth column carries the translation.
	const PxMat33T<Type> rotationPart(PxVec3T<Type>(m.column0.x, m.column0.y, m.column0.z),
	                                  PxVec3T<Type>(m.column1.x, m.column1.y, m.column1.z),
	                                  PxVec3T<Type>(m.column2.x, m.column2.y, m.column2.z));
	q = PxQuatT<Type>(rotationPart);
	p = m.column3.getXYZ();
}
typedef PxMat44T<float> PxMat44;
typedef PxMat44T<double> PxMat44d;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,380 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MATH_H
#define PX_MATH_H
#include "foundation/PxPreprocessor.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#endif
#include <math.h>
#if PX_VC
#pragma warning(pop)
#endif
#if (PX_LINUX_FAMILY && !PX_ARM_FAMILY)
// Force linking against nothing newer than glibc v2.17 to remain compatible with platforms with older glibc versions
__asm__(".symver expf,expf@GLIBC_2.2.5");
__asm__(".symver powf,powf@GLIBC_2.2.5");
#endif
#include <float.h>
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxAssert.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// constants
// Common trigonometric constants, stored at float precision.
static constexpr float PxPi = float(3.141592653589793);            // pi
static constexpr float PxHalfPi = float(1.57079632679489661923);   // pi/2
static constexpr float PxTwoPi = float(6.28318530717958647692);    // 2*pi
static constexpr float PxInvPi = float(0.31830988618379067154);    // 1/pi
static constexpr float PxInvTwoPi = float(0.15915494309189533577); // 1/(2*pi)
static constexpr float PxPiDivTwo = float(1.57079632679489661923); // pi/2 (same value as PxHalfPi)
static constexpr float PxPiDivFour = float(0.78539816339744830962); // pi/4
static constexpr float PxSqrt2 = float(1.4142135623730951);        // sqrt(2)
static constexpr float PxInvSqrt2 = float(0.7071067811865476);     // 1/sqrt(2)
/**
\brief The return value is the greater of the two specified values.

\note On ties (a == b) the first argument a is returned.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMax(T a, T b)
{
return a < b ? b : a;
}

//! overload for float to use fsel on xbox
// Branchless float maximum via the platform intrinsic.
// NOTE(review): behavior for NaN inputs depends on the platform's selectMax implementation — confirm before relying on it.
template <>
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMax(float a, float b)
{
return intrinsics::selectMax(a, b);
}

/**
\brief The return value is the lesser of the two specified values.

\note On ties (a == b) the first argument a is returned.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxMin(T a, T b)
{
return a < b ? a : b;
}

template <>
//! overload for float to use fsel on xbox
// Branchless float minimum via the platform intrinsic; same NaN caveat as PxMax<float>.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxMin(float a, float b)
{
return intrinsics::selectMin(a, b);
}
/*
Many of these are just implemented as PX_CUDA_CALLABLE PX_FORCE_INLINE calls to the C lib right now,
but later we could replace some of them with some approximations or more
clever stuff.
*/
/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAbs(float a)
{
return intrinsics::abs(a);
}

/**
\brief Returns true if a and b differ by strictly less than eps.

\note Strict inequality: PxEquals(x, x + eps, eps) is false.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxEquals(float a, float b, float eps)
{
return (PxAbs(a - b) < eps);
}

/**
\brief abs returns the absolute value of its argument.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAbs(double a)
{
return ::fabs(a);
}

/**
\brief abs returns the absolute value of its argument.

\note NOTE(review): for INT32_MIN, ::abs is undefined behavior (the result is not representable).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE int32_t PxAbs(int32_t a)
{
return ::abs(a);
}

/**
\brief Clamps v to the range [lo,hi]

Asserts lo <= hi; for float arguments the clamp uses the branchless PxMin/PxMax float overloads.
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE T PxClamp(T v, T lo, T hi)
{
PX_ASSERT(lo <= hi);
return PxMin(hi, PxMax(lo, v));
}

//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSqrt(float a)
{
return intrinsics::sqrt(a);
}

//! \brief Square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSqrt(double a)
{
return ::sqrt(a);
}

//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxRecipSqrt(float a)
{
return intrinsics::recipSqrt(a);
}

//! \brief reciprocal square root.
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxRecipSqrt(double a)
{
return 1 / ::sqrt(a);
}

//! \brief square of the argument
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxSqr(const PxF32 a)
{
return a * a;
}
//! trigonometry -- all angles are in radians.
// The float overloads dispatch to the platform intrinsics layer; the double overloads
// call the C math library directly.

//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSin(float a)
{
return intrinsics::sin(a);
}

//! \brief Sine of an angle ( <b>Unit:</b> Radians )
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxSin(double a)
{
return ::sin(a);
}

//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCos(float a)
{
return intrinsics::cos(a);
}

//! \brief Cosine of an angle (<b>Unit:</b> Radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxCos(double a)
{
return ::cos(a);
}

//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const PxF32 a, PxF32& sin, PxF32& cos)
{
#if PX_CUDA_COMPILER && __CUDA_ARCH__ >= 350
// NOTE(review): __sincosf is the fast, reduced-accuracy CUDA intrinsic — confirm its precision
// is acceptable for all device-side callers.
__sincosf(a, &sin, &cos);
#else
sin = PxSin(a);
cos = PxCos(a);
#endif
}

//! \brief compute sine and cosine at the same time
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSinCos(const double a, double& sin, double& cos)
{
sin = PxSin(a);
cos = PxCos(a);
}

/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxTan(float a)
{
return ::tanf(a);
}

/**
\brief Tangent of an angle.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxTan(double a)
{
return ::tan(a);
}
/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians

\note The input is clamped to [-1, 1] so that values slightly outside the domain
(from floating-point rounding) do not produce NaN.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAsin(float f)
{
return ::asinf(PxClamp(f, -1.0f, 1.0f));
}

/**
\brief Arcsine.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians

\note The input is clamped to [-1, 1]; see the float overload.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAsin(double f)
{
return ::asin(PxClamp(f, -1.0, 1.0));
}

/**
\brief Arccosine.
Returns angle between 0 and PI in radians
<b>Unit:</b> Radians

\note The input is clamped to [-1, 1]; see PxAsin.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAcos(float f)
{
return ::acosf(PxClamp(f, -1.0f, 1.0f));
}

/**
\brief Arccosine.
Returns angle between 0 and PI in radians
<b>Unit:</b> Radians

\note The input is clamped to [-1, 1]; see PxAsin.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAcos(double f)
{
return ::acos(PxClamp(f, -1.0, 1.0));
}

/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan(float a)
{
return ::atanf(a);
}

/**
\brief ArcTangent.
Returns angle between -PI/2 and PI/2 in radians
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan(double a)
{
return ::atan(a);
}

/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians
<b>Unit:</b> Radians

\note Argument order: x is the numerator, y the denominator (forwarded as atan2(x, y)).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxAtan2(float x, float y)
{
return ::atan2f(x, y);
}

/**
\brief Arctangent of (x/y) with correct sign.
Returns angle between -PI and PI in radians
<b>Unit:</b> Radians

\note Argument order: x is the numerator, y the denominator (forwarded as atan2(x, y)).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE double PxAtan2(double x, double y)
{
return ::atan2(x, y);
}
/**
\brief Converts degrees to radians.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxDegToRad(const PxF32 a)
{
return 0.01745329251994329547f * a; // pi / 180
}

//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(float f)
{
return intrinsics::isFinite(f);
}

//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool PxIsFinite(double f)
{
return intrinsics::isFinite(f);
}

//! \brief round towards negative infinity
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxFloor(float a)
{
return ::floorf(a);
}

//! \brief base-e exponential
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxExp(float a)
{
return ::expf(a);
}

//! \brief round towards positive infinity
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxCeil(float a)
{
return ::ceilf(a);
}

//! \brief sign of the argument, via the platform intrinsics layer
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign(float a)
{
return physx::intrinsics::sign(a);
}

//! \brief three-way sign with a dead zone: returns 0.0f for inputs within [-eps, eps], otherwise +/-1.0f
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxSign2(float a, float eps = FLT_EPSILON)
{
return (a < -eps) ? -1.0f : (a > eps) ? 1.0f : 0.0f;
}

//! \brief x raised to the power y
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxPow(float x, float y)
{
return ::powf(x, y);
}

//! \brief natural (base-e) logarithm
PX_CUDA_CALLABLE PX_FORCE_INLINE float PxLog(float x)
{
return ::logf(x);
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,54 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MATH_INTRINSICS_H
#define PX_MATH_INTRINSICS_H
#include <string.h>
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#if PX_WINDOWS_FAMILY
#include "foundation/windows/PxWindowsMathIntrinsics.h"
#elif(PX_LINUX || PX_APPLE_FAMILY)
#include "foundation/unix/PxUnixMathIntrinsics.h"
#elif PX_SWITCH
#include "foundation/switch/PxSwitchMathIntrinsics.h"
#else
#error "Platform not supported!"
#endif
/**
Platform specific defines
*/
#if PX_WINDOWS_FAMILY
#pragma intrinsic(abs)
#pragma intrinsic(labs)
#endif
#endif

View File

@@ -0,0 +1,785 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MATH_UTILS_H
#define PX_MATH_UTILS_H
#include "foundation/PxFoundationConfig.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec4.h"
#include "foundation/PxAssert.h"
#include "foundation/PxPlane.h"
#include "foundation/PxMat33.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief finds the shortest rotation between two vectors.
\param[in] from the vector to start from
\param[in] target the vector to rotate to
\return a rotation about an axis normal to the two vectors which takes one to the other via the shortest path
*/
PX_FOUNDATION_API PxQuat PxShortestRotation(const PxVec3& from, const PxVec3& target);
/* \brief diagonalizes a 3x3 symmetric matrix m
The returned matrix satisfies M = R * D * R', where R is the rotation matrix for the output quaternion, R' its
transpose, and D the diagonal matrix
If the matrix is not symmetric, the result is undefined.
\param[in] m the matrix to diagonalize
\param[out] axes a quaternion rotation which diagonalizes the matrix
\return the vector diagonal of the diagonalized matrix.
*/
PX_FOUNDATION_API PxVec3 PxDiagonalize(const PxMat33& m, PxQuat& axes);
/** \brief creates a transform from the endpoints of a segment, suitable for an actor transform for a PxCapsuleGeometry
\param[in] p0 one end of major axis of the capsule
\param[in] p1 the other end of the axis of the capsule
\param[out] halfHeight the halfHeight of the capsule. This parameter is optional.
\return A PxTransform which will transform the vector (1,0,0) to the capsule axis shrunk by the halfHeight
*/
PX_FOUNDATION_API PxTransform PxTransformFromSegment(const PxVec3& p0, const PxVec3& p1, PxReal* halfHeight = NULL);
/** \brief creates a transform from a plane equation, suitable for an actor transform for a PxPlaneGeometry
\param[in] plane the desired plane equation
\return a PxTransform which will transform the plane PxPlane(1,0,0,0) to the specified plane
*/
PX_FOUNDATION_API PxTransform PxTransformFromPlaneEquation(const PxPlane& plane);
/** \brief creates a plane equation from a transform, such as the actor transform for a PxPlaneGeometry

\param[in] pose the transform
\return the plane
*/
PX_INLINE PxPlane PxPlaneEquationFromTransform(const PxTransform& pose)
{
	// The canonical PxPlaneGeometry plane is x = 0 with normal +x; transform it by the pose.
	const PxPlane canonicalPlane(1.0f, 0.0f, 0.0f, 0.0f);
	return canonicalPlane.transform(pose);
}
/**
\brief Spherical linear interpolation of two quaternions.

\param[in] t is the interpolation parameter in range (0, 1)
\param[in] left is the start of the interpolation
\param[in] right is the end of the interpolation
\return Returns left when t=0, right when t=1 and a spherical interpolation of left and right when 0 < t < 1.
*/
PX_CUDA_CALLABLE PX_INLINE PxQuat PxSlerp(const PxReal t, const PxQuat& left, const PxQuat& right)
{
	const PxReal quatEpsilon = (PxReal(1.0e-8f));

	// Take the shorter arc: if the quaternions lie in opposite half-spaces,
	// interpolate towards -right instead of right.
	PxReal cosAngle = left.dot(right);
	PxReal targetSign = PxReal(1);
	if (cosAngle < 0)
	{
		cosAngle = -cosAngle;
		targetSign = PxReal(-1);
	}

	// sin^2 of the angle between the quaternions; when (nearly) zero the inputs are
	// (anti-)parallel and slerp degenerates, so returning left is accurate enough.
	const PxReal sinSq = PxReal(1) - cosAngle * cosAngle;
	if (sinSq < quatEpsilon * quatEpsilon)
		return left;

	const PxReal sinAngle = PxSqrt(sinSq);
	const PxReal angle = PxAtan2(sinAngle, cosAngle);
	const PxReal invSinAngle = PxReal(1) / sinAngle;
	const PxReal weightLeft = PxSin(angle * (PxReal(1) - t)) * invSinAngle;
	const PxReal weightRight = PxSin(angle * t) * invSinAngle * targetSign;
	return left * weightLeft + right * weightRight;
}
/**
\brief integrate transform.
\param[in] curTrans The current transform
\param[in] linvel Linear velocity
\param[in] angvel Angular velocity
\param[in] timeStep The time-step for integration
\param[out] result The integrated transform
*/
PX_FOUNDATION_API void PxIntegrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel,
PxReal timeStep, PxTransform& result);
//! \brief Compute the exponent of a PxVec3
//! Builds PxQuat(angle, axis) with angle = |v| and axis = v normalized;
//! (near-)zero-length vectors map to the identity quaternion.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuat PxExp(const PxVec3& v)
{
	const PxReal lenSq = v.magnitudeSquared();
	if (lenSq < 1e-24f)
		return PxQuat(PxIdentity);
	return PxQuat(PxSqrt(lenSq), v * PxRecipSqrt(lenSq));
}
/**
\brief computes a oriented bounding box around the scaled basis.
\param basis Input = skewed basis, Output = (normalized) orthogonal basis.
\return Bounding box extent.
*/
PX_FOUNDATION_API PxVec3 PxOptimizeBoundingBox(PxMat33& basis);
/**
\brief return Returns the log of a PxQuat
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxLog(const PxQuat& q)
{
	const PxReal imLength = q.getImaginaryPart().magnitude();
	if (imLength < 1e-12f)
		return PxVec3(0.0f);

	// Pick the sign of the arguments so the half-angle has magnitude <= pi/2.
	PxReal halfAngle;
	if (q.w < 0)
		halfAngle = PxAtan2(-imLength, -q.w);
	else
		halfAngle = PxAtan2(imLength, q.w);
	PX_ASSERT(halfAngle >= -PxPi / 2 && halfAngle <= PxPi / 2);

	return q.getImaginaryPart().getNormalized() * 2.f * halfAngle;
}
/**
\brief return Returns 0 if v.x is largest element of v, 1 if v.y is largest element, 2 if v.z is largest element.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxLargestAxis(const PxVec3& v)
{
	// Strict comparisons: on ties the lower index wins (x before y before z).
	PxU32 best = 0;
	if (v.y > v.x)
		best = 1;
	if (v.z > v[best])
		best = 2;
	return best;
}
/**
\brief Compute tan(theta/2) given sin(theta) and cos(theta) as inputs.

\param[in] sin has value sin(theta)
\param[in] cos has value cos(theta)
\return Returns tan(theta/2)

\note At the theta = +/-pi singularity the function returns a large finite value
(+/-sqrt(FLT_MAX)) instead of infinity; the guard only triggers on cos == -1.0f exactly.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal PxTanHalf(PxReal sin, PxReal cos)
{
// PT: avoids divide by zero for singularity. We return sqrt(FLT_MAX) instead of FLT_MAX
// to make sure the calling code doesn't generate INF values when manipulating the returned value
// (some joints multiply it by 4, etc).
if (cos == -1.0f)
return sin < 0.0f ? -sqrtf(FLT_MAX) : sqrtf(FLT_MAX);
// PT: half-angle formula: tan(a/2) = sin(a)/(1+cos(a))
return sin / (1.0f + cos);
}
/**
\brief Compute the closest point on an 2d ellipse to a given 2d point.

\param[in] point is a 2d point in the y-z plane represented by (point.y, point.z)
\param[in] radii are the radii of the ellipse (radii.y and radii.z) in the y-z plane.
\return Returns the 2d position on the surface of the ellipse that is closest to point.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 PxEllipseClamp(const PxVec3& point, const PxVec3& radii)
{
// lagrange multiplier method with Newton/Halley hybrid root-finder.
// see http://www.geometrictools.com/Documentation/DistancePointToEllipse2.pdf
// for proof of Newton step robustness and initial estimate.
// Halley converges much faster but sometimes overshoots - when that happens we take
// a newton step instead
// converges in 1-2 iterations where D&C works well, and it's good with 4 iterations
// with any ellipse that isn't completely crazy
const PxU32 MAX_ITERATIONS = 20;
const PxReal convergenceThreshold = 1e-4f;
// iteration requires first quadrant but we recover generality later
// (multiplying by the signed 'point' below restores the original quadrant)
PxVec3 q(0, PxAbs(point.y), PxAbs(point.z));
const PxReal tinyEps = 1e-6f; // very close to minor axis is numerically problematic but trivial
// Points (almost) on the major axis clamp trivially to the major-axis vertex.
if (radii.y >= radii.z)
{
if (q.z < tinyEps)
return PxVec3(0, point.y > 0 ? radii.y : -radii.y, 0);
}
else
{
if (q.y < tinyEps)
return PxVec3(0, 0, point.z > 0 ? radii.z : -radii.z);
}
PxVec3 denom, e2 = radii.multiply(radii), eq = radii.multiply(q);
// we can use any initial guess which is > maximum(-e.y^2,-e.z^2) and for which f(t) is > 0.
// this guess works well near the axes, but is weak along the diagonals.
PxReal t = PxMax(eq.y - e2.y, eq.z - e2.z);
for (PxU32 i = 0; i < MAX_ITERATIONS; i++)
{
// f(t) = (e.y*q.y/(t+e.y^2))^2 + (e.z*q.z/(t+e.z^2))^2 - 1; find its root in t.
denom = PxVec3(0, 1 / (t + e2.y), 1 / (t + e2.z));
PxVec3 denom2 = eq.multiply(denom);
PxVec3 fv = denom2.multiply(denom2);
PxReal f = fv.y + fv.z - 1;
// although in exact arithmetic we are guaranteed f>0, we can get here
// on the first iteration via catastrophic cancellation if the point is
// very close to the origin. In that case we just behave as if f=0
if (f < convergenceThreshold)
return e2.multiply(point).multiply(denom);
PxReal df = fv.dot(denom) * -2.0f;
t = t - f / df;
}
// we didn't converge, so clamp what we have
// (project the last iterate back onto the ellipse by rescaling)
PxVec3 r = e2.multiply(point).multiply(denom);
return r * PxRecipSqrt(PxSqr(r.y / radii.y) + PxSqr(r.z / radii.z));
}
/**
\brief Compute from an input quaternion q a pair of quaternions (swing, twist) such that
q = swing * twist
with the caveats that swing.x = twist.y = twist.z = 0.

\param[in] q is the quaternion to be decomposed into swing and twist quaternions.
\param[out] swing is the swing component of the quaternion decomposition.
\param[out] twist is the twist component of the quaternion decomposition.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxSeparateSwingTwist(const PxQuat& q, PxQuat& swing, PxQuat& twist)
{
	// Project q onto the x-axis; a zero x-component means there is no twist at all.
	if (q.x != 0.0f)
		twist = PxQuat(q.x, 0, 0, q.w).getNormalized();
	else
		twist = PxQuat(PxIdentity);
	// The remaining rotation is the swing: q * twist^-1 (conjugate == inverse for unit quats).
	swing = q * twist.getConjugate();
}
/**
\brief Compute the angle between two non-unit vectors

\param[in] v0 is one of the non-unit vectors
\param[in] v1 is the other of the two non-unit vectors
\return Returns the angle (in radians) between the two vector v0 and v1.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 PxComputeAngle(const PxVec3& v0, const PxVec3& v1)
{
	// atan2(|v0||v1|sin(angle), |v0||v1|cos(angle)): the common |v0||v1| factor
	// cancels inside atan2, so no normalization of the inputs is needed.
	const PxF32 cosTerm = v0.dot(v1);
	const PxF32 sinTerm = v0.cross(v1).magnitude();
	return PxAtan2(sinTerm, cosTerm);
}
/**
\brief Compute two normalized vectors (right and up) that are perpendicular to an input normalized vector (dir).

\param[in] dir is a normalized vector that is used to compute the perpendicular vectors.
\param[out] right is the first of the two vectors perpendicular to dir
\param[out] up is the second of the two vectors perpendicular to dir
*/
PX_CUDA_CALLABLE PX_INLINE void PxComputeBasisVectors(const PxVec3& dir, PxVec3& right, PxVec3& up)
{
// Derive two remaining vectors
// General case: dir is not (nearly) parallel to the y-axis, so 'right' is the
// component-wise expansion of cross((0,1,0), dir), then normalized.
if (PxAbs(dir.y) <= 0.9999f)
{
right = PxVec3(dir.z, 0.0f, -dir.x);
right.normalize();
// PT: normalize not needed for 'up' because dir & right are unit vectors,
// and by construction the angle between them is 90 degrees (i.e. sin(angle)=1)
// ('up' below is cross(dir, right) written out with right.y == 0)
up = PxVec3(dir.y * right.z, dir.z * right.x - dir.x * right.z, -dir.y * right.x);
}
else
{
// dir is (nearly) +/-y: cross with the y-axis would be degenerate, so fall back
// to a fixed 'right' along x and build 'up' in the y-z plane.
right = PxVec3(1.0f, 0.0f, 0.0f);
up = PxVec3(0.0f, dir.z, -dir.y);
up.normalize();
}
}
/**
\brief Compute three normalized vectors (dir, right and up) that are parallel to (dir) and perpendicular to (right, up) the
normalized direction vector (p1 - p0)/||p1 - p0||.

\param[in] p0 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||.
\param[in] p1 is used to compute the normalized vector dir = (p1 - p0)/||p1 - p0||.
\param[out] dir is the normalized vector (p1 - p0)/||p1 - p0||.
\param[out] right is the first of the two normalized vectors perpendicular to dir
\param[out] up is the second of the two normalized vectors perpendicular to dir
*/
PX_INLINE void PxComputeBasisVectors(const PxVec3& p0, const PxVec3& p1, PxVec3& dir, PxVec3& right, PxVec3& up)
{
	// Normalized segment direction, then delegate the perpendicular pair to the
	// single-vector overload.
	const PxVec3 delta = p1 - p0;
	dir = delta;
	dir.normalize();
	PxComputeBasisVectors(dir, right, up);
}
/**
\brief Compute (i+1)%3
*/
PX_INLINE PxU32 PxGetNextIndex3(PxU32 i)
{
	// Branch-free (i+1)%3 for i in {0,1,2}:
	// i>>1 is 1 only when i==2, so 2 maps to (2+1+1)&3 == 0, while 0->1 and 1->2.
	// Note: only valid for inputs 0..2 — other values do NOT equal (i+1)%3.
	return (i + 1 + (i >> 1)) & 3;
}
/**
\brief Computes the barycentric coordinates for a point inside a tetrahedron.
This function calculates the barycentric coordinates of a point p with respect to a tetrahedron defined by vertices a, b, c, and d.
\param[in] a Vertex A of the tetrahedron
\param[in] b Vertex B of the tetrahedron
\param[in] c Vertex C of the tetrahedron
\param[in] d Vertex D of the tetrahedron
\param[in] p The point for which to compute the barycentric coordinates
\param[out] bary The resulting barycentric coordinates as a PxVec4
*/
PX_INLINE PX_CUDA_CALLABLE void PxComputeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec3& p, PxVec4& bary)
{
	// Edge vectors of the tetrahedron, all rooted at vertex a.
	const PxVec3 ab = b - a;
	const PxVec3 ac = c - a;
	const PxVec3 ad = d - a;
	const PxVec3 ap = p - a;

	// Cramer's rule: each coordinate is the ratio of two signed (6x) tetrahedron
	// volumes, expressed as scalar triple products.
	const PxReal volume = ab.dot(ac.cross(ad));	// 6x signed volume of (a,b,c,d)
	bary.y = ap.dot(ac.cross(ad)) / volume;		// weight of vertex b
	bary.z = ab.dot(ap.cross(ad)) / volume;		// weight of vertex c
	bary.w = ab.dot(ac.cross(ap)) / volume;		// weight of vertex d
	bary.x = 1 - bary.y - bary.z - bary.w;		// weights sum to one
}
/**
\brief Computes the barycentric coordinates for a point inside a triangle.
This function calculates the barycentric coordinates of a point p with respect to a triangle defined by vertices a, b, and c.
\param[in] a Vertex A of the triangle
\param[in] b Vertex B of the triangle
\param[in] c Vertex C of the triangle
\param[in] p The point for which to compute the barycentric coordinates
\param[out] bary The resulting barycentric coordinates as a PxVec4
*/
PX_INLINE PX_CUDA_CALLABLE void PxComputeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& p, PxVec4& bary)
{
	// Edge and query vectors rooted at vertex a.
	const PxVec3 ab = b - a;
	const PxVec3 ac = c - a;
	const PxVec3 ap = p - a;

	// Solve the 2x2 normal equations of p = a + v*ab + w*ac (Cramer's rule).
	const float dAbAb = ab.dot(ab);
	const float dAbAc = ab.dot(ac);
	const float dAcAc = ac.dot(ac);
	const float dApAb = ap.dot(ab);
	const float dApAc = ap.dot(ac);
	const float det = dAbAb * dAcAc - dAbAc * dAbAc;

	const float v = (dAcAc * dApAb - dAbAc * dApAc) / det;	// weight of vertex b
	const float w = (dAbAb * dApAc - dAbAc * dApAb) / det;	// weight of vertex c

	// The w-component of the output is unused for triangles and set to zero.
	bary = PxVec4(1.f - v - w, v, w, 0.f);
}
/**
\brief Computes the barycentric coordinates for a point inside a triangle (deprecated).
This function is deprecated. Use PxComputeBarycentric instead.
\param[in] a Vertex A of the triangle
\param[in] b Vertex B of the triangle
\param[in] c Vertex C of the triangle
\param[in] p The point for which to compute the barycentric coordinates
\param[out] bary The resulting barycentric coordinates as a PxVec4
\see PxComputeBarycentric
*/
PX_INLINE PX_CUDA_CALLABLE PX_DEPRECATED void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& p, PxVec4& bary)
{
	// Deprecated forwarding shim, kept for source compatibility with older SDK versions.
	PxComputeBarycentric(a, b, c, p, bary);
}
/**
\brief Computes the barycentric coordinates for a point inside a tetrahedron (deprecated).
This function is deprecated. Use PxComputeBarycentric instead.
\param[in] a Vertex A of the tetrahedron
\param[in] b Vertex B of the tetrahedron
\param[in] c Vertex C of the tetrahedron
\param[in] d Vertex D of the tetrahedron
\param[in] p The point for which to compute the barycentric coordinates
\param[out] bary The resulting barycentric coordinates as a PxVec4
\see PxComputeBarycentric
*/
PX_INLINE PX_CUDA_CALLABLE PX_DEPRECATED void computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec3& p, PxVec4& bary)
{
	// Deprecated forwarding shim, kept for source compatibility with older SDK versions.
	PxComputeBarycentric(a, b, c, d, p, bary);
}
/**
\brief Performs linear interpolation between two values.
\param[in] a The start value
\param[in] b The end value
\param[in] t The interpolation parameter in the range [0, 1]
\return The interpolated value
*/
PX_INLINE PX_CUDA_CALLABLE static float PxLerp(float a, float b, float t)
{
	// Single multiply-add form: exact at t == 0 (yields a bit-for-bit).
	const float delta = b - a;
	return a + t * delta;
}
/**
\brief Performs bilinear interpolation.
\param[in] f00 The value at (0, 0)
\param[in] f10 The value at (1, 0)
\param[in] f01 The value at (0, 1)
\param[in] f11 The value at (1, 1)
\param[in] tx The interpolation parameter along the x-axis
\param[in] ty The interpolation parameter along the y-axis
\return The interpolated value
\see PxLerp
*/
PX_INLINE PX_CUDA_CALLABLE static PxReal PxBiLerp(
	const PxReal f00,
	const PxReal f10,
	const PxReal f01,
	const PxReal f11,
	const PxReal tx, const PxReal ty)
{
	// Interpolate along x on both rows, then blend the two rows along y.
	const PxReal row0 = PxLerp(f00, f10, tx);	// y = 0 row
	const PxReal row1 = PxLerp(f01, f11, tx);	// y = 1 row
	return PxLerp(row0, row1, ty);
}
/**
\brief Performs trilinear interpolation.
\param[in] f000 The value at (0, 0, 0)
\param[in] f100 The value at (1, 0, 0)
\param[in] f010 The value at (0, 1, 0)
\param[in] f110 The value at (1, 1, 0)
\param[in] f001 The value at (0, 0, 1)
\param[in] f101 The value at (1, 0, 1)
\param[in] f011 The value at (0, 1, 1)
\param[in] f111 The value at (1, 1, 1)
\param[in] tx The interpolation parameter along the x-axis
\param[in] ty The interpolation parameter along the y-axis
\param[in] tz The interpolation parameter along the z-axis
\return The interpolated value
\see PxLerp PxBiLerp
*/
PX_INLINE PX_CUDA_CALLABLE static PxReal PxTriLerp(
	const PxReal f000,
	const PxReal f100,
	const PxReal f010,
	const PxReal f110,
	const PxReal f001,
	const PxReal f101,
	const PxReal f011,
	const PxReal f111,
	const PxReal tx,
	const PxReal ty,
	const PxReal tz)
{
	// Bilinearly interpolate on the two z-faces, then blend the faces along z.
	const PxReal faceZ0 = PxBiLerp(f000, f100, f010, f110, tx, ty);	// z = 0 face
	const PxReal faceZ1 = PxBiLerp(f001, f101, f011, f111, tx, ty);	// z = 1 face
	return PxLerp(faceZ0, faceZ1, tz);
}
/**
\brief Computes the 1D index for a 3D grid point.
\param[in] i The x-coordinate index
\param[in] j The y-coordinate index
\param[in] k The z-coordinate index
\param[in] nbX The number of grid points along the x-axis
\param[in] nbY The number of grid points along the y-axis
\return The 1D index corresponding to the 3D grid point
*/
PX_INLINE PX_CUDA_CALLABLE static PxU32 PxSDFIdx(PxU32 i, PxU32 j, PxU32 k, PxU32 nbX, PxU32 nbY)
{
	// Row-major linearization: x varies fastest, then y, then z.
	// Equivalent to i + j*nbX + k*nbX*nbY (identical in unsigned arithmetic).
	return (k * nbY + j) * nbX + i;
}
/**
\brief Samples the signed distance field (SDF) at a given local position.
This function samples the SDF at a given local position within the defined box bounds and calculates the interpolated distance value. It handles grid clamping and ensures that the sampled value is within the tolerance limit.
\param[in] sdf A pointer to the SDF data
\param[in] localPos The local position to sample the SDF
\param[in] sdfBoxLower The lower bound of the SDF box
\param[in] sdfBoxHigher The upper bound of the SDF box
\param[in] sdfDx The spacing between grid points in the SDF
\param[in] invSdfDx The inverse of the grid spacing
\param[in] dimX The number of grid points along the x-axis
\param[in] dimY The number of grid points along the y-axis
\param[in] dimZ The number of grid points along the z-axis
\param[in] tolerance The tolerance for clamping the grid points
\return The sampled SDF value
\see PxTriLerp PxSDFIdx
*/
PX_INLINE PX_CUDA_CALLABLE static PxReal PxSDFSample(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower,
	const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxReal tolerance)
{
	// Clamp the query point into the SDF bounding box; 'diff' is the part that lies outside.
	PxVec3 clampedGridPt = localPos.maximum(sdfBoxLower).minimum(sdfBoxHigher);
	const PxVec3 diff = (localPos - clampedGridPt);

	// Reject queries that are further than 'tolerance' away from the box.
	if (diff.magnitudeSquared() > tolerance*tolerance)
		return PX_MAX_F32;

	// Convert the clamped position to grid space: (i,j,k) is the cell index, f the
	// fractional position within that cell.
	PxVec3 f = (clampedGridPt - sdfBoxLower) * invSdfDx;
	PxU32 i = PxU32(f.x);
	PxU32 j = PxU32(f.y);
	PxU32 k = PxU32(f.z);
	f -= PxVec3(PxReal(i), PxReal(j), PxReal(k));

	// Points on (or numerically past) the last grid plane are remapped into the last
	// valid cell with a fractional coordinate of 1, so the 8 lookups below stay in bounds.
	// NOTE(review): the 'clampedGridPt' adjustments below are never read afterwards —
	// they look like leftovers; confirm before removing.
	if (i >= (dimX - 1))
	{
		i = dimX - 2;
		clampedGridPt.x -= f.x * sdfDx;
		f.x = 1.f;
	}
	if (j >= (dimY - 1))
	{
		j = dimY - 2;
		clampedGridPt.y -= f.y * sdfDx;
		f.y = 1.f;
	}
	if (k >= (dimZ - 1))
	{
		k = dimZ - 2;
		clampedGridPt.z -= f.z * sdfDx;
		f.z = 1.f;
	}

	// Fetch the 8 corner samples of the cell and interpolate trilinearly.
	const PxReal s000 = sdf[PxSDFIdx(i, j, k, dimX, dimY)];
	const PxReal s100 = sdf[PxSDFIdx(i + 1, j, k, dimX, dimY)];
	const PxReal s010 = sdf[PxSDFIdx(i, j + 1, k, dimX, dimY)];
	const PxReal s110 = sdf[PxSDFIdx(i + 1, j + 1, k, dimX, dimY)];
	const PxReal s001 = sdf[PxSDFIdx(i, j, k + 1, dimX, dimY)];
	const PxReal s101 = sdf[PxSDFIdx(i + 1, j, k + 1, dimX, dimY)];
	const PxReal s011 = sdf[PxSDFIdx(i, j + 1, k + 1, dimX, dimY)];
	const PxReal s111 = sdf[PxSDFIdx(i + 1, j + 1, k + 1, dimX, dimY)];

	PxReal dist = PxTriLerp(
		s000,
		s100,
		s010,
		s110,
		s001,
		s101,
		s011,
		s111,
		f.x, f.y, f.z);

	// For query points outside the box, add the distance from the point to the box so
	// the returned value keeps increasing beyond the SDF domain (diff is zero inside).
	dist += diff.magnitude();
	return dist;
}
#if !PX_DOXYGEN // remove due to failing references
// Deprecated scope: these static members are forwarding shims to the identically-named
// freestanding functions in the physx namespace, kept for source compatibility.
PX_DEPRECATED struct Interpolation
{
	/**
	\brief Performs linear interpolation between two values.
	\param[in] a The start value
	\param[in] b The end value
	\param[in] t The interpolation parameter in the range [0, 1]
	\return The interpolated value
	\deprecated Please use corresponding freestanding function outside of Interpolation scope.
	*/
	PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE static float PxLerp(float a, float b, float t)
	{
		return ::physx::PxLerp(a, b, t);
	}

	/**
	\brief Performs bilinear interpolation.
	\param[in] f00 The value at (0, 0)
	\param[in] f10 The value at (1, 0)
	\param[in] f01 The value at (0, 1)
	\param[in] f11 The value at (1, 1)
	\param[in] tx The interpolation parameter along the x-axis
	\param[in] ty The interpolation parameter along the y-axis
	\return The interpolated value
	\deprecated Please use corresponding freestanding function outside of Interpolation scope.
	*/
	PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE static PxReal PxBiLerp(
		const PxReal f00,
		const PxReal f10,
		const PxReal f01,
		const PxReal f11,
		const PxReal tx, const PxReal ty)
	{
		return ::physx::PxBiLerp(f00, f10, f01, f11, tx, ty);
	}

	/**
	\brief Performs trilinear interpolation.
	\param[in] f000 The value at (0, 0, 0)
	\param[in] f100 The value at (1, 0, 0)
	\param[in] f010 The value at (0, 1, 0)
	\param[in] f110 The value at (1, 1, 0)
	\param[in] f001 The value at (0, 0, 1)
	\param[in] f101 The value at (1, 0, 1)
	\param[in] f011 The value at (0, 1, 1)
	\param[in] f111 The value at (1, 1, 1)
	\param[in] tx The interpolation parameter along the x-axis
	\param[in] ty The interpolation parameter along the y-axis
	\param[in] tz The interpolation parameter along the z-axis
	\return The interpolated value
	\deprecated Please use corresponding freestanding function outside of Interpolation scope.
	*/
	PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE static PxReal PxTriLerp(
		const PxReal f000,
		const PxReal f100,
		const PxReal f010,
		const PxReal f110,
		const PxReal f001,
		const PxReal f101,
		const PxReal f011,
		const PxReal f111,
		const PxReal tx,
		const PxReal ty,
		const PxReal tz)
	{
		return ::physx::PxTriLerp(f000, f100, f010, f110, f001, f101, f011, f111, tx, ty, tz);
	}

	/**
	\brief Computes the 1D index for a 3D grid point.
	\param[in] i The x-coordinate index
	\param[in] j The y-coordinate index
	\param[in] k The z-coordinate index
	\param[in] nbX The number of grid points along the x-axis
	\param[in] nbY The number of grid points along the y-axis
	\return The 1D index corresponding to the 3D grid point
	\deprecated Please use corresponding freestanding function outside of Interpolation scope.
	*/
	PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE static PxU32 PxSDFIdx(PxU32 i, PxU32 j, PxU32 k, PxU32 nbX, PxU32 nbY)
	{
		return ::physx::PxSDFIdx(i, j, k, nbX, nbY);
	}

	/**
	\brief Samples the signed distance field (SDF) at a given local position.
	This function samples the SDF at a given local position within the defined box bounds and calculates the interpolated distance value. It handles grid clamping and ensures that the sampled value is within the tolerance limit.
	\param[in] sdf A pointer to the SDF data
	\param[in] localPos The local position to sample the SDF
	\param[in] sdfBoxLower The lower bound of the SDF box
	\param[in] sdfBoxHigher The upper bound of the SDF box
	\param[in] sdfDx The spacing between grid points in the SDF
	\param[in] invSdfDx The inverse of the grid spacing
	\param[in] dimX The number of grid points along the x-axis
	\param[in] dimY The number of grid points along the y-axis
	\param[in] dimZ The number of grid points along the z-axis
	\param[in] tolerance The tolerance for clamping the grid points
	\return The sampled SDF value
	\deprecated Please use corresponding freestanding function outside of Interpolation scope.
	*/
	PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE static PxReal PxSDFSampleImpl(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower,
		const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxReal tolerance)
	{
		return ::physx::PxSDFSample(sdf, localPos, sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance);
	}
};
#endif // !PX_DOXYGEN // remove due to failing references
/**
\brief Samples the signed distance field (SDF) at a given local position with gradient computation (deprecated).
\param[in] sdf A pointer to the SDF data
\param[in] localPos The local position to sample the SDF
\param[in] sdfBoxLower The lower bound of the SDF box
\param[in] sdfBoxHigher The upper bound of the SDF box
\param[in] sdfDx The spacing between grid points in the SDF
\param[in] invSdfDx The inverse of the grid spacing
\param[in] dimX The number of grid points along the x-axis
\param[in] dimY The number of grid points along the y-axis
\param[in] dimZ The number of grid points along the z-axis
\param[out] gradient The resulting gradient vector
\param[in] tolerance The tolerance for clamping the grid points (default is PX_MAX_F32)
\return The sampled SDF value
\deprecated Please use PxSDFSample.
*/
PX_DEPRECATED PX_INLINE PX_CUDA_CALLABLE PxReal PxSdfSample(const PxReal* PX_RESTRICT sdf, const PxVec3& localPos, const PxVec3& sdfBoxLower,
	const PxVec3& sdfBoxHigher, const PxReal sdfDx, const PxReal invSdfDx, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ, PxVec3& gradient, PxReal tolerance = PX_MAX_F32)
{
	// Sample the distance at the query point itself.
	PxReal dist = PxSDFSample(sdf, localPos, sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance);
	// Only compute the gradient when the point is within tolerance; otherwise the
	// output parameter is left untouched.
	if (dist < tolerance)
	{
		// Central differences with step sdfDx along each axis (6 extra SDF samples).
		// Note: the difference is not divided by the 2*sdfDx span, so 'gradient' is a
		// scaled (unnormalized) gradient; callers needing a direction should normalize it.
		PxVec3 grad;
		grad.x = PxSDFSample(sdf, localPos + PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) -
			PxSDFSample(sdf, localPos - PxVec3(sdfDx, 0.f, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance);
		grad.y = PxSDFSample(sdf, localPos + PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) -
			PxSDFSample(sdf, localPos - PxVec3(0.f, sdfDx, 0.f), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance);
		grad.z = PxSDFSample(sdf, localPos + PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance) -
			PxSDFSample(sdf, localPos - PxVec3(0.f, 0.f, sdfDx), sdfBoxLower, sdfBoxHigher, sdfDx, invSdfDx, dimX, dimY, dimZ, tolerance);
		gradient = grad;
	}
	return dist;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,126 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MEMORY_H
#define PX_MEMORY_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Sets the bytes of the provided buffer to zero.
\param dest [out] Pointer to block of memory to set zero.
\param count [in] Number of bytes to set to zero.
\return Pointer to memory block (same as input)
*/
PX_FORCE_INLINE void* PxMemZero(void* dest, PxU32 count)
{
	// Zero-sized requests are not forwarded to the intrinsic, to avoid undefined behavior.
	if(count == 0)
		return NULL;
	return physx::intrinsics::memZero(dest, count);
}
/**
\brief Sets the bytes of the provided buffer to the specified value.
\param dest [out] Pointer to block of memory to set to the specified value.
\param c [in] Value to set the bytes of the block of memory to.
\param count [in] Number of bytes to set to the specified value.
\return Pointer to memory block (same as input)
*/
PX_FORCE_INLINE void* PxMemSet(void* dest, PxI32 c, PxU32 count)
{
	// Zero-sized requests are not forwarded to the intrinsic, to avoid undefined behavior.
	if(count == 0)
		return NULL;
	return physx::intrinsics::memSet(dest, c, count);
}
/**
\brief Copies the bytes of one memory block to another. The memory blocks must not overlap.
\note Use #PxMemMove if memory blocks overlap.
\param dest [out] Pointer to block of memory to copy to.
\param src [in] Pointer to block of memory to copy from.
\param count [in] Number of bytes to copy.
\return Pointer to destination memory block
*/
PX_FORCE_INLINE void* PxMemCopy(void* dest, const void* src, PxU32 count)
{
	// Zero-sized requests are not forwarded to the intrinsic, to avoid undefined behavior.
	if(count == 0)
		return NULL;
	return physx::intrinsics::memCopy(dest, src, count);
}
/**
\brief Copies the bytes of one memory block to another. The memory blocks can overlap.
\note Use #PxMemCopy if memory blocks do not overlap.
\param dest [out] Pointer to block of memory to copy to.
\param src [in] Pointer to block of memory to copy from.
\param count [in] Number of bytes to copy.
\return Pointer to destination memory block
*/
PX_FORCE_INLINE void* PxMemMove(void* dest, const void* src, PxU32 count)
{
	// This is to avoid undefined behavior: consistent with PxMemZero/PxMemSet/PxMemCopy,
	// zero-sized requests are not forwarded to the intrinsic (calling memmove with a null
	// pointer is undefined even for a size of 0 in the C standard).
	return (count != 0) ? physx::intrinsics::memMove(dest, src, count) : NULL;
}
/**
Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data
definition for serialized classes is complete in checked builds.
\param ptr [out] Pointer to block of memory to initialize.
\param byteSize [in] Number of bytes to initialize.
*/
PX_INLINE void PxMarkSerializedMemory(void* ptr, PxU32 byteSize)
{
#if PX_CHECKED
	// Fill with the 0xcd pattern so incomplete serialization meta data is detectable.
	PxMemSet(ptr, 0xcd, byteSize);
#else
	// No-op in non-checked builds; silence unused-parameter warnings.
	PX_UNUSED(ptr);
	PX_UNUSED(byteSize);
#endif
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,183 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MUTEX_H
#define PX_MUTEX_H
#include "foundation/PxAllocator.h"
/*
* This <new> inclusion is a best known fix for gcc 4.4.1 error:
* Creating object file for apex/src/PsAllocator.cpp ...
* In file included from apex/include/PsFoundation.h:30,
* from apex/src/PsAllocator.cpp:26:
* apex/include/PsMutex.h: In constructor 'physx::PxMutexT<Alloc>::MutexT(const Alloc&)':
* apex/include/PsMutex.h:92: error: no matching function for call to 'operator new(unsigned int,
* physx::PxMutexImpl*&)'
* <built-in>:0: note: candidates are: void* operator new(unsigned int)
*/
#include <new>
#if !PX_DOXYGEN
namespace physx
{
#endif
// Platform mutex implementation. Allocated and placement-constructed by PxMutexT below;
// only declarations here, the implementation lives in the foundation library.
class PX_FOUNDATION_API PxMutexImpl
{
  public:
	/**
	The constructor for Mutex creates a mutex. It is initially unlocked.
	*/
	PxMutexImpl();

	/**
	The destructor for Mutex deletes the mutex.
	*/
	~PxMutexImpl();

	/**
	Acquire (lock) the mutex. If the mutex is already locked
	by another thread, this method blocks until the mutex is
	unlocked.
	*/
	void lock();

	/**
	Attempt to acquire (lock) the mutex. If the mutex is already locked
	by another thread, this method returns false without blocking;
	it returns true if the lock was acquired.
	*/
	bool trylock();

	/**
	Release (unlock) the mutex.
	*/
	void unlock();

	/**
	Size of this class in bytes. Used by PxMutexT to allocate storage for
	placement-constructing a PxMutexImpl.
	*/
	static uint32_t getSize();
};
// Mutex wrapper that owns a PxMutexImpl allocated through the given allocator policy.
// Inherits from Alloc (empty-base optimization) rather than storing it as a member.
template <typename Alloc = PxReflectionAllocator<PxMutexImpl> >
class PxMutexT : protected Alloc
{
	PX_NOCOPY(PxMutexT)
  public:
	// RAII guard: locks the mutex on construction, unlocks on destruction.
	class ScopedLock
	{
		PxMutexT<Alloc>& mMutex;
		PX_NOCOPY(ScopedLock)
	  public:
		PX_INLINE ScopedLock(PxMutexT<Alloc>& mutex) : mMutex(mutex)
		{
			mMutex.lock();
		}
		PX_INLINE ~ScopedLock()
		{
			mMutex.unlock();
		}
	};

	/**
	The constructor for Mutex creates a mutex. It is initially unlocked.
	*/
	PxMutexT(const Alloc& alloc = Alloc()) : Alloc(alloc)
	{
		// Allocate raw storage via the allocator policy, then placement-construct the impl.
		mImpl = reinterpret_cast<PxMutexImpl*>(Alloc::allocate(PxMutexImpl::getSize(), PX_FL));
		PX_PLACEMENT_NEW(mImpl, PxMutexImpl)();
	}

	/**
	The destructor for Mutex deletes the mutex.
	*/
	~PxMutexT()
	{
		// Explicit destructor call mirrors the placement new in the constructor.
		mImpl->~PxMutexImpl();
		Alloc::deallocate(mImpl);
	}

	/**
	Acquire (lock) the mutex. If the mutex is already locked
	by another thread, this method blocks until the mutex is
	unlocked.
	*/
	PX_FORCE_INLINE void lock() const
	{
		mImpl->lock();
	}

	/**
	Acquire (lock) the mutex. If the mutex is already locked
	by another thread, this method returns false without blocking,
	returns true if lock is successfully acquired
	*/
	PX_FORCE_INLINE bool trylock() const
	{
		return mImpl->trylock();
	}

	/**
	Release (unlock) the mutex, the calling thread must have
	previously called lock() or method will error
	*/
	PX_FORCE_INLINE void unlock() const
	{
		mImpl->unlock();
	}

  private:
	// Owning pointer to the platform mutex; allocated in the ctor, released in the dtor.
	PxMutexImpl* mImpl;
};
// Reader/writer lock: multiple concurrent readers, exclusive writer.
// Declarations only; the implementation lives in the foundation library.
class PX_FOUNDATION_API PxReadWriteLock
{
	PX_NOCOPY(PxReadWriteLock)
  public:
	PxReadWriteLock();
	~PxReadWriteLock();

	// "takeLock" can only be false if the thread already holds the mutex, e.g. if it already acquired the write lock
	void lockReader(bool takeLock);
	void lockWriter();

	void unlockReader();
	void unlockWriter();

  private:
	// Opaque implementation, allocated by the foundation library.
	class ReadWriteLockImpl* mImpl;
};
typedef PxMutexT<> PxMutex;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,59 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_VERSION_H
#define PX_PHYSICS_VERSION_H
/*
VersionNumbers: The combination of these
numbers uniquely identifies the API, and should
be incremented when the SDK API changes. This may
include changes to file formats.
This header is included in the main SDK header files
so that the entire SDK and everything that builds on it
is completely rebuilt when this file changes. Thus,
this file is not to include a frequently changing
build number. See BuildNumber.h for that.
Each of these three values should stay below 255 because
sometimes they are stored in a byte.
*/
#define PX_PHYSICS_VERSION_MAJOR 5
#define PX_PHYSICS_VERSION_MINOR 6
#define PX_PHYSICS_VERSION_BUGFIX 1
/**
The constant PX_PHYSICS_VERSION is used when creating certain PhysX module objects.
This is to ensure that the application is using the same header version as the library was built with.
*/
#define PX_PHYSICS_VERSION ((PX_PHYSICS_VERSION_MAJOR<<24) + (PX_PHYSICS_VERSION_MINOR<<16) + (PX_PHYSICS_VERSION_BUGFIX<<8) + 0)
#endif

View File

@@ -0,0 +1,147 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PINNED_ARRAY_H
#define PX_PINNED_ARRAY_H
#include "foundation/PxArray.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxBounds3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// PT: the default pinned-memory arrays are defined as PxPinnedArray = PxArray<T, PxVirtualAllocator>.
// The PxVirtualAllocator ultimately uses cuMemHostAlloc via PxgCudaHostMemoryAllocatorCallback / PxgPinnedMemoryAllocate.
// We use the CU_MEMHOSTALLOC_DEVICEMAP flag there so cuMemHostGetDevicePointer() can later be used on returned ptr.
//
// The new pinned-memory arrays are defined as PxPinnedArraySafe = PxArray<T, PxPinnedAllocator<T> >. This uses a new
// allocator that allocates either from cuMemHostAlloc, *or* fallbacks to regular allocs when we run out of pinned memory.
// The issue is that in the second case cuMemHostGetDevicePointer() will fail, so we cannot use this everywhere.
//
// I think this exposes issues in PxArray itself, for example in the swap function (we don't swap the allocator data there,
// so when using a PxVirtualAllocator with PxArray the PxVirtualAllocator members are not swapped).
// PT: this class uses the fact that PxArray inherits from the allocator to add new members to the array class. In particular
// PxPinnedAllocator::mPinned describes where PxArray::mData has been allocated. The class is mostly designed to be used in
// conjunction with PxArray, not as a standalone allocator.
// Allocator that prefers CUDA pinned host memory (via the PxVirtualAllocatorCallback)
// and falls back to the regular PxReflectionAllocator when pinned memory runs out.
// mPinned records where the *last* allocation came from; the optional 'cookie'
// parameters let callers save/restore that state when allocations are interleaved.
template<class T>
class PxPinnedAllocator
{
	public:
	PxPinnedAllocator(PxVirtualAllocatorCallback* callback = NULL, int group = 0) : mCallback(callback), mGroup(group), mPinned(0)	{}

	// \param cookie receives the *previous* mPinned value; pass it back to deallocate()
	//        when freeing an older pointer after newer allocations have happened.
	PX_INLINE void* allocate(size_t size, const char* file, int line, uint32_t* cookie=NULL)
	{
		PX_ASSERT(mCallback);

		// PT: returns *previous* pinned value. It will be passed back to the deallocate function.
		if(cookie)
			*cookie = mPinned;

		if(!size)
		{
			// Sentinel for "no allocation"; neither branch of deallocate will match it.
			mPinned = 0xffffffff;
			return NULL;
		}

		// PT: first, try with the pinned-memory allocator
		void* ptr = mCallback->allocate(size, mGroup, file, line);
		if(ptr)
		{
			mPinned = 1;
			return ptr;
		}

		// PT: if it fails, fallback to regular allocator
		mPinned = 0;
		return PxReflectionAllocator<T>::allocate(size, file, line);
	}

	// Frees 'ptr' with whichever allocator produced it. Without a cookie this relies on
	// mPinned still describing that allocation (i.e. no allocation happened in between).
	PX_INLINE void deallocate(void* ptr, uint32_t* cookie=NULL)
	{
		PX_ASSERT(mCallback);
		if(ptr)
		{
			// PT: by default use the internal value, except if we're given an explicit cookie
			const uint32_t pinned = cookie ? *cookie : mPinned;
			if(pinned==1)
				mCallback->deallocate(ptr);
			else
				PxReflectionAllocator<T>::deallocate(ptr);
		}
	}

	PX_FORCE_INLINE	void	setCallback(PxVirtualAllocatorCallback* callback)
	{
		mCallback = callback;
	}

	PX_FORCE_INLINE	PxVirtualAllocatorCallback*	getCallback()
	{
		return mCallback;
	}

	private:
	PxVirtualAllocatorCallback*	mCallback;	// pinned-memory allocator, not owned
	const int					mGroup;		// allocation group id forwarded to the callback
	uint32_t					mPinned;	// where the last allocation came from: 1 = pinned, 0 = fallback, 0xffffffff = none

	PxPinnedAllocator& operator=(const PxPinnedAllocator&);
};
struct PxsCachedTransform;
// PT: default versions:
template<class T>
using PxPinnedArray = PxArray<T, PxVirtualAllocator>;
typedef PxArray<PxsCachedTransform, PxVirtualAllocator> PxCachedTransformArrayPinned;
typedef PxArray<PxBounds3, PxVirtualAllocator> PxBoundsArrayPinned;
typedef PxArray<PxReal, PxVirtualAllocator> PxFloatArrayPinned;
typedef PxArray<PxU32, PxVirtualAllocator> PxInt32ArrayPinned;
typedef PxArray<PxU16, PxVirtualAllocator> PxInt16ArrayPinned;
typedef PxArray<PxU8, PxVirtualAllocator> PxInt8ArrayPinned;
// PT: new versions
template<class T>
using PxPinnedArraySafe = PxArray<T, PxPinnedAllocator<T> >;
typedef PxArray<PxsCachedTransform, PxPinnedAllocator<PxsCachedTransform> > PxCachedTransformArrayPinnedSafe;
typedef PxArray<PxBounds3, PxPinnedAllocator<PxBounds3> > PxBoundsArrayPinnedSafe;
typedef PxArray<PxReal, PxPinnedAllocator<PxReal> > PxFloatArrayPinnedSafe;
typedef PxArray<PxU32, PxPinnedAllocator<PxU32> > PxInt32ArrayPinnedSafe;
typedef PxArray<PxU16, PxPinnedAllocator<PxU16> > PxInt16ArrayPinnedSafe;
typedef PxArray<PxU8, PxPinnedAllocator<PxU8> > PxInt8ArrayPinnedSafe;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,158 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PLANE_H
#define PX_PLANE_H
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Representation of a plane.

A plane is stored as a normal n and a scalar offset d, such that a point v
lies on the plane iff n.dot(v) + d = 0.
*/
class PxPlane
{
  public:
	/**
	\brief Default constructor, leaves the members uninitialized.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane()
	{
	}

	/**
	\brief Constructs from the individual components of the normal and the distance term.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance)
	{
	}

	/**
	\brief Constructs from a normal vector and the distance term.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& normal, float distance) : n(normal), d(distance)
	{
	}

	/**
	\brief Constructs the plane through a given point with a given normal.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& point, const PxVec3& normal)
	: n(normal), d(-point.dot(n)) // chosen so that normal.dot(point) + d = 0
	{
	}

	/**
	\brief Constructs the plane spanned by three points.
	The normal follows the winding p0 -> p1 -> p2 (right-hand rule).
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2)
	{
		const PxVec3 edge0 = p1 - p0;
		const PxVec3 edge1 = p2 - p0;
		n = edge0.cross(edge1).getNormalized();
		d = -p0.dot(n);
	}

	/**
	\brief Exact component-wise equality test of the two plane equations.
	*/
	PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxPlane& p) const
	{
		return n == p.n && d == p.d;
	}

	/**
	\brief Signed distance of a point from the plane (assuming n has unit length).
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE float distance(const PxVec3& p) const
	{
		return p.dot(n) + d;
	}

	/**
	\brief True if the point lies (numerically) on the plane.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE bool contains(const PxVec3& p) const
	{
		return PxAbs(distance(p)) < (1.0e-7f);
	}

	/**
	\brief Orthogonal projection of p onto the plane.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 project(const PxVec3& p) const
	{
		return p - n * distance(p);
	}

	/**
	\brief Returns an arbitrary point lying on the plane.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 pointInPlane() const
	{
		return -n * d;
	}

	/**
	\brief Rescales the plane equation so that the normal has unit length.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE void normalize()
	{
		const float invMagnitude = 1.0f / n.magnitude();
		n *= invMagnitude;
		d *= invMagnitude;
	}

	/**
	\brief Returns this plane transformed by the given pose.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane transform(const PxTransform& pose) const
	{
		const PxVec3 rotatedNormal = pose.rotate(n);
		return PxPlane(rotatedNormal, d - pose.p.dot(rotatedNormal));
	}

	/**
	\brief Returns this plane transformed by the inverse of the given pose.
	*/
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxPlane inverseTransform(const PxTransform& pose) const
	{
		const PxVec3 rotatedNormal = pose.rotateInv(n);
		return PxPlane(rotatedNormal, d + pose.p.dot(n));
	}

	PxVec3 n; //!< The normal to the plane
	float d;  //!< The distance from the origin
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,265 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_POOL_H
#define PX_POOL_H
#include "foundation/PxArray.h"
#include "foundation/PxSort.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxInlineArray.h"
#include "foundation/PxMemory.h"
namespace physx
{
/*!
Simple allocation pool.

Memory is acquired from the allocator in fixed-size "slabs", each of which is
carved into mElementsPerSlab elements. Freed elements are threaded onto an
intrusive freelist (their own storage is reused as the FreeList node), which is
why T must be at least pointer-sized.
*/
template <class T, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPoolBase : public PxUserAllocated, public Alloc
{
	PX_NOCOPY(PxPoolBase)
  protected:
	// \param alloc            allocator used for slab memory (also copied into the Alloc base)
	// \param elementsPerSlab  number of T elements carved out of each slab
	// \param slabSize         size in bytes of each slab allocation
	PxPoolBase(const Alloc& alloc, uint32_t elementsPerSlab, uint32_t slabSize)
	: Alloc(alloc), mSlabs(alloc), mElementsPerSlab(elementsPerSlab), mUsed(0), mSlabSize(slabSize), mFreeElement(0)
	{
		mSlabs.reserve(64);
		// freed elements are reused as freelist nodes, so T must be able to hold a pointer
		PX_COMPILE_TIME_ASSERT(sizeof(T) >= sizeof(size_t));
	}

  public:
	~PxPoolBase()
	{
		// destroy any still-live objects first, then release the slab memory itself
		if(mUsed)
			disposeElements();
		for(void** slabIt = mSlabs.begin(), *slabEnd = mSlabs.end(); slabIt != slabEnd; ++slabIt)
			Alloc::deallocate(*slabIt);
	}

	// Allocate space for single object
	PX_INLINE T* allocate()
	{
		if(mFreeElement == 0)
			allocateSlab();
		T* p = reinterpret_cast<T*>(mFreeElement);
		mFreeElement = mFreeElement->mNext;
		mUsed++;
		PxMarkSerializedMemory(p, sizeof(T));
		return p;
	}

	// Put space for a single element back in the lists
	PX_INLINE void deallocate(T* p)
	{
		if(p)
		{
			PX_ASSERT(mUsed);
			mUsed--;
			push(reinterpret_cast<FreeList*>(p));
		}
	}

	// construct() overloads: allocate raw storage from the pool and placement-new
	// a T with the given arguments. They return NULL only if allocate() did.
	PX_INLINE T* construct()
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T()) : NULL;
	}
	template <class A1>
	PX_INLINE T* construct(A1& a)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a)) : NULL;
	}
	template <class A1, class A2>
	PX_INLINE T* construct(A1& a, A2& b)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b)) : NULL;
	}
	template <class A1, class A2, class A3>
	PX_INLINE T* construct(A1& a, A2& b, A3& c)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL;
	}
	template <class A1, class A2, class A3>
	PX_INLINE T* construct(A1* a, A2& b, A3& c)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c)) : NULL;
	}
	template <class A1, class A2, class A3, class A4>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d)) : NULL;
	}
	template <class A1, class A2, class A3, class A4, class A5>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e)) : NULL;
	}
	template <class A1, class A2, class A3, class A4, class A5, class A6>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f)) : NULL;
	}
	template <class A1, class A2, class A3, class A4, class A5, class A6>
	PX_INLINE T* construct(const A1& a, A2& b, const A3& c, A4& d, A5& e, A6& f)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f)) : NULL;
	}
	template <class A1, class A2, class A3, class A4, class A5, class A6, class A7>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g)) : NULL;
	}
	template <class A1, class A2, class A3, class A4, class A5, class A6, class A7, class A8>
	PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e, A6& f, A7& g, A8& h)
	{
		T* t = allocate();
		return t ? PX_PLACEMENT_NEW(t, T(a, b, c, d, e, f, g, h)) : NULL;
	}

	// Runs the destructor, then returns the storage to the freelist.
	PX_INLINE void destroy(T* const p)
	{
		if(p)
		{
			p->~T();
			deallocate(p);
		}
	}

  protected:
	// Intrusive freelist node, stored in-place in the storage of a free element.
	struct FreeList
	{
		FreeList* mNext;
	};

	// All the allocated slabs, sorted by pointer
	PxArray<void*, Alloc> mSlabs;
	const uint32_t mElementsPerSlab;	// elements carved out of each slab
	uint32_t mUsed;						// number of currently live (allocated) elements
	const uint32_t mSlabSize;			// slab allocation size in bytes
	FreeList* mFreeElement; // Head of free-list

	// Pushes a node onto the head of the freelist.
	void push(FreeList* p)
	{
		p->mNext = mFreeElement;
		mFreeElement = p;
	}

	// Allocate a slab and segregate it into the freelist
	void allocateSlab()
	{
		T* slab = reinterpret_cast<T*>(Alloc::allocate(mSlabSize, PX_FL));
		mSlabs.pushBack(slab);
		// Build a chain of nodes for the freelist
		// (pushed back-to-front so the freelist ends up in ascending address order)
		T* it = slab + mElementsPerSlab;
		while(--it >= slab)
			push(reinterpret_cast<FreeList*>(it));
	}

	/*
	Cleanup method. Go through all active slabs and call destructor for live objects,
	then free their memory
	*/
	void disposeElements()
	{
		// Collect the freelist into an array, then sort both the free nodes and the
		// slabs by address so live elements can be found with one merge-style sweep.
		PxArray<void*, Alloc> freeNodes(*this);
		while(mFreeElement)
		{
			freeNodes.pushBack(mFreeElement);
			mFreeElement = mFreeElement->mNext;
		}
		Alloc& alloc(*this);
		PxSort(freeNodes.begin(), freeNodes.size(), PxLess<void*>(), alloc);
		PxSort(mSlabs.begin(), mSlabs.size(), PxLess<void*>(), alloc);
		// Walk each slab in address order; any element not present in the sorted
		// free-node list is still live and gets its destructor called.
		typename PxArray<void*, Alloc>::Iterator slabIt = mSlabs.begin(), slabEnd = mSlabs.end();
		for(typename PxArray<void*, Alloc>::Iterator freeIt = freeNodes.begin(); slabIt != slabEnd; ++slabIt)
		{
			for(T* tIt = reinterpret_cast<T*>(*slabIt), *tEnd = tIt + mElementsPerSlab; tIt != tEnd; ++tIt)
			{
				if(freeIt != freeNodes.end() && *freeIt == tIt)
					++freeIt; // element is on the freelist: not live, skip it
				else
					tIt->~T();
			}
		}
	}
};
// Original pool implementation: the user specifies how many elements each slab
// should hold, and the slab byte size is derived from that count.
template <class T, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPool : public PxPoolBase<T, Alloc>
{
	typedef PxPoolBase<T, Alloc> BaseType;

  public:
	PxPool(const Alloc& alloc = Alloc(), uint32_t elementsPerSlab = 32)
	: BaseType(alloc, elementsPerSlab, sizeof(T) * elementsPerSlab)
	{
	}
};
// Allows specification of the slab byte size instead of the occupancy: the
// per-slab element count is derived as slabSize / sizeof(T).
template <class T, uint32_t slabSize, class Alloc = typename PxAllocatorTraits<T>::Type>
class PxPool2 : public PxPoolBase<T, Alloc>
{
	typedef PxPoolBase<T, Alloc> BaseType;

  public:
	PxPool2(const Alloc& alloc = Alloc()) : BaseType(alloc, slabSize / sizeof(T), slabSize)
	{
		// a slab must be able to hold at least one element
		PX_COMPILE_TIME_ASSERT(slabSize > sizeof(T));
	}
};
} // namespace physx
#endif

View File

@@ -0,0 +1,536 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PREPROCESSOR_H
#define PX_PREPROCESSOR_H
#include <stddef.h>
#define PX_STRINGIZE_HELPER(X) #X
#define PX_STRINGIZE(X) PX_STRINGIZE_HELPER(X)
#define PX_CONCAT_HELPER(X, Y) X##Y
#define PX_CONCAT(X, Y) PX_CONCAT_HELPER(X, Y)
/*
The following preprocessor identifiers specify compiler, OS, and architecture.
All definitions have a value of 1 or 0, use '#if' instead of '#ifdef'.
*/
/**
Compiler defines, see http://sourceforge.net/p/predef/wiki/Compilers/
*/
#if defined(_MSC_VER)
#if _MSC_VER >= 1920
#define PX_VC 16
#elif _MSC_VER >= 1910
#define PX_VC 15
#elif _MSC_VER >= 1900
#define PX_VC 14
#elif _MSC_VER >= 1800
#define PX_VC 12
#elif _MSC_VER >= 1700
#define PX_VC 11
#elif _MSC_VER >= 1600
#define PX_VC 10
#elif _MSC_VER >= 1500
#define PX_VC 9
#else
#error "Unknown VC version"
#endif
#elif defined(__clang__)
#define PX_CLANG 1
#if defined (__clang_major__)
#define PX_CLANG_MAJOR __clang_major__
#elif defined (_clang_major)
#define PX_CLANG_MAJOR _clang_major
#else
#define PX_CLANG_MAJOR 0
#endif
#elif defined(__GNUC__) // note: __clang__ implies __GNUC__
#define PX_GCC 1
#else
#error "Unknown compiler"
#endif
// not treated as its own compiler because clang, for example, can, in theory, compile CUDA code too
#if defined(__CUDACC__)
#define PX_CUDA_COMPILER 1
#else
#define PX_CUDA_COMPILER 0
#endif
/**
Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSystems/
*/
#if defined(_WIN64)
#define PX_WIN64 1
#elif defined(_WIN32) // note: _M_PPC implies _WIN32
#define PX_WIN32 1
#elif defined(__linux__) || defined (__EMSCRIPTEN__)
#define PX_LINUX 1
#elif defined(__APPLE__)
#define PX_OSX 1
#elif defined(__NX__)
#define PX_SWITCH 1
#else
#error "Unknown operating system"
#endif
/**
Architecture defines, see http://sourceforge.net/p/predef/wiki/Architectures/
*/
#if defined(__x86_64__) || defined(_M_X64)
#define PX_X64 1
#elif defined(__i386__) || defined(_M_IX86) || defined (__EMSCRIPTEN__)
#define PX_X86 1
#elif defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64)
#define PX_A64 1
#elif defined(__arm__) || defined(_M_ARM)
#define PX_ARM 1
#elif defined(__ppc__) || defined(_M_PPC) || defined(__CELLOS_LV2__)
#define PX_PPC 1
#else
#error "Unknown architecture"
#endif
/**
SIMD defines
*/
#if !defined(PX_SIMD_DISABLED)
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || (defined (__EMSCRIPTEN__) && defined(__SSE2__))
#define PX_SSE2 1
#endif
#if defined(_M_ARM) || defined(__ARM_NEON__) || defined(__ARM_NEON)
#define PX_NEON 1
#endif
#if defined(_M_PPC) || defined(__CELLOS_LV2__)
#define PX_VMX 1
#endif
#endif
/**
define anything not defined on this platform to 0
*/
#ifndef PX_VC
#define PX_VC 0
#endif
#ifndef PX_CLANG
#define PX_CLANG 0
#endif
#ifndef PX_GCC
#define PX_GCC 0
#endif
#ifndef PX_WIN64
#define PX_WIN64 0
#endif
#ifndef PX_WIN32
#define PX_WIN32 0
#endif
#ifndef PX_LINUX
#define PX_LINUX 0
#endif
#ifndef PX_OSX
#define PX_OSX 0
#endif
#ifndef PX_SWITCH
#define PX_SWITCH 0
#endif
#ifndef PX_X64
#define PX_X64 0
#endif
#ifndef PX_X86
#define PX_X86 0
#endif
#ifndef PX_A64
#define PX_A64 0
#endif
#ifndef PX_ARM
#define PX_ARM 0
#endif
#ifndef PX_PPC
#define PX_PPC 0
#endif
#ifndef PX_SSE2
#define PX_SSE2 0
#endif
#ifndef PX_NEON
#define PX_NEON 0
#endif
#ifndef PX_VMX
#define PX_VMX 0
#endif
/*
define anything not defined through the command line to 0
*/
#ifndef PX_DEBUG
#define PX_DEBUG 0
#endif
#ifndef PX_CHECKED
#define PX_CHECKED 0
#endif
#ifndef PX_PROFILE
#define PX_PROFILE 0
#endif
#ifndef PX_DEBUG_CRT
#define PX_DEBUG_CRT 0
#endif
#ifndef PX_NVTX
#define PX_NVTX 0
#endif
#ifndef PX_DOXYGEN
#define PX_DOXYGEN 0
#endif
/**
family shortcuts
*/
// compiler
#define PX_GCC_FAMILY (PX_CLANG || PX_GCC)
// os
#define PX_WINDOWS_FAMILY (PX_WIN32 || PX_WIN64)
#define PX_LINUX_FAMILY PX_LINUX
#define PX_APPLE_FAMILY PX_OSX // equivalent to #if __APPLE__
#define PX_UNIX_FAMILY (PX_LINUX_FAMILY || PX_APPLE_FAMILY) // shortcut for unix/posix platforms
#if defined(__EMSCRIPTEN__)
#define PX_EMSCRIPTEN 1
#else
#define PX_EMSCRIPTEN 0
#endif
// architecture
#define PX_INTEL_FAMILY (PX_X64 || PX_X86)
#define PX_ARM_FAMILY (PX_ARM || PX_A64)
#define PX_P64_FAMILY (PX_X64 || PX_A64) // shortcut for 64-bit architectures
/**
C++ standard library defines
*/
#if defined(_LIBCPP_VERSION) || PX_WIN64 || PX_WIN32 || PX_EMSCRIPTEN
#define PX_LIBCPP 1
#else
#define PX_LIBCPP 0
#endif
// legacy define for PhysX
#define PX_WINDOWS (PX_WINDOWS_FAMILY && !PX_ARM_FAMILY)
/**
Assert macro
*/
#ifndef PX_ENABLE_ASSERTS
#if PX_DEBUG && !PX_CUDA_COMPILER
#define PX_ENABLE_ASSERTS 1
#else
#define PX_ENABLE_ASSERTS 0
#endif
#endif
/**
DLL export macros
*/
#ifndef PX_C_EXPORT
#if PX_WINDOWS_FAMILY || PX_LINUX
#define PX_C_EXPORT extern "C"
#else
#define PX_C_EXPORT
#endif
#endif
#if PX_UNIX_FAMILY&& __GNUC__ >= 4
#define PX_UNIX_EXPORT __attribute__((visibility("default")))
#else
#define PX_UNIX_EXPORT
#endif
#if PX_WINDOWS_FAMILY
#define PX_DLL_EXPORT __declspec(dllexport)
#define PX_DLL_IMPORT __declspec(dllimport)
#else
#define PX_DLL_EXPORT PX_UNIX_EXPORT
#define PX_DLL_IMPORT
#endif
/**
Calling convention
*/
#ifndef PX_CALL_CONV
#if PX_WINDOWS_FAMILY
#define PX_CALL_CONV __cdecl
#else
#define PX_CALL_CONV
#endif
#endif
/**
Pack macros - disabled on SPU because they are not supported
*/
#if PX_VC
#define PX_PUSH_PACK_DEFAULT __pragma(pack(push, 8))
#define PX_POP_PACK __pragma(pack(pop))
#elif PX_GCC_FAMILY
#define PX_PUSH_PACK_DEFAULT _Pragma("pack(push, 8)")
#define PX_POP_PACK _Pragma("pack(pop)")
#else
#define PX_PUSH_PACK_DEFAULT
#define PX_POP_PACK
#endif
/**
Inline macro
*/
#define PX_INLINE inline
#if PX_WINDOWS_FAMILY
#pragma inline_depth(255)
#endif
/**
Force inline macro
*/
#if PX_VC
#define PX_FORCE_INLINE __forceinline
#elif PX_CUDA_COMPILER
#define PX_FORCE_INLINE __forceinline__
#elif PX_GCC_FAMILY
#define PX_FORCE_INLINE inline __attribute__((always_inline))
#else
#define PX_FORCE_INLINE inline
#endif
/**
Noinline macro
*/
#if PX_WINDOWS_FAMILY
#define PX_NOINLINE __declspec(noinline)
#elif PX_GCC_FAMILY
#define PX_NOINLINE __attribute__((noinline))
#else
#define PX_NOINLINE
#endif
/**
Restrict macro
*/
#if PX_CUDA_COMPILER
#define PX_RESTRICT __restrict__
#else
#define PX_RESTRICT __restrict
#endif
/**
Noalias macro
*/
#if PX_WINDOWS_FAMILY
#define PX_NOALIAS __declspec(noalias)
#else
#define PX_NOALIAS
#endif
/**
Override macro
*/
#define PX_OVERRIDE override
/**
Final macro
*/
#define PX_FINAL final
/**
Unused attribute macro. Only on GCC for now.
*/
#if PX_GCC_FAMILY
#define PX_UNUSED_ATTRIBUTE __attribute__((unused))
#else
#define PX_UNUSED_ATTRIBUTE
#endif
/**
Alignment macros
PX_ALIGN_PREFIX and PX_ALIGN_SUFFIX can be used for type alignment instead of aligning individual variables as follows:
PX_ALIGN_PREFIX(16)
struct A {
...
} PX_ALIGN_SUFFIX(16);
This declaration style is parsed correctly by Visual Assist.
*/
#ifndef PX_ALIGN
#if PX_WINDOWS_FAMILY
// MSVC: the alignment specifier goes before the declaration
#define PX_ALIGN(alignment, decl) __declspec(align(alignment)) decl
#define PX_ALIGN_PREFIX(alignment) __declspec(align(alignment))
#define PX_ALIGN_SUFFIX(alignment)
#elif PX_GCC_FAMILY
// GCC/clang: the alignment attribute goes after the declaration
#define PX_ALIGN(alignment, decl) decl __attribute__((aligned(alignment)))
#define PX_ALIGN_PREFIX(alignment)
#define PX_ALIGN_SUFFIX(alignment) __attribute__((aligned(alignment)))
#elif PX_CUDA_COMPILER
// NVCC: use the __align__ qualifier
// (fix: the suffix form previously expanded to "__align__(alignment))" with an
// unbalanced closing parenthesis, which could never compile if this branch was taken)
#define PX_ALIGN(alignment, decl) __align__(alignment) decl
#define PX_ALIGN_PREFIX(alignment)
#define PX_ALIGN_SUFFIX(alignment) __align__(alignment)
#else
// unknown toolchain: alignment requests are silently dropped
#define PX_ALIGN(alignment, decl)
#define PX_ALIGN_PREFIX(alignment)
#define PX_ALIGN_SUFFIX(alignment)
#endif
#endif
/**
Deprecated macro
- To deprecate a function: Place PX_DEPRECATED at the start of the function header (leftmost word).
- To deprecate a 'typedef', a 'struct' or a 'class': Place PX_DEPRECATED directly after the keywords ('typedef',
'struct', 'class').
Use these macro definitions to create warnings for deprecated functions
\#define PX_DEPRECATED __declspec(deprecated) // Microsoft
\#define PX_DEPRECATED __attribute__((deprecated())) // GCC
*/
#define PX_DEPRECATED
/**
General defines
*/
#if PX_LINUX && PX_CLANG && !PX_CUDA_COMPILER
#define PX_COMPILE_TIME_ASSERT(exp) \
_Pragma(" clang diagnostic push") \
_Pragma(" clang diagnostic ignored \"-Wc++98-compat\"") \
static_assert(exp, "") \
_Pragma(" clang diagnostic pop")
#else
#define PX_COMPILE_TIME_ASSERT(exp) static_assert(exp, "")
#endif
#if PX_GCC_FAMILY
#define PX_OFFSET_OF(X, Y) __builtin_offsetof(X, Y)
#else
#define PX_OFFSET_OF(X, Y) offsetof(X, Y)
#endif
#define PX_OFFSETOF_BASE 0x100 // casting the null ptr takes a special-case code path, which we don't want
#define PX_OFFSET_OF_RT(Class, Member) (reinterpret_cast<size_t>(&reinterpret_cast<Class*>(PX_OFFSETOF_BASE)->Member) - size_t(PX_OFFSETOF_BASE))
#if PX_WINDOWS_FAMILY
// check that exactly one of NDEBUG and _DEBUG is defined
#if !defined(NDEBUG) ^ defined(_DEBUG)
#error Exactly one of NDEBUG and _DEBUG needs to be defined!
#endif
#endif
// make sure PX_CHECKED is defined in all _DEBUG configurations as well
#if !PX_CHECKED && PX_DEBUG
#error PX_CHECKED must be defined when PX_DEBUG is defined
#endif
#if PX_CUDA_COMPILER
#define PX_CUDA_CALLABLE __host__ __device__
#else
#define PX_CUDA_CALLABLE
#endif
// Suppresses "unreferenced parameter" warnings for a named parameter.
// The preferred solution remains to simply omit the parameter's name in the declaration.
template <class T>
PX_CUDA_CALLABLE PX_INLINE void PX_UNUSED(const T&)
{
}
// Ensure that the application hasn't tweaked the pack value to less than 8, which would break
// matching between the API headers and the binaries
// This assert works on win32/win64, but may need further specialization on other platforms.
// Some GCC compilers need the compiler flag -malign-double to be set.
// Apparently the apple-clang-llvm compiler doesn't support malign-double.
#if PX_APPLE_FAMILY || (PX_CLANG && !PX_ARM)
// 'long' is the widest member whose alignment matches the expected 8-byte packing here
struct PxPackValidation
{
	char _;
	long a;
};
#elif PX_CLANG && PX_ARM
// on 32-bit ARM clang, 'double' is used to probe the 8-byte alignment instead
struct PxPackValidation
{
	char _;
	double a;
};
#else
struct PxPackValidation
{
	char _;
	long long a;
};
#endif
// clang (as of version 3.9) cannot align doubles on 8 byte boundary when compiling for Intel 32 bit target
#if !PX_APPLE_FAMILY && !PX_EMSCRIPTEN && !(PX_CLANG && PX_X86)
PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(PxPackValidation, a) == 8);
#endif
// use in a cpp file to suppress LNK4221
#if PX_VC
#define PX_DUMMY_SYMBOL \
namespace \
{ \
char PxDummySymbol; \
}
#else
#define PX_DUMMY_SYMBOL
#endif
#if PX_GCC_FAMILY
#define PX_WEAK_SYMBOL __attribute__((weak)) // this is to support SIMD constant merging in template specialization
#else
#define PX_WEAK_SYMBOL
#endif
// Macro for avoiding default assignment and copy, because doing this by inheritance can increase class size on some
// platforms.
#define PX_NOCOPY(Class) \
protected: \
Class(const Class&); \
Class& operator=(const Class&);
//#define DISABLE_CUDA_PHYSX
#ifndef DISABLE_CUDA_PHYSX
//CUDA is currently supported on x86_64 windows and linux, and ARM_64 linux
#define PX_SUPPORT_GPU_PHYSX ((PX_X64 && (PX_WINDOWS_FAMILY || PX_LINUX)) || (PX_A64 && PX_LINUX))
#else
#define PX_SUPPORT_GPU_PHYSX 0
#endif
#ifndef PX_SUPPORT_EXTERN_TEMPLATE
#define PX_SUPPORT_EXTERN_TEMPLATE (PX_VC != 11)
#else
#define PX_SUPPORT_EXTERN_TEMPLATE 0
#endif
#define PX_FL __FILE__, __LINE__
#endif

View File

@@ -0,0 +1,143 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef PX_PROFILER_H
#define PX_PROFILER_H
#include "foundation/PxSimpleTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as
well as applications

Only zoneStart/zoneEnd are pure virtual; the data/frame recording hooks default
to no-ops so implementers may override just what they need.
*/
class PxProfilerCallback
{
  protected:
	virtual ~PxProfilerCallback() {}

  public:
	/**************************************************************************************************************************
	Instrumented profiling events
	***************************************************************************************************************************/

	/**
	\brief Mark the beginning of a nested profile block
	\param[in] eventName Event name. Must be a persistent const char* that is the same pointer passed to zoneEnd such that the pointer can be used to pair the calls.
	\param[in] detached True for cross thread events
	\param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group.
	\return Returns implementation-specific profiler data for this event
	*/
	virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0;

	/**
	\brief Mark the end of a nested profile block
	\param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available)
	\param[in] eventName Event name. Must be a persistent const char* that is the same pointer passed to zoneStart such that the pointer can be used to pair the calls.
	\param[in] detached True for cross thread events. Should match the value passed to zoneStart.
	\param[in] contextId The context of this zone. Should match the value passed to zoneStart.
	\note eventName plus contextId can be used to uniquely match up start and end of a zone.
	*/
	virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0;

	/**
	\brief Record integer data to be displayed in the profiler.
	\param[in] value The integer data point to be recorded.
	\param[in] valueName The name of the data being recorded. Must be a persistent const char *
	\param[in] contextId The context of this data.
	\note Default implementation is a no-op.
	*/
	virtual void recordData(int32_t value, const char* valueName, uint64_t contextId)
	{
		PX_UNUSED(value);
		PX_UNUSED(valueName);
		PX_UNUSED(contextId);
	}

	/**
	\brief Record float data to be displayed in the profiler.
	\param[in] value The floating point data to be recorded.
	\param[in] valueName The name of the data being recorded. Must be a persistent const char *
	\param[in] contextId The context of this data.
	\note Default implementation is a no-op.
	*/
	virtual void recordData(float value, const char* valueName, uint64_t contextId)
	{
		PX_UNUSED(value);
		PX_UNUSED(valueName);
		PX_UNUSED(contextId);
	}

	/**
	\brief Record a frame marker to be displayed in the profiler.

	Markers that have identical names will be displayed in the profiler
	along with the time between each of the markers. A frame counter will display the frame marker count.

	\param[in] name The name of the frame. Must be a persistent const char *
	\param[in] contextId The context of the frame.
	\note Default implementation is a no-op.
	*/
	virtual void recordFrame(const char* name, uint64_t contextId)
	{
		PX_UNUSED(name);
		PX_UNUSED(contextId);
	}
};
/**
\brief RAII helper that brackets a profiler zone: zoneStart in the constructor,
the matching zoneEnd in the destructor. All zone parameters are stored so the
zoneEnd call receives the same values.
*/
class PxProfileScoped
{
  public:
	/**
	\brief Opens a profiler zone if a callback is provided.
	\param callback  Profiler callback; may be NULL, in which case the scope is a no-op.
	\param eventName Persistent event name, forwarded unchanged to zoneStart/zoneEnd.
	\param detached  True for cross thread events.
	\param contextId Context id of the zone.
	*/
	PX_FORCE_INLINE PxProfileScoped(PxProfilerCallback* callback, const char* eventName, bool detached, uint64_t contextId) :
		mCallback(callback), mEventName(eventName), mProfilerData(NULL), mContextId(contextId), mDetached(detached)
	{
		// Fix: all members are now initialized unconditionally. Previously
		// mEventName/mContextId/mDetached were only set when a callback was present,
		// leaving them indeterminate in the NULL-callback case.
		if(mCallback)
			mProfilerData = mCallback->zoneStart(eventName, detached, contextId);
	}
	PX_FORCE_INLINE ~PxProfileScoped()
	{
		if(mCallback)
			mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId);
	}
	PxProfilerCallback* mCallback;	// may be NULL (scope is then a no-op)
	const char* mEventName;			// persistent name passed to zoneStart/zoneEnd
	void* mProfilerData;			// implementation-specific data returned by zoneStart
	uint64_t mContextId;			// context id forwarded to both calls
	bool mDetached;					// cross-thread flag forwarded to both calls
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,428 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_QUAT_H
#define PX_QUAT_H
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
template<class Type> class PxMat33T;
/**
\brief This is a quaternion class. For more information on quaternion mathematics
consult a mathematics source on complex numbers.
*/
template<class Type>
class PxQuatT
{
public:
/**
\brief Default constructor, does not do any initialization.
\note All four components are left uninitialized for speed.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT()
{
}
//! identity constructor: builds the identity rotation (0, 0, 0, 1)
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(PxIDENTITY) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(Type(1.0))
{
}
/**
\brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero
*/
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type r) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(r)
{
}
/**
\brief Constructor. Take note of the order of the elements: imaginary part (x,y,z) first, real part w last.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(Type nx, Type ny, Type nz, Type nw) : x(nx), y(ny), z(nz), w(nw)
{
}
/**
\brief Creates from angle-axis representation.
Axis must be normalized! (checked to a 1e-3 tolerance when asserts are enabled)
Angle is in radians!
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_INLINE PxQuatT(Type angleRadians, const PxVec3T<Type>& unitAxis)
{
PX_ASSERT(PxAbs(Type(1.0) - unitAxis.magnitude()) < Type(1e-3));
// quaternions encode half the rotation angle: w = cos(angle/2), |v| = sin(angle/2)
const Type a = angleRadians * Type(0.5);
Type s;
PxSinCos(a, s, w);
x = unitAxis.x * s;
y = unitAxis.y * s;
z = unitAxis.z * s;
}
/**
\brief Copy ctor.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT(const PxQuatT& v) : x(v.x), y(v.y), z(v.z), w(v.w)
{
}
/**
\brief Creates from orientation matrix.
\param[in] m Rotation matrix to extract quaternion from.
*/
PX_CUDA_CALLABLE PX_INLINE explicit PxQuatT(const PxMat33T<Type>& m); /* defined in PxMat33.h */
/**
\brief returns true if quat is exactly the identity (0,0,0,1); no tolerance is applied
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isIdentity() const
{
return x==Type(0.0) && y==Type(0.0) && z==Type(0.0) && w==Type(1.0);
}
/**
\brief returns true if all elements are finite (not NAN or INF, etc.)
*/
PX_CUDA_CALLABLE bool isFinite() const
{
return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w);
}
/**
\brief returns true if finite and magnitude is close to unit (within 1e-3)
*/
PX_CUDA_CALLABLE bool isUnit() const
{
const Type unitTolerance = Type(1e-3);
return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance;
}
/**
\brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs
isValid
\note Same check as isUnit() but with a looser tolerance (1e-2).
*/
PX_CUDA_CALLABLE bool isSane() const
{
const Type unitTolerance = Type(1e-2);
return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance;
}
/**
\brief returns true if the two quaternions are exactly equal (component-wise comparison, no tolerance)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxQuatT& q) const
{
return x == q.x && y == q.y && z == q.z && w == q.w;
}
/**
\brief converts this quaternion to angle-axis representation
\note When the imaginary part is too small to define an axis, returns a zero angle about the x-axis.
*/
PX_CUDA_CALLABLE PX_INLINE void toRadiansAndUnitAxis(Type& angle, PxVec3T<Type>& axis) const
{
const Type quatEpsilon = Type(1.0e-8);
// squared length of the imaginary part
const Type s2 = x * x + y * y + z * z;
if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis
{
angle = Type(0.0);
axis = PxVec3T<Type>(Type(1.0), Type(0.0), Type(0.0));
}
else
{
const Type s = PxRecipSqrt(s2);
axis = PxVec3T<Type>(x, y, z) * s;
// atan2 form is more robust than acos; guard against w close to zero
angle = PxAbs(w) < quatEpsilon ? Type(PxPi) : PxAtan2(s2 * s, w) * Type(2.0);
}
}
/**
\brief Gets the angle between this quat and the identity quaternion.
\note Expects a unit quaternion, so that w lies in the acos domain [-1, 1].
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle() const
{
return PxAcos(w) * Type(2.0);
}
/**
\brief Gets the angle between this quat and the argument
\note Expects both quaternions to be unit length, so that the dot product lies in the acos domain.
<b>Unit:</b> Radians
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type getAngle(const PxQuatT& q) const
{
return PxAcos(dot(q)) * Type(2.0);
}
/**
\brief This is the squared 4D vector length, should be 1 for unit quaternions.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const
{
return x * x + y * y + z * z + w * w;
}
/**
\brief returns the scalar product of this and other.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxQuatT& v) const
{
return x * v.x + y * v.y + z * v.z + w * v.w;
}
/**
\brief returns a unit-length copy of this quaternion.
\note Divides by magnitude(); behavior is undefined for a zero quaternion (division by zero).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getNormalized() const
{
const Type s = Type(1.0) / magnitude();
return PxQuatT(x * s, y * s, z * s, w * s);
}
/**
\brief returns the 4D vector length of this quaternion.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const
{
return PxSqrt(magnitudeSquared());
}
// modifiers:
/**
\brief maps to the closest unit quaternion.
\return the magnitude the quaternion had before normalization; a zero quaternion is left unchanged and returns 0.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize() // convert this PxQuatT to a unit quaternion
{
const Type mag = magnitude();
if(mag != Type(0.0))
{
const Type imag = Type(1.0) / mag;
x *= imag;
y *= imag;
z *= imag;
w *= imag;
}
return mag;
}
/**
\brief returns the conjugate (imaginary part negated).
\note for unit quaternions, this is the inverse.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT getConjugate() const
{
return PxQuatT(-x, -y, -z, w);
}
/**
\brief returns imaginary part (x, y, z).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getImaginaryPart() const
{
return PxVec3T<Type>(x, y, z);
}
/** \brief computes rotation of the x-axis, i.e. rotate(PxVec3T(1,0,0)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector0() const
{
const Type x2 = x * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((w * w2) - Type(1.0) + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2);
}
/** \brief computes rotation of the y-axis, i.e. rotate(PxVec3T(0,1,0)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector1() const
{
const Type y2 = y * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((-z * w2) + x * y2, (w * w2) - Type(1.0) + y * y2, (x * w2) + z * y2);
}
/** \brief computes rotation of the z-axis, i.e. rotate(PxVec3T(0,0,1)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getBasisVector2() const
{
const Type z2 = z * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - Type(1.0) + z * z2);
}
/**
rotates passed vec by this (assumed unitary)
\note Uses the 2*((w^2 - 0.5)*v + w*(q x v) + (q . v)*q) expansion; v is pre-doubled so the
final result needs no extra scale.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotate(const PxVec3T<Type>& v) const
{
const Type vx = Type(2.0) * v.x;
const Type vy = Type(2.0) * v.y;
const Type vz = Type(2.0) * v.z;
const Type w2 = w * w - Type(0.5);
const Type dot2 = (x * vx + y * vy + z * vz);
return PxVec3T<Type>((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2),
(vz * w2 + (x * vy - y * vx) * w + z * dot2));
}
/** \brief computes inverse rotation of the x-axis, i.e. rotateInv(PxVec3T(1,0,0)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getInvBasisVector0() const
{
const Type x2 = x * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((w * w2) - Type(1.0) + x * x2, (-z * w2) + y * x2, (y * w2) + z * x2);
}
/** \brief computes the inverse rotation of the y-axis, i.e. rotateInv(PxVec3T(0,1,0)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getInvBasisVector1() const
{
const Type y2 = y * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((z * w2) + x * y2, (w * w2) - Type(1.0) + y * y2, (-x * w2) + z * y2);
}
/** \brief computes the inverse rotation of the z-axis, i.e. rotateInv(PxVec3T(0,0,1)) expanded inline */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> getInvBasisVector2() const
{
const Type z2 = z * Type(2.0);
const Type w2 = w * Type(2.0);
return PxVec3T<Type>((-y * w2) + x * z2, (x * w2) + y * z2, (w * w2) - Type(1.0) + z * z2);
}
/**
inverse rotates passed vec by this (assumed unitary)
\note Same expansion as rotate() with the sign of the cross-product terms flipped
(equivalent to rotating by the conjugate).
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3T<Type> rotateInv(const PxVec3T<Type>& v) const
{
const Type vx = Type(2.0) * v.x;
const Type vy = Type(2.0) * v.y;
const Type vz = Type(2.0) * v.z;
const Type w2 = w * w - Type(0.5);
const Type dot2 = (x * vx + y * vy + z * vz);
return PxVec3T<Type>((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2),
(vz * w2 - (x * vy - y * vx) * w + z * dot2));
}
/**
\brief Assignment operator
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator=(const PxQuatT& p)
{
x = p.x;
y = p.y;
z = p.z;
w = p.w;
return *this;
}
/**
\brief In-place quaternion product: this = this * q.
\note Components are staged in temporaries because each output reads all four inputs.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const PxQuatT& q)
{
const Type tx = w * q.x + q.w * x + y * q.z - q.y * z;
const Type ty = w * q.y + q.w * y + z * q.x - q.z * x;
const Type tz = w * q.z + q.w * z + x * q.y - q.x * y;
w = w * q.w - q.x * x - y * q.y - q.z * z;
x = tx;
y = ty;
z = tz;
return *this;
}
/** \brief component-wise addition */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator+=(const PxQuatT& q)
{
x += q.x;
y += q.y;
z += q.z;
w += q.w;
return *this;
}
/** \brief component-wise subtraction */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator-=(const PxQuatT& q)
{
x -= q.x;
y -= q.y;
z -= q.z;
w -= q.w;
return *this;
}
/** \brief component-wise scale */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT& operator*=(const Type s)
{
x *= s;
y *= s;
z *= s;
w *= s;
return *this;
}
/** quaternion multiplication */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(const PxQuatT& q) const
{
return PxQuatT(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x,
w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z);
}
/** quaternion addition */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator+(const PxQuatT& q) const
{
return PxQuatT(x + q.x, y + q.y, z + q.z, w + q.w);
}
/** unary minus: negates all four components */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-() const
{
return PxQuatT(-x, -y, -z, -w);
}
/** quaternion subtraction */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator-(const PxQuatT& q) const
{
return PxQuatT(x - q.x, y - q.y, z - q.z, w - q.w);
}
/** scalar multiplication */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxQuatT operator*(Type r) const
{
return PxQuatT(x * r, y * r, z * r, w * r);
}
/** the quaternion elements: imaginary part (x, y, z), real part w */
Type x, y, z, w;
};
typedef PxQuatT<float> PxQuat;
typedef PxQuatT<double> PxQuatd;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,134 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SIMD_HELPERS_H
#define PX_SIMD_HELPERS_H
#include "foundation/PxMat33.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
//! A padded version of PxMat33, to safely load its data using SIMD
class PxMat33Padded : public PxMat33
{
public:
// Builds the rotation matrix equivalent to quaternion q, using 4-wide SIMD stores
// where the platform allows them.
explicit PX_FORCE_INLINE PxMat33Padded(const PxQuat& q)
{
using namespace aos;
// loads x,y,z,w of the quaternion in one unaligned 4-float read
const QuatV qV = V4LoadU(&q.x);
Vec3V column0V, column1V, column2V;
QuatGetMat33V(qV, column0V, column1V, column2V);
#if defined(PX_SIMD_DISABLED) || (PX_LINUX && (PX_ARM || PX_A64))
// safe scalar-width path: stores exactly 3 floats per column
V3StoreU(column0V, column0);
V3StoreU(column1V, column1);
V3StoreU(column2V, column2);
#else
// fast path: each 16-byte store spills 4 bytes past its column; the next store
// overwrites the spill, and the final one lands in `padding` below — this is
// why the class pads PxMat33 (columns assumed contiguous in memory).
V4StoreU(column0V, &column0.x);
V4StoreU(column1V, &column1.x);
V4StoreU(column2V, &column2.x);
#endif
}
PX_FORCE_INLINE ~PxMat33Padded() {}
// Copies only the matrix columns; `padding` is intentionally left untouched.
PX_FORCE_INLINE void operator=(const PxMat33& other)
{
column0 = other.column0;
column1 = other.column1;
column2 = other.column2;
}
// scratch space so the last V4StoreU above stays inside the object
PxU32 padding;
};
#if !PX_DOXYGEN
namespace aos
{
#endif
/**
\brief SIMD kernel composing two (quaternion, position) pairs: out = a * b.

Outputs the quaternion product a*b split into real part \p wo and imaginary
part \p vo, and the composed position \p po = rotate(a, pb) + pa, using the
same (w^2 - 0.5) rotation expansion as PxQuatT::rotate.
*/
PX_FORCE_INLINE void transformKernelVec4( const FloatVArg wa, const Vec4VArg va, const Vec4VArg pa,
										const FloatVArg wb, const Vec4VArg vb, const Vec4VArg pb,
										FloatV& wo, Vec4V& vo, Vec4V& po)
{
	// real part: wa*wb - dot(va, vb)
	wo = FSub(FMul(wa, wb), V4Dot3(va, vb));
	// imaginary part: wb*va + wa*vb + cross(va, vb)
	vo = V4ScaleAdd(va, wb, V4ScaleAdd(vb, wa, V4Cross(va, vb)));
	// rotate pb by quaternion a: 2*((wa^2 - 0.5)*pb + wa*(va x pb) + (va . pb)*va), then add pa
	const Vec4V scaledPos = V4Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f)));
	const Vec4V withCross = V4ScaleAdd(V4Cross(va, pb), wa, scaledPos);
	const Vec4V rotated = V4ScaleAdd(va, V4Dot3(va, pb), withCross);
	po = V4ScaleAdd(rotated, FLoad(2.0f), pa);
}
/**
\brief out = a * b for PxTransform, with compile-time selection of aligned vs
unaligned SIMD loads/stores.

\note Alignment of the inputs/output is asserted when the corresponding
template flag is set; the 4-float loads of `p` read into the adjacent `q`
field, which is then reloaded anyway.
*/
template<const bool alignedInput, const bool alignedOutput>
PX_FORCE_INLINE void transformMultiply(PxTransform& out, const PxTransform& a, const PxTransform& b)
{
	PX_ASSERT(!alignedInput || (size_t(&a)&15) == 0);
	PX_ASSERT(!alignedInput || (size_t(&b)&15) == 0);

	// gather both transforms as raw 4-float vectors
	const Vec4V posA = alignedInput ? V4LoadA(&a.p.x) : V4LoadU(&a.p.x);
	const Vec4V rotA = alignedInput ? V4LoadA(&a.q.x) : V4LoadU(&a.q.x);
	const Vec4V posB = alignedInput ? V4LoadA(&b.p.x) : V4LoadU(&b.p.x);
	const Vec4V rotB = alignedInput ? V4LoadA(&b.q.x) : V4LoadU(&b.q.x);

	FloatV wOut;
	Vec4V vOut, pOut;
	transformKernelVec4(V4GetW(rotA), rotA, posA, V4GetW(rotB), rotB, posB, wOut, vOut, pOut);

	// scatter the result; the quaternion's real part is merged back into lane w
	if(alignedOutput)
	{
		PX_ASSERT((size_t(&out)&15) == 0);
		V4StoreA(pOut, &out.p.x);
		V4StoreA(V4SetW(vOut, wOut), &out.q.x);
	}
	else
	{
		V4StoreU(pOut, &out.p.x);
		V4StoreU(V4SetW(vOut, wOut), &out.q.x);
	}
}
// PT: out = a * b
// Convenience overload for PxTransform32 that takes the fully-aligned path.
// NOTE(review): relies on PxTransform32 instances being 16-byte aligned; the
// aligned template path asserts this at runtime.
PX_FORCE_INLINE void transformMultiply(PxTransform32& out, const PxTransform32& a, const PxTransform32& b)
{
transformMultiply<true, true>(out, a, b);
}
#if !PX_DOXYGEN
} // namespace aos
#endif
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,132 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SLIST_H
#define PX_SLIST_H
#include "foundation/PxAssert.h"
#include "foundation/PxAlignedMalloc.h"
#if PX_P64_FAMILY
#define PX_SLIST_ALIGNMENT 16
#else
#define PX_SLIST_ALIGNMENT 8
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4324) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
PX_ALIGN_PREFIX(PX_SLIST_ALIGNMENT)
class PxSListEntry
{
friend struct PxSListImpl;
public:
// Entries must live at PX_SLIST_ALIGNMENT-aligned addresses (8 or 16 bytes,
// depending on pointer width); this is asserted on construction.
PxSListEntry() : mNext(NULL)
{
PX_ASSERT((size_t(this) & (PX_SLIST_ALIGNMENT - 1)) == 0);
}
// Only use on elements returned by SList::flush()
// because the operation is not atomic.
PxSListEntry* next()
{
return mNext;
}
private:
// singly-linked successor; written by PxSListImpl (friend) on push/pop/flush
PxSListEntry* mNext;
}PX_ALIGN_SUFFIX(PX_SLIST_ALIGNMENT);
#if PX_VC
#pragma warning(pop)
#endif
// template-less implementation: the actual list logic lives in one translation
// unit and is shared by all PxSListT<Alloc> instantiations
struct PX_FOUNDATION_API PxSListImpl
{
PxSListImpl();
~PxSListImpl();
void push(PxSListEntry* entry); // push entry onto the head of the list
PxSListEntry* pop(); // remove and return the head element
PxSListEntry* flush(); // detach the whole chain, returning its first element
static uint32_t getSize(); // byte size needed to placement-construct a PxSListImpl (see PxSListT)
};
template <typename Alloc = PxReflectionAllocator<PxSListImpl> >
class PxSListT : protected Alloc
{
public:
PxSListT(const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<PxSListImpl*>(Alloc::allocate(PxSListImpl::getSize(), PX_FL));
PX_ASSERT((size_t(mImpl) & (PX_SLIST_ALIGNMENT - 1)) == 0);
PX_PLACEMENT_NEW(mImpl, PxSListImpl)();
}
~PxSListT()
{
mImpl->~PxSListImpl();
Alloc::deallocate(mImpl);
}
// pushes a new element to the list
void push(PxSListEntry& entry)
{
mImpl->push(&entry);
}
// pops an element from the list
PxSListEntry* pop()
{
return mImpl->pop();
}
// removes all items from list, returns pointer to first element
PxSListEntry* flush()
{
return mImpl->flush();
}
private:
PxSListImpl* mImpl;
};
typedef PxSListT<> PxSList;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,120 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SIMPLE_TYPES_H
#define PX_SIMPLE_TYPES_H
// Platform specific types:
// Design note: Its OK to use int for general loop variables and temps.
#include "foundation/PxPreprocessor.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard
// header
#endif
#include <stdint.h>
#if PX_VC
#pragma warning(pop)
#endif
#if PX_VC // we could use inttypes.h starting with VC12
#define PX_PRIu64 "I64u"
#else
#include <inttypes.h>
#define PX_PRIu64 PRIu64
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
// fixed-width scalar aliases
typedef int64_t PxI64; //!< 64-bit signed integer
typedef uint64_t PxU64; //!< 64-bit unsigned integer
typedef int32_t PxI32; //!< 32-bit signed integer
typedef uint32_t PxU32; //!< 32-bit unsigned integer
typedef int16_t PxI16; //!< 16-bit signed integer
typedef uint16_t PxU16; //!< 16-bit unsigned integer
typedef int8_t PxI8; //!< 8-bit signed integer
typedef uint8_t PxU8; //!< 8-bit unsigned integer
typedef float PxF32; //!< 32-bit floating point
typedef double PxF64; //!< 64-bit floating point
typedef float PxReal; //!< default real type (32-bit float)
// Int-as-bool type - has some uses for efficiency and with SIMD
typedef PxI32 PxIntBool;
static const PxIntBool PxIntFalse = 0;
static const PxIntBool PxIntTrue = 1;
// types for direct-GPU API
typedef PxU32 PxArticulationGPUIndex; //!< GPU-side articulation index
typedef PxU32 PxRigidDynamicGPUIndex; //!< GPU-side rigid dynamic index
typedef PxU32 PxShapeGPUIndex; //!< GPU-side shape index
typedef PxU32 PxConstraintGPUIndex; //!< GPU-side constraint index
#define PX_INVALID_CONSTRAINT_GPU_INDEX 0xffffFFFF
typedef PxConstraintGPUIndex PxD6JointGPUIndex; //!< GPU-side D6 joint index (an alias of the constraint index)
#define PX_INVALID_D6_JOINT_GPU_INDEX PX_INVALID_CONSTRAINT_GPU_INDEX
#if !PX_DOXYGEN
} // namespace physx
#endif
#define PX_SIGN_BITMASK 0x80000000
// Type ranges
#define PX_MAX_F32 3.4028234663852885981170418348452e+38F
// maximum possible float value
#define PX_MAX_F64 DBL_MAX // maximum possible double value
#define PX_EPS_F32 FLT_EPSILON // maximum relative error of float rounding
#define PX_EPS_F64 DBL_EPSILON // maximum relative error of double rounding
#define PX_MAX_REAL PX_MAX_F32
#define PX_EPS_REAL PX_EPS_F32
#define PX_NORMALIZATION_EPSILON float(1e-20f)
// Legacy type ranges used by PhysX
#define PX_MAX_I8 INT8_MAX
#define PX_MIN_I8 INT8_MIN
#define PX_MAX_U8 UINT8_MAX
#define PX_MIN_U8 UINT8_MIN
#define PX_MAX_I16 INT16_MAX
#define PX_MIN_I16 INT16_MIN
#define PX_MAX_U16 UINT16_MAX
#define PX_MIN_U16 UINT16_MIN
#define PX_MAX_I32 INT32_MAX
#define PX_MIN_I32 INT32_MIN
#define PX_MAX_U32 UINT32_MAX
#define PX_MIN_U32 UINT32_MIN
#endif

View File

@@ -0,0 +1,187 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SOCKET_H
#define PX_SOCKET_H
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
Socket abstraction API

Thin platform-independent wrapper around a network socket; the platform
specific code lives behind the private SocketImpl pointer.
*/
class PX_FOUNDATION_API PxSocket : public PxUserAllocated
{
public:
// Default size, in bytes, of the internal buffering (value defined in the implementation).
static const uint32_t DEFAULT_BUFFER_SIZE;
// \param inEnableBuffering enables internal buffering of socket traffic
//        (presumably what flush() drains — confirm against SocketImpl)
// \param blocking initial blocking mode; see setBlocking()/isBlocking()
PxSocket(bool inEnableBuffering = true, bool blocking = true);
virtual ~PxSocket();
/*!
Opens a network socket for input and/or output
\param host
Name of the host to connect to. This can be an IP, URL, etc
\param port
The port to connect to on the remote host
\param timeout
Timeout in ms until the connection must be established.
\return
True if the connection was successful, false otherwise
*/
bool connect(const char* host, uint16_t port, uint32_t timeout = 1000);
/*!
Opens a network socket for input and/or output as a server. Put the connection in listening mode
\param port
The port on which the socket listens
*/
bool listen(uint16_t port);
/*!
Accept a connection on a socket that is in listening mode
\note
This method only supports a single connection client. Additional clients
that connect to the listening port will overwrite the existing socket handle.
\param block
whether or not the call should block
\return whether a connection was established
*/
bool accept(bool block);
/*!
Disconnects an open socket
*/
void disconnect();
/*!
Returns whether the socket is currently open (connected) or not.
\return
True if the socket is connected, false otherwise
*/
bool isConnected() const;
/*!
Returns the name of the connected host. This is the same as the string
that was supplied to the connect call.
\return
The name of the connected host
*/
const char* getHost() const;
/*!
Returns the port of the connected host. This is the same as the port
that was supplied to the connect call.
\return
The port of the connected host
*/
uint16_t getPort() const;
/*!
Flushes the output stream. Until the stream is flushed, there is no
guarantee that the written data has actually reached the destination
storage. Flush forces all buffered data to be sent to the output.
\note flush always blocks. If the socket is in non-blocking mode, this will result
in the thread spinning.
\return
True if the flush was successful, false otherwise
*/
bool flush();
/*!
Writes data to the output stream.
\param data
Pointer to a block of data to write to the stream
\param length
Amount of data to write, in bytes
\return
Number of bytes actually written. This could be lower than length if the socket is non-blocking.
*/
uint32_t write(const uint8_t* data, uint32_t length);
/*!
Reads data from the output stream.
\param data
Pointer to a buffer where the read data will be stored.
\param length
Amount of data to read, in bytes.
\return
Number of bytes actually read. This could be lower than length if the stream end is
encountered or the socket is non-blocking.
*/
uint32_t read(uint8_t* data, uint32_t length);
/*!
Sets blocking mode of the socket.
Socket must be connected, otherwise calling this method won't take any effect.
*/
void setBlocking(bool blocking);
/*!
Returns whether read/write/flush calls to the socket are blocking.
\return
True if the socket is blocking.
*/
bool isBlocking() const;
private:
// platform-specific implementation (pimpl)
class SocketImpl* mImpl;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,127 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SORT_H
#define PX_SORT_H
#include "foundation/PxSortInternals.h"
#include "foundation/PxAlloca.h"
#define PX_SORT_PARANOIA PX_DEBUG
/**
\brief Sorts an array of objects in ascending order, assuming
that the predicate implements the < operator:
\see PxLess, PxGreater
*/
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4706) // disable the warning that we did an assignment within a conditional expression, as
// this was intentional.
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief In-place ascending sort of elements[0..count-1], using an iterative
quicksort with an explicit stack of sub-ranges.

Ranges below SMALL_SORT_CUTOFF fall back to PxSmallSort (selection sort);
larger ranges are split via PxPartition (median-of-three pivot). Always the
smaller sub-range is pushed so stack depth stays logarithmic.

\param elements array to sort
\param count number of elements; count == 0 is safe (count - 1 wraps, making last == -1 and skipping the loop)
\param compare strict-weak-ordering predicate implementing operator< semantics
\param inAllocator allocator used only if the range stack outgrows the stack-allocated scratch buffer
\param initialStackSize number of int32_t slots in the PX_ALLOCA scratch buffer
*/
template <class T, class Predicate, class PxAllocator>
void PxSort(T* elements, uint32_t count, const Predicate& compare, const PxAllocator& inAllocator,
const uint32_t initialStackSize = 32)
{
static const uint32_t SMALL_SORT_CUTOFF = 5; // must be >= 3 since we need 3 for median
PX_ALLOCA(stackMem, int32_t, initialStackSize);
PxStack<PxAllocator> stack(stackMem, initialStackSize, inAllocator);
// current sub-range [first, last], both inclusive
int32_t first = 0, last = int32_t(count - 1);
if(last > first)
{
for(;;)
{
while(last > first)
{
PX_ASSERT(first >= 0 && last < int32_t(count));
if(uint32_t(last - first) < SMALL_SORT_CUTOFF)
{
PxSmallSort(elements, first, last, compare);
break;
}
else
{
const int32_t partIndex = PxPartition(elements, first, last, compare);
// push smaller sublist to minimize stack usage
if((partIndex - first) < (last - partIndex))
{
stack.push(first, partIndex - 1);
first = partIndex + 1;
}
else
{
stack.push(partIndex + 1, last);
last = partIndex - 1;
}
}
}
// resume with the next deferred sub-range, if any
if(stack.empty())
break;
stack.pop(first, last);
}
}
#if PX_SORT_PARANOIA
// debug-only post-condition: verify the array is ordered under 'compare'
for(uint32_t i = 1; i < count; i++)
PX_ASSERT(!compare(elements[i], elements[i - 1]));
#endif
}
/**
\brief Sorts with the given predicate, using the default allocator for T.
*/
template <class T, class Predicate>
void PxSort(T* elements, uint32_t count, const Predicate& compare)
{
PxSort(elements, count, compare, typename PxAllocatorTraits<T>::Type());
}
/**
\brief Sorts in ascending order via PxLess (operator<), using the default allocator for T.
*/
template <class T>
void PxSort(T* elements, uint32_t count)
{
PxSort(elements, count, PxLess<T>(), typename PxAllocatorTraits<T>::Type());
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#if PX_VC
#pragma warning(pop)
#endif
#endif

View File

@@ -0,0 +1,182 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SORT_INTERNALS_H
#define PX_SORT_INTERNALS_H
#include "foundation/PxAssert.h"
#include "foundation/PxMathIntrinsics.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Median-of-three pivot setup for PxPartition.

Orders elements[first], elements[middle], elements[last] so the smallest
lands at 'first' and the largest at 'last', then parks the median (the pivot)
at index last-1. The ordered endpoints act as sentinels for the partition
scans, and the median pivot reduces the chance of degenerate quicksort
behaviour.
*/
template <class T, class Predicate>
PX_INLINE void PxMedian3(T* elements, int32_t first, int32_t last, Predicate& compare)
{
	const int32_t middle = (first + last) / 2;

	// order first/middle
	if(compare(elements[middle], elements[first]))
		PxSwap(elements[first], elements[middle]);
	// order first/last
	if(compare(elements[last], elements[first]))
		PxSwap(elements[first], elements[last]);
	// order middle/last
	if(compare(elements[last], elements[middle]))
		PxSwap(elements[middle], elements[last]);

	// stash the median — the pivot — just before the end
	PxSwap(elements[middle], elements[last - 1]);
}
template <class T, class Predicate>
PX_INLINE int32_t PxPartition(T* elements, int32_t first, int32_t last, Predicate& compare)
{
	// Hoare-style partition of the inclusive range [first, last].
	// Returns the final pivot index: elements left of it compare "less", elements
	// right of it compare "greater" under 'compare'. PxMedian3 installs the
	// sentinels this relies on and leaves the pivot value at elements[last-1].
	PxMedian3(elements, first, last, compare);
	/*
	WARNING: using the line:
	T partValue = elements[last-1];
	and changing the scan loops to:
	while(comparator.greater(partValue, elements[++i]));
	while(comparator.greater(elements[--j], partValue);
	triggers a compiler optimizer bug on xenon where it stores a double to the stack for partValue
	then loads it as a single...:-(
	NOTE: that is why the pivot is re-read as elements[last - 1] inside each scan
	loop below; do not hoist it into a local.
	*/
	int32_t i = first;    // we know first is less than pivot (but i gets pre incremented)
	int32_t j = last - 1; // pivot is in last-1 (but j gets pre decremented)
	for(;;)
	{
		// scan right for an element not less than the pivot (sentinel bounds the scan)
		while(compare(elements[++i], elements[last - 1]))
			;
		// scan left for an element not greater than the pivot
		while(compare(elements[last - 1], elements[--j]))
			;
		if(i >= j)
			break;
		PX_ASSERT(i <= last && j >= first);
		PxSwap(elements[i], elements[j]);
	}
	// put the pivot in place
	PX_ASSERT(i <= last && first <= (last - 1));
	PxSwap(elements[i], elements[last - 1]);
	return i;
}
template <class T, class Predicate>
PX_INLINE void PxSmallSort(T* elements, int32_t first, int32_t last, Predicate& compare)
{
	// Selection sort over the inclusive range [first, last]; intended for the
	// small sub-ranges where quicksort's partitioning overhead is not worth it.
	for(int32_t slot = first; slot < last; slot++)
	{
		// locate the smallest element in the unsorted tail [slot, last]
		int32_t best = slot;
		int32_t candidate = slot;
		while(++candidate <= last)
		{
			if(compare(elements[candidate], elements[best]))
				best = candidate;
		}
		// move it into position, skipping the no-op swap
		if(best != slot)
			PxSwap(elements[best], elements[slot]);
	}
}
/**
Stack of (start, end) int32_t pairs used by the sort internals, initially backed
by a caller-provided buffer. If a push would overflow, the stack migrates to
allocator-owned storage that doubles on every subsequent overflow; the original
caller buffer is never freed by this class.
*/
template <class PxAllocator>
class PxStack
{
	PxAllocator mAllocator;
	uint32_t mSize, mCapacity; // mSize/mCapacity count int32_t entries (2 per pushed pair)
	int32_t* mMemory;
	bool mRealloc; // true once mMemory points at allocator-owned (growable) storage

  public:
	PxStack(int32_t* memory, uint32_t capacity, const PxAllocator& inAllocator)
	: mAllocator(inAllocator), mSize(0), mCapacity(capacity), mMemory(memory), mRealloc(false)
	{
	}

	~PxStack()
	{
		// only free storage this class allocated itself, never the caller's buffer
		if(mRealloc)
			mAllocator.deallocate(mMemory);
	}

	// Double the capacity and copy the current entries into allocator-owned storage.
	void grow()
	{
		mCapacity *= 2;
		int32_t* newMem =
		    reinterpret_cast<int32_t*>(mAllocator.allocate(sizeof(int32_t) * mCapacity, PX_FL));
		intrinsics::memCopy(newMem, mMemory, mSize * sizeof(int32_t));
		if(mRealloc)
			mAllocator.deallocate(mMemory);
		mRealloc = true;
		mMemory = newMem;
	}

	// Push one (start, end) pair, growing first if fewer than 2 slots remain.
	// NOTE(review): assumes the initial capacity is >= 2 -- with capacity 0 the
	// unsigned expression 'mCapacity - 1' wraps around and the overflow check
	// never fires; confirm callers always pass a non-trivial buffer.
	PX_INLINE void push(int32_t start, int32_t end)
	{
		if(mSize >= mCapacity - 1)
			grow();
		mMemory[mSize++] = start;
		mMemory[mSize++] = end;
	}

	// Pop the most recently pushed (start, end) pair (LIFO order).
	PX_INLINE void pop(int32_t& start, int32_t& end)
	{
		PX_ASSERT(!empty());
		end = mMemory[--mSize];
		start = mMemory[--mSize];
	}

	PX_INLINE bool empty()
	{
		return mSize == 0;
	}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,348 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_STRIDE_ITERATOR_H
#define PX_STRIDE_ITERATOR_H
#include "foundation/PxAssert.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Iterator class for iterating over arrays of data that may be interleaved with other data.

This class is used for iterating over arrays of elements that may have a larger element to element
offset, called the stride, than the size of the element itself (non-contiguous).

The template parameter T denotes the type of the element accessed. The stride itself
is stored as a member field so multiple instances of a PxStrideIterator class can have
different strides. This is useful for cases were the stride depends on runtime configuration.

The stride iterator can be used for index based access, e.g.:
\code
PxStrideIterator<PxVec3> strideArray(...);
for (unsigned i = 0; i < 10; ++i)
{
	PxVec3& vec = strideArray[i];
	...
}
\endcode
or iteration by increment, e.g.:
\code
PxStrideIterator<PxVec3> strideBegin(...);
PxStrideIterator<PxVec3> strideEnd(strideBegin + 10);
for (PxStrideIterator<PxVec3> it = strideBegin; it < strideEnd; ++it)
{
	PxVec3& vec = *it;
	...
}
\endcode

Two special cases:
- A stride of sizeof(T) represents a regular c array of type T.
- A stride of 0 can be used to describe re-occurrence of the same element multiple times.
  \note Iterator difference (operator-) is undefined for a stride of 0 (it divides by the stride).
*/
template <typename T>
class PxStrideIterator
{
#if !PX_DOXYGEN
	// Maps 'const X' to 'X' so a const iterator can be constructed from a non-const one.
	template <typename X>
	struct StripConst
	{
		typedef X Type;
	};

	template <typename X>
	struct StripConst<const X>
	{
		typedef X Type;
	};
#endif

  public:
	/**
	\brief Constructor.

	Optionally takes a pointer to an element and a stride.

	\param[in] ptr pointer to element, defaults to NULL.
	\param[in] stride stride for accessing consecutive elements, defaults to the size of one element.
	*/
	explicit PX_INLINE PxStrideIterator(T* ptr = NULL, PxU32 stride = sizeof(T)) : mPtr(ptr), mStride(stride)
	{
		// a non-zero stride must be at least as large as one element
		PX_ASSERT(mStride == 0 || sizeof(T) <= mStride);
	}

	/**
	\brief Copy constructor.

	Also allows a const iterator to be constructed from a non-const one.

	\param[in] strideIterator PxStrideIterator to be copied.
	*/
	PX_INLINE PxStrideIterator(const PxStrideIterator<typename StripConst<T>::Type>& strideIterator)
	: mPtr(strideIterator.ptr()), mStride(strideIterator.stride())
	{
		PX_ASSERT(mStride == 0 || sizeof(T) <= mStride);
	}

	/**
	\brief Get pointer to element.
	*/
	PX_INLINE T* ptr() const
	{
		return mPtr;
	}

	/**
	\brief Get stride in bytes.
	*/
	PX_INLINE PxU32 stride() const
	{
		return mStride;
	}

	/**
	\brief Indirection operator.
	*/
	PX_INLINE T& operator*() const
	{
		return *mPtr;
	}

	/**
	\brief Dereferencing operator.
	*/
	PX_INLINE T* operator->() const
	{
		return mPtr;
	}

	/**
	\brief Indexing operator.
	*/
	PX_INLINE T& operator[](unsigned int i) const
	{
		return *byteAdd(mPtr, i * stride());
	}

	/**
	\brief Pre-increment operator.
	*/
	PX_INLINE PxStrideIterator& operator++()
	{
		mPtr = byteAdd(mPtr, stride());
		return *this;
	}

	/**
	\brief Post-increment operator.
	*/
	PX_INLINE PxStrideIterator operator++(int)
	{
		PxStrideIterator tmp = *this;
		mPtr = byteAdd(mPtr, stride());
		return tmp;
	}

	/**
	\brief Pre-decrement operator.
	*/
	PX_INLINE PxStrideIterator& operator--()
	{
		mPtr = byteSub(mPtr, stride());
		return *this;
	}

	/**
	\brief Post-decrement operator.
	*/
	PX_INLINE PxStrideIterator operator--(int)
	{
		PxStrideIterator tmp = *this;
		mPtr = byteSub(mPtr, stride());
		return tmp;
	}

	/**
	\brief Addition operator.
	*/
	PX_INLINE PxStrideIterator operator+(unsigned int i) const
	{
		return PxStrideIterator(byteAdd(mPtr, i * stride()), stride());
	}

	/**
	\brief Subtraction operator.
	*/
	PX_INLINE PxStrideIterator operator-(unsigned int i) const
	{
		return PxStrideIterator(byteSub(mPtr, i * stride()), stride());
	}

	/**
	\brief Addition compound assignment operator.
	*/
	PX_INLINE PxStrideIterator& operator+=(unsigned int i)
	{
		mPtr = byteAdd(mPtr, i * stride());
		return *this;
	}

	/**
	\brief Subtraction compound assignment operator.
	*/
	PX_INLINE PxStrideIterator& operator-=(unsigned int i)
	{
		mPtr = byteSub(mPtr, i * stride());
		return *this;
	}

	/**
	\brief Iterator difference (number of strides between the two iterators).
	\note Undefined for a stride of 0.
	*/
	PX_INLINE int operator-(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr));
		return byteDiff / static_cast<int>(stride());
	}

	/**
	\brief Equality operator.
	*/
	PX_INLINE bool operator==(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr == other.mPtr;
	}

	/**
	\brief Inequality operator.
	*/
	PX_INLINE bool operator!=(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr != other.mPtr;
	}

	/**
	\brief Less than operator.
	*/
	PX_INLINE bool operator<(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr < other.mPtr;
	}

	/**
	\brief Greater than operator.
	*/
	PX_INLINE bool operator>(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr > other.mPtr;
	}

	/**
	\brief Less or equal than operator.
	*/
	PX_INLINE bool operator<=(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr <= other.mPtr;
	}

	/**
	\brief Greater or equal than operator.
	*/
	PX_INLINE bool operator>=(const PxStrideIterator& other) const
	{
		PX_ASSERT(isCompatible(other));
		return mPtr >= other.mPtr;
	}

  private:
	// Advance a typed pointer by a raw byte count.
	PX_INLINE static T* byteAdd(T* ptr, PxU32 bytes)
	{
		return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) + bytes));
	}

	// Rewind a typed pointer by a raw byte count.
	PX_INLINE static T* byteSub(T* ptr, PxU32 bytes)
	{
		return const_cast<T*>(reinterpret_cast<const T*>(reinterpret_cast<const PxU8*>(ptr) - bytes));
	}

	/**
	Two iterators are compatible when they have the same stride and their byte
	distance is a whole number of strides. A stride of 0 is explicitly supported
	by this class, so the multiple-of-stride check must be skipped in that case:
	the previous implementation computed 'abs(byteDiff) % stride()', which is
	modulo-by-zero (undefined behavior) for zero-stride iterators and relied on
	an unqualified abs() this header never includes. Testing the signed remainder
	against 0 needs no abs(), since the sign cannot affect an exact-multiple test.
	*/
	PX_INLINE bool isCompatible(const PxStrideIterator& other) const
	{
		if(stride() != other.stride())
			return false;
		if(stride() == 0)
			return true; // zero-stride iterators: any byte distance is a valid multiple of 0 steps
		int byteDiff = static_cast<int>(reinterpret_cast<const PxU8*>(mPtr) - reinterpret_cast<const PxU8*>(other.mPtr));
		return byteDiff % static_cast<int>(stride()) == 0;
	}

	T* mPtr;       //!< pointer to the current element
	PxU32 mStride; //!< byte offset between consecutive elements (0 allowed)
};
/**
\brief Addition operator, allowing the integer operand on the left-hand side.
*/
template <typename T>
PX_INLINE PxStrideIterator<T> operator+(int i, PxStrideIterator<T> it)
{
	PxStrideIterator<T> advanced = it;
	advanced += i;
	return advanced;
}
/**
\brief Factory function building a stride iterator, deducing the element type from the pointer.
*/
template <typename T>
PX_INLINE PxStrideIterator<T> PxMakeIterator(T* ptr, PxU32 stride = sizeof(T))
{
	PxStrideIterator<T> result(ptr, stride);
	return result;
}
/**
\brief Factory function building a const stride iterator, deducing the element type from the pointer.
*/
template <typename T>
PX_INLINE PxStrideIterator<const T> PxMakeIterator(const T* ptr, PxU32 stride = sizeof(T))
{
	PxStrideIterator<const T> result(ptr, stride);
	return result;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,79 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_STRING_H
#define PX_STRING_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxFoundationConfig.h"
#include <stdarg.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
// the following functions have C99 semantics. Note that C99 requires for snprintf and vsnprintf:
// * the resulting string is always NULL-terminated regardless of truncation.
// * in the case of truncation the return value is the number of characters that would have been created.
// Formatted scan from a string buffer (sscanf semantics).
PX_FOUNDATION_API int32_t Pxsscanf(const char* buffer, const char* format, ...);
// Lexicographic comparison, same contract as strcmp / strncmp.
PX_FOUNDATION_API int32_t Pxstrcmp(const char* str1, const char* str2);
PX_FOUNDATION_API int32_t Pxstrncmp(const char* str1, const char* str2, size_t count);
// Bounded formatted print into dst (C99 semantics, see note above).
PX_FOUNDATION_API int32_t Pxsnprintf(char* dst, size_t dstSize, const char* format, ...);
PX_FOUNDATION_API int32_t Pxvsnprintf(char* dst, size_t dstSize, const char* src, va_list arg);
// strlcat and strlcpy have BSD semantics:
// * dstSize is always the size of the destination buffer
// * the resulting string is always NULL-terminated regardless of truncation
// * in the case of truncation the return value is the length of the string that would have been created
PX_FOUNDATION_API size_t Pxstrlcat(char* dst, size_t dstSize, const char* src);
PX_FOUNDATION_API size_t Pxstrlcpy(char* dst, size_t dstSize, const char* src);
// case-insensitive string comparison
PX_FOUNDATION_API int32_t Pxstricmp(const char* str1, const char* str2);
PX_FOUNDATION_API int32_t Pxstrnicmp(const char* str1, const char* str2, size_t count);
// in-place string case conversion (to lower / to upper)
PX_FOUNDATION_API void Pxstrlwr(char* str);
PX_FOUNDATION_API void Pxstrupr(char* str);
/**
\brief Prints the string literally (does not consume % specifier), trying to make sure it's visible to the app
programmer
*/
PX_FOUNDATION_API void PxPrintString(const char*);
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,139 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SYNC_H
#define PX_SYNC_H
#include "foundation/PxAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/*!
Implementation notes:
* - Calling set() on an already signaled Sync does not change its state.
* - Calling reset() on an already reset Sync does not change its state.
* - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention).
* - Calling wait() on an already signaled Sync will return true immediately.
* - NOTE: be careful when pulsing an event with set() followed by reset(), because a
* thread that is not waiting on the event will miss the signal.
*/
class PX_FOUNDATION_API PxSyncImpl
{
  public:
	static const uint32_t waitForever = 0xffffffff; //!< sentinel timeout: block indefinitely

	PxSyncImpl();

	~PxSyncImpl();

	/** Wait on the object for at most the given number of ms. Returns
	 * true if the object is signaled. Sync::waitForever will block forever
	 * or until the object is signaled.
	 */
	bool wait(uint32_t milliseconds = waitForever);

	/** Signal the synchronization object, waking all threads waiting on it */
	void set();

	/** Reset the synchronization object */
	void reset();

	/**
	Size of this class. Used by wrappers (e.g. PxSyncT below) to allocate raw
	storage for placement construction, since the platform-specific layout is
	hidden behind this opaque interface.
	*/
	static uint32_t getSize();
};
/*!
Allocator-aware wrapper around PxSyncImpl. This template only manages the
lifetime of the underlying platform object through the given allocator; the
signal/reset/wait semantics are exactly those documented on PxSyncImpl above.
*/
template <typename Alloc = PxReflectionAllocator<PxSyncImpl> >
class PxSyncT : protected Alloc
{
  public:
	static const uint32_t waitForever = PxSyncImpl::waitForever;

	//! Allocate storage via the allocator and placement-construct the platform sync object.
	PxSyncT(const Alloc& alloc = Alloc()) : Alloc(alloc)
	{
		void* storage = Alloc::allocate(PxSyncImpl::getSize(), PX_FL);
		mSync = reinterpret_cast<PxSyncImpl*>(storage);
		PX_PLACEMENT_NEW(mSync, PxSyncImpl)();
	}

	//! Destroy the platform sync object and return its storage to the allocator.
	~PxSyncT()
	{
		mSync->~PxSyncImpl();
		Alloc::deallocate(mSync);
	}

	/** Wait on the object for at most the given number of ms. Returns
	 * true if the object is signaled. waitForever blocks until the
	 * object is signaled.
	 */
	bool wait(uint32_t milliseconds = PxSyncImpl::waitForever)
	{
		return mSync->wait(milliseconds);
	}

	/** Signal the synchronization object, waking all threads waiting on it. */
	void set()
	{
		mSync->set();
	}

	/** Reset the synchronization object. */
	void reset()
	{
		mSync->reset();
	}

  private:
	class PxSyncImpl* mSync; // placement-constructed; storage owned by the allocator
};
typedef PxSyncT<> PxSync;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,54 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TEMP_ALLOCATOR_H
#define PX_TEMP_ALLOCATOR_H
#include "foundation/PxAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
Allocator whose allocate/deallocate entry points are implemented in the
foundation library. The name suggests it serves short-lived/temporary
allocations -- TODO(review): confirm against the implementation; this header
only shows the interface.
*/
class PxTempAllocator
{
  public:
	// The optional name argument is ignored here; it exists only so this class
	// matches the allocator-construction convention used elsewhere.
	PX_FORCE_INLINE PxTempAllocator(const char* = 0)
	{
	}

	// Returns a block of at least 'size' bytes; file/line identify the call site.
	PX_FOUNDATION_API void* allocate(size_t size, const char* file, PxI32 line);
	// Releases a block previously returned by allocate().
	PX_FOUNDATION_API void deallocate(void* ptr);
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,369 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_THREAD_H
#define PX_THREAD_H
#include "foundation/PxUserAllocated.h"
// todo: these need to go somewhere else
// PT: looks like this is still used on some platforms
#if PX_WINDOWS_FAMILY
#define PxSpinLockPause() __asm pause
#elif PX_LINUX || PX_APPLE_FAMILY || PX_SWITCH
#define PxSpinLockPause() asm("nop")
#else
#error "Platform not supported!"
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
Scoped-enum idiom (pre-C++11 style) describing a thread's scheduling priority.
*/
struct PxThreadPriority
{
	enum Enum
	{
		eHIGH = 0,         //!< High priority
		eABOVE_NORMAL = 1, //!< Above Normal priority
		eNORMAL = 2,       //!< Normal/default priority
		eBELOW_NORMAL = 3, //!< Below Normal priority
		eLOW = 4,          //!< Low priority.
		eFORCE_DWORD = 0xffFFffFF //!< unused sentinel; presumably forces 32-bit storage for the enum
	};
};
/**
Base class for work that can be run on a PxThread. Derive from it and override
execute() with the code that should run in the context of the spawned thread.
*/
class PxRunnable
{
  public:
	PxRunnable() {}
	virtual ~PxRunnable() {}
	// Thread entry point; the default implementation does nothing.
	virtual void execute() {}
};
class PX_FOUNDATION_API PxThreadImpl
{
  public:
	typedef size_t Id; // space for a pointer or an integer

	// pthread-style entry point signature used by the fn/arg constructor below
	typedef void* (*ExecuteFn)(void*);

	// Default stack size used by start() when the caller does not specify one.
	static PxU32 getDefaultStackSize();

	// Id of the calling thread.
	static Id getId();

	/**
	Construct (but do not start) the thread object. The OS thread object will not be created
	until start() is called. Executes in the context
	of the spawning thread.
	*/
	PxThreadImpl();

	/**
	Construct and start the thread, passing the given arg to the given fn. (pthread style)
	*/
	PxThreadImpl(ExecuteFn fn, void* arg, const char* name);

	/**
	Deallocate all resources associated with the thread. Should be called in the
	context of the spawning thread.
	*/
	~PxThreadImpl();

	/**
	Create the OS thread and start it running. Called in the context of the spawning thread.
	If an affinity mask has previously been set then it will be applied after the
	thread has been created.
	*/
	void start(PxU32 stackSize, PxRunnable* r);

	/**
	Violently kill the current thread. Blunt instrument, not recommended since
	it can leave all kinds of things unreleased (stack, memory, mutexes...) Should
	be called in the context of the spawning thread.
	*/
	void kill();

	/**
	Stop the thread. Signals the spawned thread that it should stop, so the
	thread should check regularly
	*/
	void signalQuit();

	/**
	Wait for a thread to stop. Should be called in the context of the spawning
	thread. Returns false if the thread has not been started.
	*/
	bool waitForQuit();

	/**
	check whether the thread is signalled to quit. Called in the context of the
	spawned thread.
	*/
	bool quitIsSignalled();

	/**
	Cleanly shut down this thread. Called in the context of the spawned thread.
	*/
	void quit();

	/**
	Change the affinity mask for this thread. The mask is a platform
	specific value.

	On Windows, Linux, and Switch platforms, each set mask bit represents
	the index of a logical processor that the OS may schedule thread execution on.
	Bits outside the range of valid logical processors may be ignored or cause
	the function to return an error.

	On Apple platforms, this function has no effect.

	If the thread has not yet been started then the mask is stored
	and applied when the thread is started.

	If the thread has already been started then this method returns the
	previous affinity mask on success, otherwise it returns zero.
	*/
	PxU32 setAffinityMask(PxU32 mask);

	// Query the scheduling priority of the given thread.
	static PxThreadPriority::Enum getPriority(Id threadId);

	/** Set thread priority. */
	void setPriority(PxThreadPriority::Enum prio);

	/** set the thread's name */
	void setName(const char* name);

	/** Put the current thread to sleep for the given number of milliseconds */
	static void sleep(PxU32 ms);

	/** Yield the current thread's slot on the CPU */
	static void yield();

	/** Inform the processor that we're in a busy wait to give it a chance to do something clever.
	yield() yields the thread, while yieldProcessor() aims to yield the processor */
	static void yieldProcessor();

	/** Return the number of physical cores (does not include hyper-threaded cores), returns 0 on failure */
	static PxU32 getNbPhysicalCores();

	/**
	Size of this class. Used by wrappers (e.g. PxThreadT below) to allocate raw
	storage for placement construction, since the platform-specific layout is
	hidden behind this opaque interface.
	*/
	static PxU32 getSize();
};
/**
Thread abstraction API. Allocator-aware wrapper around PxThreadImpl that also
acts as its own PxRunnable: override execute() in a derived class to define
the thread body.
*/
template <typename Alloc = PxReflectionAllocator<PxThreadImpl> >
class PxThreadT : protected Alloc, public PxUserAllocated, public PxRunnable
{
  public:
	typedef PxThreadImpl::Id Id; // space for a pointer or an integer

	/**
	Construct (but do not start) the thread object. Executes in the context
	of the spawning thread
	*/
	PxThreadT(const Alloc& alloc = Alloc()) : Alloc(alloc)
	{
		mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL));
		PX_PLACEMENT_NEW(mImpl, PxThreadImpl)();
	}

	/**
	Construct and start the thread, passing the given arg to the given fn. (pthread style)
	*/
	PxThreadT(PxThreadImpl::ExecuteFn fn, void* arg, const char* name, const Alloc& alloc = Alloc()) : Alloc(alloc)
	{
		mImpl = reinterpret_cast<PxThreadImpl*>(Alloc::allocate(PxThreadImpl::getSize(), PX_FL));
		PX_PLACEMENT_NEW(mImpl, PxThreadImpl)(fn, arg, name);
	}

	/**
	Deallocate all resources associated with the thread. Should be called in the
	context of the spawning thread.
	*/
	virtual ~PxThreadT()
	{
		mImpl->~PxThreadImpl();
		Alloc::deallocate(mImpl);
	}

	/**
	start the thread running. Called in the context of the spawning thread.
	*/
	void start(PxU32 stackSize = PxThreadImpl::getDefaultStackSize())
	{
		mImpl->start(stackSize, this);
	}

	/**
	Violently kill the current thread. Blunt instrument, not recommended since
	it can leave all kinds of things unreleased (stack, memory, mutexes...) Should
	be called in the context of the spawning thread.
	*/
	void kill()
	{
		mImpl->kill();
	}

	/**
	The virtual execute() method is the user defined function that will
	run in the new thread. Called in the context of the spawned thread.
	*/
	virtual void execute()
	{
	}

	/**
	stop the thread. Signals the spawned thread that it should stop, so the
	thread should check regularly
	*/
	void signalQuit()
	{
		mImpl->signalQuit();
	}

	/**
	Wait for a thread to stop. Should be called in the context of the spawning
	thread. Returns false if the thread has not been started.
	*/
	bool waitForQuit()
	{
		return mImpl->waitForQuit();
	}

	/**
	check whether the thread is signalled to quit. Called in the context of the
	spawned thread.
	*/
	bool quitIsSignalled()
	{
		return mImpl->quitIsSignalled();
	}

	/**
	Cleanly shut down this thread. Called in the context of the spawned thread.
	*/
	void quit()
	{
		mImpl->quit();
	}

	/** Change the affinity mask for this thread; see PxThreadImpl::setAffinityMask. */
	PxU32 setAffinityMask(PxU32 mask)
	{
		return mImpl->setAffinityMask(mask);
	}

	/** Query the scheduling priority of the given thread. */
	static PxThreadPriority::Enum getPriority(PxThreadImpl::Id threadId)
	{
		return PxThreadImpl::getPriority(threadId);
	}

	/** Set thread priority. */
	void setPriority(PxThreadPriority::Enum prio)
	{
		mImpl->setPriority(prio);
	}

	/** set the thread's name */
	void setName(const char* name)
	{
		mImpl->setName(name);
	}

	/** Put the current thread to sleep for the given number of milliseconds */
	static void sleep(PxU32 ms)
	{
		PxThreadImpl::sleep(ms);
	}

	/** Yield the current thread's slot on the CPU */
	static void yield()
	{
		PxThreadImpl::yield();
	}

	/** Inform the processor that we're in a busy wait to give it a chance to do something clever
	yield() yields the thread, while yieldProcessor() aims to yield the processor */
	static void yieldProcessor()
	{
		PxThreadImpl::yieldProcessor();
	}

	/** Misspelled legacy alias of yieldProcessor(), kept so existing callers keep
	compiling; prefer the correctly spelled overload above. */
	static void yieldProcesor()
	{
		PxThreadImpl::yieldProcessor();
	}

	/** Default stack size used by start() when none is specified. */
	static PxU32 getDefaultStackSize()
	{
		return PxThreadImpl::getDefaultStackSize();
	}

	/** Id of the calling thread. */
	static PxThreadImpl::Id getId()
	{
		return PxThreadImpl::getId();
	}

	/** Return the number of physical cores (does not include hyper-threaded cores), returns 0 on failure */
	static PxU32 getNbPhysicalCores()
	{
		return PxThreadImpl::getNbPhysicalCores();
	}

  private:
	class PxThreadImpl* mImpl; // placement-constructed; storage owned by the allocator
};
typedef PxThreadT<> PxThread;
PX_FOUNDATION_API PxU32 PxTlsAlloc();
PX_FOUNDATION_API void PxTlsFree(PxU32 index);
PX_FOUNDATION_API void* PxTlsGet(PxU32 index);
PX_FOUNDATION_API size_t PxTlsGetValue(PxU32 index);
PX_FOUNDATION_API PxU32 PxTlsSet(PxU32 index, void* value);
PX_FOUNDATION_API PxU32 PxTlsSetValue(PxU32 index, size_t value);
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,97 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TIME_H
#define PX_TIME_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxFoundationConfig.h"
#if PX_LINUX
#include <time.h>
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
//! Conversion ratio that turns raw timer ticks into tens of nanoseconds.
struct PxCounterFrequencyToTensOfNanos
{
    PxU64 mNumerator;   //!< multiplier applied to the raw counter value
    PxU64 mDenominator; //!< divisor applied after the multiplication

    PxCounterFrequencyToTensOfNanos(PxU64 inNum, PxU64 inDenom) : mNumerator(inNum), mDenominator(inDenom)
    {
    }

    //! Converts a raw counter reading to tens of nanoseconds.
    //! Not cheap: performs a full 64-bit multiply followed by a 64-bit divide.
    PxU64 toTensOfNanos(PxU64 inCounter) const
    {
        const PxU64 scaled = inCounter * mNumerator;
        return scaled / mDenominator;
    }
};
/**
\brief Stopwatch built on the platform's high-resolution counter.

Static members expose the raw counter and its boot-time frequency; an instance
records the time of its last sample so elapsed intervals can be measured.
Only declarations here — implementations are platform-specific (.cpp), except
for the inline getCurrentTimeInTensOfNanoSeconds().
*/
class PX_FOUNDATION_API PxTime
{
  public:
    typedef PxF64 Second; // elapsed time is reported in double-precision seconds
    static const PxU64 sNumTensOfNanoSecondsInASecond = 100000000;
    // This is supposedly guaranteed to not change after system boot
    // regardless of processors, speedstep, etc.
    static const PxCounterFrequencyToTensOfNanos& getBootCounterFrequency();
    static PxCounterFrequencyToTensOfNanos getCounterFrequency();
    // Raw value of the platform high-resolution counter.
    static PxU64 getCurrentCounterValue();
    // SLOW!!
    // Thar be a 64 bit divide in thar!
    static PxU64 getCurrentTimeInTensOfNanoSeconds()
    {
        PxU64 ticks = getCurrentCounterValue();
        return getBootCounterFrequency().toTensOfNanos(ticks);
    }
    PxTime();
    Second getElapsedSeconds();  // NOTE(review): presumably resets the reference sample — confirm in platform .cpp
    Second peekElapsedSeconds(); // NOTE(review): presumably does NOT reset the reference sample — confirm in platform .cpp
    Second getLastTime() const;
  private:
#if PX_LINUX || PX_APPLE_FAMILY
    Second mLastTime; // last sample, stored directly in seconds
#else
    PxI64 mTickCount; // last sample, stored as a raw counter value
#endif
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,261 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRANSFORM_H
#define PX_TRANSFORM_H
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
template<class Type> class PxMat44T;
/*!
\brief class representing a rigid euclidean transform as a quaternion and a vector
*/
template<class Type>
class PxTransformT
{
  public:
    PxQuatT<Type> q; //!< rotation
    PxVec3T<Type> p; //!< translation

    //! \brief default constructor leaves data uninitialized
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT()
    {
    }

    //! \brief identity transform: identity rotation, zero translation
    PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(PxIDENTITY) : q(PxIdentity), p(PxZero)
    {
    }

    //! \brief pure translation, identity rotation
    PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxVec3T<Type>& position) : q(PxIdentity), p(position)
    {
    }

    //! \brief pure rotation, zero translation
    PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxQuatT<Type>& orientation) : q(orientation), p(Type(0))
    {
        PX_ASSERT(orientation.isSane());
    }

    //! \brief construct from translation components plus an optional rotation
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(Type x, Type y, Type z, PxQuatT<Type> aQ = PxQuatT<Type>(PxIdentity)) : q(aQ), p(x, y, z)
    {
    }

    //! \brief construct from translation and rotation
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxVec3T<Type>& p0, const PxQuatT<Type>& q0) : q(q0), p(p0)
    {
        PX_ASSERT(q0.isSane());
    }

    PX_CUDA_CALLABLE PX_FORCE_INLINE explicit PxTransformT(const PxMat44T<Type>& m); // defined in PxMat44.h

    //! \brief copy constructor
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT(const PxTransformT& other)
    {
        p = other.p;
        q = other.q;
    }

    //! \brief assignment (deliberately returns void)
    PX_CUDA_CALLABLE PX_FORCE_INLINE void operator=(const PxTransformT& other)
    {
        p = other.p;
        q = other.q;
    }

    /**
    \brief returns true if the two transforms are exactly equal
    */
    PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxTransformT& t) const
    {
        return p == t.p && q == t.q;
    }

    //! \brief transform composition, equivalent to transform(x)
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT operator*(const PxTransformT& x) const
    {
        PX_ASSERT(x.isSane());
        return transform(x);
    }

    //! Equals matrix multiplication
    PX_CUDA_CALLABLE PX_INLINE PxTransformT& operator*=(const PxTransformT& other)
    {
        *this = *this * other;
        return *this;
    }

    //! \brief returns the inverse of this transform
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getInverse() const
    {
        PX_ASSERT(isFinite());
        return PxTransformT(q.rotateInv(-p), q.getConjugate());
    }

    /**
    \brief return a normalized transform (i.e. one in which the quaternion has unit magnitude)
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT getNormalized() const
    {
        return PxTransformT(p, q.getNormalized());
    }

    //! \brief transform a point: rotate by q, then translate by p
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transform(const PxVec3T<Type>& input) const
    {
        PX_ASSERT(isFinite());
        return q.rotate(input) + p;
    }

    //! \brief inverse point transform: subtract p, then inverse-rotate by q
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> transformInv(const PxVec3T<Type>& input) const
    {
        PX_ASSERT(isFinite());
        return q.rotateInv(input - p);
    }

    //! \brief rotate a vector by q (translation ignored)
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotate(const PxVec3T<Type>& input) const
    {
        PX_ASSERT(isFinite());
        return q.rotate(input);
    }

    //! \brief inverse-rotate a vector by q (translation ignored)
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T<Type> rotateInv(const PxVec3T<Type>& input) const
    {
        PX_ASSERT(isFinite());
        return q.rotateInv(input);
    }

    //! Transform transform to parent (returns compound transform: first src, then *this)
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transform(const PxTransformT& src) const
    {
        PX_ASSERT(src.isSane());
        PX_ASSERT(isSane());
        // src = [srct, srcr] -> [r*srct + t, r*srcr]
        return PxTransformT(q.rotate(src.p) + p, q * src.q);
    }

    //! Transform transform from parent (returns compound transform: first src, then this->inverse)
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxTransformT transformInv(const PxTransformT& src) const
    {
        PX_ASSERT(src.isSane());
        PX_ASSERT(isFinite());
        // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
        const PxQuatT<Type> qinv = q.getConjugate();
        return PxTransformT(qinv.rotate(src.p - p), qinv * src.q);
    }

    /**
    \brief returns true if finite and q is a unit quaternion
    */
    PX_CUDA_CALLABLE bool isValid() const
    {
        return p.isFinite() && q.isFinite() && q.isUnit();
    }

    /**
    \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error
    vs isValid
    */
    PX_CUDA_CALLABLE bool isSane() const
    {
        return isFinite() && q.isSane();
    }

    /**
    \brief returns true if all elems are finite (not NAN or INF, etc.)
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
    {
        return p.isFinite() && q.isFinite();
    }
};
typedef PxTransformT<float> PxTransform;
typedef PxTransformT<double> PxTransformd;
/*!
\brief A generic padded & aligned transform class.
This can be used for safe faster loads & stores, and faster address computations
(the default PxTransformT often generating imuls for this otherwise). Padding bytes
can be reused to store useful data if needed.
*/
struct PX_ALIGN_PREFIX(16) PxTransformPadded : PxTransform
{
    PX_FORCE_INLINE PxTransformPadded()
    {
    }
    PX_FORCE_INLINE PxTransformPadded(const PxTransformPadded& other) : PxTransform(other)
    {
    }
    PX_FORCE_INLINE explicit PxTransformPadded(const PxTransform& other) : PxTransform(other)
    {
    }
    PX_FORCE_INLINE explicit PxTransformPadded(PxIDENTITY) : PxTransform(PxIdentity)
    {
    }
    PX_FORCE_INLINE explicit PxTransformPadded(const PxVec3& position) : PxTransform(position)
    {
    }
    PX_FORCE_INLINE explicit PxTransformPadded(const PxQuat& orientation) : PxTransform(orientation)
    {
    }
    PX_FORCE_INLINE PxTransformPadded(const PxVec3& p0, const PxQuat& q0) : PxTransform(p0, q0)
    {
    }
    // Assignment copies only p/q; the padding word is deliberately left untouched.
    PX_FORCE_INLINE void operator=(const PxTransformPadded& other)
    {
        p = other.p;
        q = other.q;
    }
    // Assignment from the unpadded base type; padding again left untouched.
    PX_FORCE_INLINE void operator=(const PxTransform& other)
    {
        p = other.p;
        q = other.q;
    }
    PxU32 padding; // pads the 28-byte PxTransform up to 32 bytes; free for user data
}
PX_ALIGN_SUFFIX(16);
PX_COMPILE_TIME_ASSERT(sizeof(PxTransformPadded)==32); // locks in the size/padding assumed by SIMD loads
typedef PxTransformPadded PxTransform32;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,69 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_UNION_CAST_H
#define PX_UNION_CAST_H
#include "foundation/PxPreprocessor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Needed for clang 7
#if PX_CLANG && PX_CLANG_MAJOR >= 7
#define USE_VOLATILE_UNION volatile
#else
#define USE_VOLATILE_UNION
#endif
/**
\brief Reinterprets the bit pattern of a value of type B as a value of type A
via a union.

NOTE(review): reading a union member other than the one last written is
type punning, which relies on compiler-specific behavior (hence the volatile
workaround for clang >= 7). Meaningful only when A and B have the same size.
*/
template <class A, class B>
PX_FORCE_INLINE A PxUnionCast(B b)
{
    union AB
    {
        AB(B bb) : _b(bb)
        {
        }
        B _b; // source representation
        A _a; // reinterpreted representation
    } USE_VOLATILE_UNION u(b);
    return u._a;
}
#undef USE_VOLATILE_UNION
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,116 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_USER_ALLOCATED_H
#define PX_USER_ALLOCATED_H
#include "PxAllocator.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
Provides new and delete using a UserAllocator.
Guarantees that 'delete x;' uses the UserAllocator too.
*/
class PxUserAllocated
{
  public:
    // PX_SERIALIZATION
    // Placement new: constructs at the given address, performs no allocation.
    PX_INLINE void* operator new(size_t, void* address)
    {
        return address;
    }
    //~PX_SERIALIZATION
    // Matching operator delete to the above operator new. Don't ask me
    // how this makes any sense - Nuernberger.
    PX_INLINE void operator delete(void*, void*)
    {
    }
    // Allocating new: routes the request through the caller-supplied allocator,
    // recording file/line for allocation tracking.
    template <typename Alloc>
    PX_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line)
    {
        return alloc.allocate(size, fileName, line);
    }
    template <typename Alloc>
    PX_INLINE void* operator new(size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
    {
        // align is not respected, we have a 16-byte aligned allocator
        return alloc.allocate(size, fileName, line);
    }
    template <typename Alloc>
    PX_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line)
    {
        return alloc.allocate(size, fileName, line);
    }
    template <typename Alloc>
    PX_INLINE void* operator new [](size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
    {
        // align is not respected, we have a 16-byte aligned allocator
        return alloc.allocate(size, fileName, line);
    }
    // placement delete
    // (invoked by the compiler only when a constructor throws during the matching new)
    template <typename Alloc>
    PX_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line)
    {
        PX_UNUSED(fileName);
        PX_UNUSED(line);
        alloc.deallocate(ptr);
    }
    template <typename Alloc>
    PX_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line)
    {
        PX_UNUSED(fileName);
        PX_UNUSED(line);
        alloc.deallocate(ptr);
    }
    // Regular delete: always returns memory through PxAllocator — this is what
    // guarantees that 'delete x;' ends up in the user allocator.
    PX_INLINE void operator delete(void* ptr)
    {
        PxAllocator().deallocate(ptr);
    }
    PX_INLINE void operator delete [](void* ptr)
    {
        PxAllocator().deallocate(ptr);
    }
};
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,151 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_UTILITIES_H
#define PX_UTILITIES_H
#include "foundation/PxVec3.h"
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxBasicTemplates.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
// Returns 1 on little-endian machines, 0 on big-endian ones,
// by inspecting the lowest-addressed byte of an int holding 1.
PX_INLINE char PxLittleEndian()
{
    int i = 1;
    return *(reinterpret_cast<char*>(&i));
}
// PT: checked casts
// Each narrowing conversion below asserts (in builds where PX_ASSERT is active)
// that the value actually fits in the destination type before casting.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxTo32(PxU64 value)
{
    PX_ASSERT(value <= 0xffffffff);
    return PxU32(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 PxToU32(PxI32 value)
{
    PX_ASSERT(value >= 0);
    return PxU32(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 PxTo16(PxU32 value)
{
    PX_ASSERT(value <= 0xffff);
    return PxU16(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU16 value)
{
    PX_ASSERT(value <= 0xff);
    return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxU32 value)
{
    PX_ASSERT(value <= 0xff);
    return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 PxTo8(PxI32 value)
{
    PX_ASSERT(value <= 0xff);
    PX_ASSERT(value >= 0);
    return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxI8 PxToI8(PxU32 value)
{
    PX_ASSERT(value <= 0x7f);
    return PxI8(value);
}
//! \cond
/*!
Get number of elements in array
*/
// Declared (never defined): returns a reference to a char array of N elements,
// so sizeof(PxArraySizeHelper(a)) yields the element count at compile time.
// Unlike sizeof(a)/sizeof(a[0]) this fails to compile if given a pointer.
template <typename T, size_t N>
char (&PxArraySizeHelper(T (&array)[N]))[N];
#define PX_ARRAY_SIZE(_array) (sizeof(physx::PxArraySizeHelper(_array)))
//! \endcond
/*!
Sort two elements using operator<
On return x will be the smaller of the two
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y)
{
    // Swap only when out of order; requires T to be swappable via PxSwap.
    if(y < x)
        PxSwap(x, y);
}
// most architectures can do predication on real comparisons, and on VMX, it matters
// Branch-free specialization for PxReal: compute min/max rather than
// conditionally swapping.
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(PxReal& x, PxReal& y)
{
    PxReal newX = PxMin(x, y);
    PxReal newY = PxMax(x, y);
    x = newX;
    y = newY;
}
/*!
Sort two elements using operator< and also keep order
of any extra data
On return x will be the smaller of the two, and xe1/ye1 will have been
swapped alongside x/y so the pairing is preserved.
*/
template <class T, class E1>
PX_CUDA_CALLABLE PX_FORCE_INLINE void PxOrder(T& x, T& y, E1& xe1, E1& ye1)
{
    if(y < x)
    {
        // Fixed: use PxSwap, matching the other PxOrder overload above.
        // The previous unqualified "swap" had no declaration in scope here
        // (no <algorithm>/using std::swap), so instantiating this template
        // with built-in types failed to compile via ADL.
        PxSwap(x, y);
        PxSwap(xe1, ye1);
    }
}
#if PX_GCC_FAMILY && !PX_EMSCRIPTEN
__attribute__((noreturn))
#endif
// Halts the program at the call site using the platform's trap mechanism.
PX_INLINE void PxDebugBreak()
{
#if PX_WINDOWS
    __debugbreak();
#elif PX_LINUX
    __builtin_trap();
#elif PX_GCC_FAMILY
    // NOTE(review): identical to the PX_LINUX branch; kept separate so other
    // GCC-family platforms hit it even if PX_LINUX is not defined.
    __builtin_trap();
#else
    PX_ASSERT(false);
#endif
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,347 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC2_H
#define PX_VEC2_H
#include "foundation/PxMath.h"
#include "foundation/PxConstructor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief 2 Element vector class.
This is a 2-dimensional vector class with public data members.
*/
template<class Type>
class PxVec2T
{
  public:
    /**
    \brief default constructor leaves data uninitialized.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T()
    {
    }
    /**
    \brief zero constructor.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(PxZERO) : x(Type(0.0)), y(Type(0.0))
    {
    }
    /**
    \brief Assigns scalar parameter to all elements.
    Useful to initialize to zero or one.
    \param[in] a Value to assign to elements.
    */
    explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type a) : x(a), y(a)
    {
    }
    /**
    \brief Initializes from 2 scalar parameters.
    \param[in] nx Value to initialize X component.
    \param[in] ny Value to initialize Y component.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(Type nx, Type ny) : x(nx), y(ny)
    {
    }
    /**
    \brief Copy ctor.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T(const PxVec2T& v) : x(v.x), y(v.y)
    {
    }
    // Operators
    /**
    \brief Assignment operator
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator=(const PxVec2T& p)
    {
        x = p.x;
        y = p.y;
        return *this;
    }
    /**
    \brief element access (0 = x, 1 = y); index is asserted in range
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator[](unsigned int index)
    {
        PX_ASSERT(index <= 1);
        // relies on x/y being laid out contiguously as the first members
        return reinterpret_cast<Type*>(this)[index];
    }
    /**
    \brief element access (0 = x, 1 = y); index is asserted in range
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE const Type& operator[](unsigned int index) const
    {
        PX_ASSERT(index <= 1);
        return reinterpret_cast<const Type*>(this)[index];
    }
    /**
    \brief returns true if the two vectors are exactly equal.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec2T& v) const
    {
        return x == v.x && y == v.y;
    }
    /**
    \brief returns true if the two vectors are not exactly equal.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec2T& v) const
    {
        return x != v.x || y != v.y;
    }
    /**
    \brief tests for exact zero vector
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const
    {
        return x == Type(0.0) && y == Type(0.0);
    }
    /**
    \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.)
    */
    PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
    {
        return PxIsFinite(x) && PxIsFinite(y);
    }
    /**
    \brief is normalized - used by API parameter validation
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const
    {
        const Type unitTolerance = Type(1e-4);
        return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance;
    }
    /**
    \brief returns the squared magnitude
    Avoids calling PxSqrt()!
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const
    {
        return x * x + y * y;
    }
    /**
    \brief returns the magnitude
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const
    {
        return PxSqrt(magnitudeSquared());
    }
    /**
    \brief negation
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-() const
    {
        return PxVec2T(-x, -y);
    }
    /**
    \brief vector addition
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator+(const PxVec2T& v) const
    {
        return PxVec2T(x + v.x, y + v.y);
    }
    /**
    \brief vector difference
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator-(const PxVec2T& v) const
    {
        return PxVec2T(x - v.x, y - v.y);
    }
    /**
    \brief scalar post-multiplication
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator*(Type f) const
    {
        return PxVec2T(x * f, y * f);
    }
    /**
    \brief scalar division (implemented as multiplication by the reciprocal; no divide-by-zero check)
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T operator/(Type f) const
    {
        f = Type(1.0) / f;
        return PxVec2T(x * f, y * f);
    }
    /**
    \brief vector addition
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator+=(const PxVec2T& v)
    {
        x += v.x;
        y += v.y;
        return *this;
    }
    /**
    \brief vector difference
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator-=(const PxVec2T& v)
    {
        x -= v.x;
        y -= v.y;
        return *this;
    }
    /**
    \brief scalar multiplication
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator*=(Type f)
    {
        x *= f;
        y *= f;
        return *this;
    }
    /**
    \brief scalar division (implemented as multiplication by the reciprocal; no divide-by-zero check)
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T& operator/=(Type f)
    {
        f = Type(1.0) / f;
        x *= f;
        y *= f;
        return *this;
    }
    /**
    \brief returns the scalar product of this and other.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxVec2T& v) const
    {
        return x * v.x + y * v.y;
    }
    /** returns a unit vector (or the zero vector if this vector is exactly zero) */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T getNormalized() const
    {
        const Type m = magnitudeSquared();
        return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec2T(Type(0));
    }
    /**
    \brief normalizes the vector in place and returns its previous magnitude
    (leaves a zero vector unchanged)
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize()
    {
        const Type m = magnitude();
        if(m > Type(0.0))
            *this /= m;
        return m;
    }
    /**
    \brief a[i] * b[i], for all i.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T multiply(const PxVec2T& a) const
    {
        return PxVec2T(x * a.x, y * a.y);
    }
    /**
    \brief element-wise minimum
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T minimum(const PxVec2T& v) const
    {
        return PxVec2T(PxMin(x, v.x), PxMin(y, v.y));
    }
    /**
    \brief returns MIN(x, y);
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type minElement() const
    {
        return PxMin(x, y);
    }
    /**
    \brief element-wise maximum
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec2T maximum(const PxVec2T& v) const
    {
        return PxVec2T(PxMax(x, v.x), PxMax(y, v.y));
    }
    /**
    \brief returns MAX(x, y);
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE Type maxElement() const
    {
        return PxMax(x, y);
    }
    Type x, y;
};
/**
\brief scalar pre-multiplication, so that f * v works like v * f
*/
template<class Type>
PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec2T<Type> operator*(Type f, const PxVec2T<Type>& v)
{
    return PxVec2T<Type>(f * v.x, f * v.y);
}
typedef PxVec2T<float> PxVec2;   // single-precision 2D vector
typedef PxVec2T<double> PxVec2d; // double-precision 2D vector
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,420 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC3_H
#define PX_VEC3_H
#include "foundation/PxMath.h"
#include "foundation/PxConstructor.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief 3 Element vector class.
This is a 3-dimensional vector class with public data members.
*/
template<class Type>
class PxVec3T
{
public:
/**
\brief default constructor leaves data uninitialized.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T()
{
}
/**
\brief zero constructor.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(PxZERO) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0))
{
}
/**
\brief Assigns scalar parameter to all elements.
Useful to initialize to zero or one.
\param[in] a Value to assign to elements.
*/
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(Type a) : x(a), y(a), z(a)
{
}
/**
\brief Initializes from 3 scalar parameters.
\param[in] nx Value to initialize X component.
\param[in] ny Value to initialize Y component.
\param[in] nz Value to initialize Z component.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(Type nx, Type ny, Type nz) : x(nx), y(ny), z(nz)
{
}
/**
\brief Copy ctor.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T(const PxVec3T& v) : x(v.x), y(v.y), z(v.z)
{
}
// Operators
/**
\brief Assignment operator
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator=(const PxVec3T& p)
{
x = p.x;
y = p.y;
z = p.z;
return *this;
}
/**
\brief element access
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type& operator[](unsigned int index)
{
PX_ASSERT(index <= 2);
return reinterpret_cast<Type*>(this)[index];
}
/**
\brief element access
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE const Type& operator[](unsigned int index) const
{
PX_ASSERT(index <= 2);
return reinterpret_cast<const Type*>(this)[index];
}
/**
\brief returns true if the two vectors are exactly equal.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const PxVec3T& v) const
{
return x == v.x && y == v.y && z == v.z;
}
/**
\brief returns true if the two vectors are not exactly equal.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const PxVec3T& v) const
{
return x != v.x || y != v.y || z != v.z;
}
/**
\brief tests for exact zero vector
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZero() const
{
return x == Type(0.0) && y == Type(0.0) && z == Type(0.0);
}
/**
\brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
*/
PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
{
return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z);
}
/**
\brief is normalized - used by API parameter validation
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isNormalized() const
{
const float unitTolerance = Type(1e-4); // PT: do we need a different epsilon for float & double?
return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance;
}
/**
\brief returns the squared magnitude
Avoids calling PxSqrt()!
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitudeSquared() const
{
return x * x + y * y + z * z;
}
/**
\brief returns the magnitude
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type magnitude() const
{
return PxSqrt(magnitudeSquared());
}
/**
\brief negation
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator-() const
{
return PxVec3T(-x, -y, -z);
}
/**
\brief vector addition
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator+(const PxVec3T& v) const
{
return PxVec3T(x + v.x, y + v.y, z + v.z);
}
/**
\brief vector difference
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator-(const PxVec3T& v) const
{
return PxVec3T(x - v.x, y - v.y, z - v.z);
}
/**
\brief scalar post-multiplication
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator*(Type f) const
{
return PxVec3T(x * f, y * f, z * f);
}
/**
\brief scalar division
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T operator/(Type f) const
{
f = Type(1.0) / f;
return PxVec3T(x * f, y * f, z * f);
}
/**
\brief vector addition
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator+=(const PxVec3T& v)
{
x += v.x;
y += v.y;
z += v.z;
return *this;
}
/**
\brief vector difference
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator-=(const PxVec3T& v)
{
x -= v.x;
y -= v.y;
z -= v.z;
return *this;
}
/**
\brief scalar multiplication
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator*=(Type f)
{
x *= f;
y *= f;
z *= f;
return *this;
}
/**
\brief scalar division
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T& operator/=(Type f)
{
f = Type(1.0) / f;
x *= f;
y *= f;
z *= f;
return *this;
}
/**
\brief returns the scalar product of this and other.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type dot(const PxVec3T& v) const
{
return x * v.x + y * v.y + z * v.z;
}
/**
\brief cross product
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T cross(const PxVec3T& v) const
{
return PxVec3T(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x);
}
/** returns a unit vector */
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T getNormalized() const
{
const Type m = magnitudeSquared();
return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec3T(Type(0));
}
/**
\brief normalizes the vector in place
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalize()
{
const Type m = magnitude();
if(m > Type(0.0))
*this /= m;
return m;
}
/**
\brief normalizes the vector in place. Does nothing if vector magnitude is under PX_NORMALIZATION_EPSILON.
\return Vector magnitude if >= PX_NORMALIZATION_EPSILON and 0.0f otherwise.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalizeSafe()
{
    const Type len = magnitude();
    if(len < PX_NORMALIZATION_EPSILON) // PT: do we need a different epsilon for float & double?
        return Type(0.0);
    const Type invLen = Type(1.0) / len;
    *this *= invLen;
    return len;
}
/**
\brief normalizes the vector in place. Asserts if vector magnitude is under PX_NORMALIZATION_EPSILON.
No runtime guard in release builds - the caller guarantees a non-degenerate vector.
\return Vector magnitude.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type normalizeFast()
{
    const Type len = magnitude();
    PX_ASSERT(len >= PX_NORMALIZATION_EPSILON); // PT: do we need a different epsilon for float & double?
    const Type invLen = Type(1.0) / len;
    *this *= invLen;
    return len;
}
/**
\brief element-wise (Hadamard) product: a[i] * b[i], for all i.
\param[in] a The other vector.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T multiply(const PxVec3T& a) const
{
    const Type px = x * a.x;
    const Type py = y * a.y;
    const Type pz = z * a.z;
    return PxVec3T(px, py, pz);
}
/**
\brief element-wise minimum
\param[in] v The other vector.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T minimum(const PxVec3T& v) const
{
    const Type mx = PxMin(x, v.x);
    const Type my = PxMin(y, v.y);
    const Type mz = PxMin(z, v.z);
    return PxVec3T(mx, my, mz);
}
/**
\brief returns MIN(x, y, z);
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type minElement() const
{
    const Type smallerYZ = PxMin(y, z);
    return PxMin(x, smallerYZ);
}
/**
\brief element-wise maximum
\param[in] v The other vector.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T maximum(const PxVec3T& v) const
{
    const Type mx = PxMax(x, v.x);
    const Type my = PxMax(y, v.y);
    const Type mz = PxMax(z, v.z);
    return PxVec3T(mx, my, mz);
}
/**
\brief returns MAX(x, y, z);
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE Type maxElement() const
{
    const Type largerYZ = PxMax(y, z);
    return PxMax(x, largerYZ);
}
/**
\brief returns absolute values of components;
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3T abs() const
{
    const Type ax = PxAbs(x);
    const Type ay = PxAbs(y);
    const Type az = PxAbs(z);
    return PxVec3T(ax, ay, az);
}
Type x, y, z;
};
/**
\brief scalar pre-multiplication: f * v
*/
template<class Type>
PX_CUDA_CALLABLE static PX_FORCE_INLINE PxVec3T<Type> operator*(Type f, const PxVec3T<Type>& v)
{
    const Type sx = f * v.x;
    const Type sy = f * v.y;
    const Type sz = f * v.z;
    return PxVec3T<Type>(sx, sy, sz);
}
typedef PxVec3T<float> PxVec3;
typedef PxVec3T<double> PxVec3d;
//! A padded version of PxVec3, to safely load its data using SIMD: the extra 4 bytes
//! guarantee a 16-byte read starting at this object stays inside owned memory.
class PxVec3Padded : public PxVec3
{
public:
    PX_FORCE_INLINE PxVec3Padded() {}
    PX_FORCE_INLINE ~PxVec3Padded() {}
    PX_FORCE_INLINE PxVec3Padded(const PxVec3& p) : PxVec3(p) {}
    PX_FORCE_INLINE PxVec3Padded(float f) : PxVec3(f) {}
    /**
    \brief Assignment operator.
    Explicitly defined to fix:
    error: definition of implicit copy assignment operator for 'PxVec3Padded' is deprecated because it has a user-declared destructor [-Werror,-Wdeprecated]
    Note: 'padding' is deliberately not copied - its value is never meaningful.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3Padded& operator=(const PxVec3Padded& p)
    {
        x = p.x;
        y = p.y;
        z = p.z;
        return *this;
    }
    // Intentionally left uninitialized; exists only to widen the struct to 16 bytes.
    PxU32 padding;
};
PX_COMPILE_TIME_ASSERT(sizeof(PxVec3Padded) == 16);
typedef PxVec3Padded PxVec3p;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,364 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC4_H
#define PX_VEC4_H
#include "foundation/PxMath.h"
#include "foundation/PxVec3.h"
/**
\brief 4 Element vector class.
This is a 4-dimensional vector class with public data members.
*/
#if !PX_DOXYGEN
namespace physx
{
#endif
template<class Type>
class PxVec4T
{
public:
    /**
    \brief default constructor leaves data uninitialized.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T()
    {
    }
    /**
    \brief zero constructor.
    */
    PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec4T(PxZERO) : x(Type(0.0)), y(Type(0.0)), z(Type(0.0)), w(Type(0.0))
    {
    }
    /**
    \brief Assigns scalar parameter to all elements.
    Useful to initialize to zero or one.
    \param[in] a Value to assign to elements.
    */
    explicit PX_CUDA_CALLABLE PX_INLINE PxVec4T(Type a) : x(a), y(a), z(a), w(a)
    {
    }
    /**
    \brief Initializes from 4 scalar parameters.
    \param[in] nx Value to initialize X component.
    \param[in] ny Value to initialize Y component.
    \param[in] nz Value to initialize Z component.
    \param[in] nw Value to initialize W component.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T(Type nx, Type ny, Type nz, Type nw) : x(nx), y(ny), z(nz), w(nw)
    {
    }
    /**
    \brief Initializes from a 3-vector and a scalar.
    \param[in] v Value to initialize the X, Y, and Z components.
    \param[in] nw Value to initialize W component.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T(const PxVec3T<Type>& v, Type nw) : x(v.x), y(v.y), z(v.z), w(nw)
    {
    }
    /**
    \brief Initializes from an array of scalar parameters.
    \param[in] v Array of at least 4 values, read in x,y,z,w order.
    */
    explicit PX_CUDA_CALLABLE PX_INLINE PxVec4T(const Type v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3])
    {
    }
    /**
    \brief Copy ctor.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T(const PxVec4T& v) : x(v.x), y(v.y), z(v.z), w(v.w)
    {
    }
    // Operators
    /**
    \brief Assignment operator
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator=(const PxVec4T& p)
    {
        x = p.x;
        y = p.y;
        z = p.z;
        w = p.w;
        return *this;
    }
    /**
    \brief element access (0=x, 1=y, 2=z, 3=w)
    */
    PX_CUDA_CALLABLE PX_INLINE Type& operator[](unsigned int index)
    {
        PX_ASSERT(index <= 3);
        // Relies on x,y,z,w being laid out contiguously in declaration order.
        return reinterpret_cast<Type*>(this)[index];
    }
    /**
    \brief element access (0=x, 1=y, 2=z, 3=w)
    */
    PX_CUDA_CALLABLE PX_INLINE const Type& operator[](unsigned int index) const
    {
        PX_ASSERT(index <= 3);
        return reinterpret_cast<const Type*>(this)[index];
    }
    /**
    \brief returns true if the two vectors are exactly equal.
    */
    PX_CUDA_CALLABLE PX_INLINE bool operator==(const PxVec4T& v) const
    {
        return x == v.x && y == v.y && z == v.z && w == v.w;
    }
    /**
    \brief returns true if the two vectors are not exactly equal.
    */
    PX_CUDA_CALLABLE PX_INLINE bool operator!=(const PxVec4T& v) const
    {
        return x != v.x || y != v.y || z != v.z || w != v.w;
    }
    /**
    \brief tests for exact zero vector
    */
    PX_CUDA_CALLABLE PX_INLINE bool isZero() const
    {
        return x == Type(0) && y == Type(0) && z == Type(0) && w == Type(0);
    }
    /**
    \brief returns true if all 4 elems of the vector are finite (not NAN or INF, etc.)
    */
    PX_CUDA_CALLABLE PX_INLINE bool isFinite() const
    {
        return PxIsFinite(x) && PxIsFinite(y) && PxIsFinite(z) && PxIsFinite(w);
    }
    /**
    \brief is normalized - used by API parameter validation
    */
    PX_CUDA_CALLABLE PX_INLINE bool isNormalized() const
    {
        const Type unitTolerance = Type(1e-4);
        return isFinite() && PxAbs(magnitude() - Type(1.0)) < unitTolerance;
    }
    /**
    \brief returns the squared magnitude
    Avoids calling PxSqrt()!
    */
    PX_CUDA_CALLABLE PX_INLINE Type magnitudeSquared() const
    {
        return x * x + y * y + z * z + w * w;
    }
    /**
    \brief returns the magnitude
    */
    PX_CUDA_CALLABLE PX_INLINE Type magnitude() const
    {
        return PxSqrt(magnitudeSquared());
    }
    /**
    \brief negation
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T operator-() const
    {
        return PxVec4T(-x, -y, -z, -w);
    }
    /**
    \brief vector addition
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T operator+(const PxVec4T& v) const
    {
        return PxVec4T(x + v.x, y + v.y, z + v.z, w + v.w);
    }
    /**
    \brief vector difference
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T operator-(const PxVec4T& v) const
    {
        return PxVec4T(x - v.x, y - v.y, z - v.z, w - v.w);
    }
    /**
    \brief scalar post-multiplication
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T operator*(Type f) const
    {
        return PxVec4T(x * f, y * f, z * f, w * f);
    }
    /**
    \brief scalar division (via multiplication by the reciprocal; no divide-by-zero guard)
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T operator/(Type f) const
    {
        f = Type(1.0) / f;
        return PxVec4T(x * f, y * f, z * f, w * f);
    }
    /**
    \brief vector addition in place
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator+=(const PxVec4T& v)
    {
        x += v.x;
        y += v.y;
        z += v.z;
        w += v.w;
        return *this;
    }
    /**
    \brief vector subtraction in place
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator-=(const PxVec4T& v)
    {
        x -= v.x;
        y -= v.y;
        z -= v.z;
        w -= v.w;
        return *this;
    }
    /**
    \brief scalar multiplication in place
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator*=(Type f)
    {
        x *= f;
        y *= f;
        z *= f;
        w *= f;
        return *this;
    }
    /**
    \brief scalar division in place (via multiplication by the reciprocal; no divide-by-zero guard)
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T& operator/=(Type f)
    {
        f = Type(1.0) / f;
        x *= f;
        y *= f;
        z *= f;
        w *= f;
        return *this;
    }
    /**
    \brief returns the scalar product of this and other.
    */
    PX_CUDA_CALLABLE PX_INLINE Type dot(const PxVec4T& v) const
    {
        return x * v.x + y * v.y + z * v.z + w * v.w;
    }
    /** returns a unit vector, or the zero vector when this vector has zero magnitude */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T getNormalized() const
    {
        const Type m = magnitudeSquared();
        return m > Type(0.0) ? *this * PxRecipSqrt(m) : PxVec4T(Type(0));
    }
    /**
    \brief normalizes the vector in place
    \return The previous magnitude; the vector is left untouched when the magnitude is zero.
    */
    PX_CUDA_CALLABLE PX_INLINE Type normalize()
    {
        const Type m = magnitude();
        if(m > Type(0.0))
            *this /= m;
        return m;
    }
    /**
    \brief element-wise (Hadamard) product: a[i] * b[i], for all i.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T multiply(const PxVec4T& a) const
    {
        return PxVec4T(x * a.x, y * a.y, z * a.z, w * a.w);
    }
    /**
    \brief element-wise minimum
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T minimum(const PxVec4T& v) const
    {
        return PxVec4T(PxMin(x, v.x), PxMin(y, v.y), PxMin(z, v.z), PxMin(w, v.w));
    }
    /**
    \brief element-wise maximum
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec4T maximum(const PxVec4T& v) const
    {
        return PxVec4T(PxMax(x, v.x), PxMax(y, v.y), PxMax(z, v.z), PxMax(w, v.w));
    }
    /**
    \brief returns the x, y and z components as a 3-vector, dropping w.
    */
    PX_CUDA_CALLABLE PX_INLINE PxVec3T<Type> getXYZ() const
    {
        return PxVec3T<Type>(x, y, z);
    }
    Type x, y, z, w;
};
/**
\brief scalar pre-multiplication: f * v
*/
template<class Type>
PX_CUDA_CALLABLE static PX_INLINE PxVec4T<Type> operator*(Type f, const PxVec4T<Type>& v)
{
    const Type sx = f * v.x;
    const Type sy = f * v.y;
    const Type sz = f * v.z;
    const Type sw = f * v.w;
    return PxVec4T<Type>(sx, sy, sz, sw);
}
typedef PxVec4T<float> PxVec4;
typedef PxVec4T<double> PxVec4d;
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,251 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_MATH_AOS_SCALAR_H
#define PX_VEC_MATH_AOS_SCALAR_H
#if COMPILE_VECTOR_INTRINSICS
#error Scalar version should not be included when using vector intrinsics.
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
struct VecI16V;
struct VecU16V;
struct VecI32V;
struct VecU32V;
struct Vec4V;
typedef Vec4V QuatV;
// Scalar stand-in for a SIMD register holding one float; pad keeps the 16-byte SIMD footprint.
PX_ALIGN_PREFIX(16)
struct FloatV
{
    PxF32 x;
    PxF32 pad[3];
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    FloatV()
    {
    }
    FloatV(const PxF32 _x) : x(_x)
    {
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for a 4-lane float SIMD register.
PX_ALIGN_PREFIX(16)
struct Vec4V
{
    PxF32 x, y, z, w;
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    Vec4V()
    {
    }
    Vec4V(const PxF32 _x, const PxF32 _y, const PxF32 _z, const PxF32 _w) : x(_x), y(_y), z(_z), w(_w)
    {
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for a 3-lane float SIMD register; pad (zeroed by the init ctor) fills the 4th lane.
PX_ALIGN_PREFIX(16)
struct Vec3V
{
    PxF32 x, y, z;
    PxF32 pad;
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    Vec3V()
    {
    }
    Vec3V(const PxF32 _x, const PxF32 _y, const PxF32 _z) : x(_x), y(_y), z(_z), pad(0.0f)
    {
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for a 4-lane boolean mask register (lanes are all-bits-set or zero masks).
PX_ALIGN_PREFIX(16)
struct BoolV
{
    PxU32 ux, uy, uz, uw;
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    BoolV()
    {
    }
    BoolV(const PxU32 _x, const PxU32 _y, const PxU32 _z, const PxU32 _w) : ux(_x), uy(_y), uz(_z), uw(_w)
    {
    }
} PX_ALIGN_SUFFIX(16);
// Column-major 3x3 matrix built from three Vec3V columns.
struct Mat33V
{
    Mat33V()
    {
    }
    Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
    {
    }
    Vec3V col0;
    Vec3V col1;
    Vec3V col2;
};
// Column-major 3x4 matrix (rotation columns col0..col2 plus translation col3).
struct Mat34V
{
    Mat34V()
    {
    }
    Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
    {
    }
    Vec3V col0;
    Vec3V col1;
    Vec3V col2;
    Vec3V col3;
};
// Column-major 4x3 matrix built from three Vec4V columns.
struct Mat43V
{
    Mat43V()
    {
    }
    Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
    {
    }
    Vec4V col0;
    Vec4V col1;
    Vec4V col2;
};
// Column-major 4x4 matrix built from four Vec4V columns.
struct Mat44V
{
    Mat44V()
    {
    }
    Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
    {
    }
    Vec4V col0;
    Vec4V col1;
    Vec4V col2;
    Vec4V col3;
};
// Scalar stand-in for a 4-lane unsigned 32-bit integer SIMD register.
PX_ALIGN_PREFIX(16)
struct VecU32V
{
    PxU32 u32[4];
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    PX_FORCE_INLINE VecU32V()
    {
    }
    PX_FORCE_INLINE VecU32V(PxU32 a, PxU32 b, PxU32 c, PxU32 d)
    {
        u32[0] = a;
        u32[1] = b;
        u32[2] = c;
        u32[3] = d;
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for a 4-lane signed 32-bit integer SIMD register.
PX_ALIGN_PREFIX(16)
struct VecI32V
{
    PxI32 i32[4];
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    PX_FORCE_INLINE VecI32V()
    {
    }
    PX_FORCE_INLINE VecI32V(PxI32 a, PxI32 b, PxI32 c, PxI32 d)
    {
        i32[0] = a;
        i32[1] = b;
        i32[2] = c;
        i32[3] = d;
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for an 8-lane signed 16-bit integer SIMD register.
PX_ALIGN_PREFIX(16)
struct VecI16V
{
    PxI16 i16[8];
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    PX_FORCE_INLINE VecI16V()
    {
    }
    PX_FORCE_INLINE VecI16V(PxI16 a, PxI16 b, PxI16 c, PxI16 d, PxI16 e, PxI16 f, PxI16 g, PxI16 h)
    {
        i16[0] = a;
        i16[1] = b;
        i16[2] = c;
        i16[3] = d;
        i16[4] = e;
        i16[5] = f;
        i16[6] = g;
        i16[7] = h;
    }
} PX_ALIGN_SUFFIX(16);
// Scalar stand-in for an 8-lane unsigned 16-bit integer SIMD register.
// The union exposes the same bits as signed lanes for ops that reinterpret them.
PX_ALIGN_PREFIX(16)
struct VecU16V
{
    union
    {
        PxU16 u16[8];
        PxI16 i16[8];
    };
    // Intentionally leaves data uninitialized, mirroring SIMD register semantics.
    PX_FORCE_INLINE VecU16V()
    {
    }
    PX_FORCE_INLINE VecU16V(PxU16 a, PxU16 b, PxU16 c, PxU16 d, PxU16 e, PxU16 f, PxU16 g, PxU16 h)
    {
        u16[0] = a;
        u16[1] = b;
        u16[2] = c;
        u16[3] = d;
        u16[4] = e;
        u16[5] = f;
        u16[6] = g;
        u16[7] = h;
    }
} PX_ALIGN_SUFFIX(16);
// Scalar build: "argument types" are plain references, since there are no
// pass-in-register SIMD types to mimic here.
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define QuatVArg QuatV &
#define VecCrossV Vec3V
// In the scalar implementation a shift-count vector is just an integer vector.
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV &
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,497 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_QUAT_H
#define PX_VEC_QUAT_H
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
#ifndef PX_PIDIV2
#define PX_PIDIV2 1.570796327f
#endif
//////////////////////////////////
// QuatV
//////////////////////////////////
// Builds a quaternion from four scalars (x,y,z imaginary, w real).
PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w)
{
    return V4LoadXYZW(x, y, z, w);
}
// Loads a quaternion from 4 floats at a potentially unaligned address.
PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v)
{
    return V4LoadU(v);
}
// Loads a quaternion from 4 floats at a 16-byte aligned address.
PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v)
{
    return V4LoadA(v);
}
// Builds the unit quaternion rotating angle 'a' (radians) about unit axis 'u'.
PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a)
{
    // q = cos(a/2) + u*sin(a/2)
    const FloatV half = FLoad(0.5f);
    const FloatV hangle = FMul(a, half);
    const FloatV piByTwo(FLoad(PX_PIDIV2));
    const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle));
    // One vector sin() yields both terms: lane x = sin(a/2), lane y = sin(pi/2 - a/2) = cos(a/2).
    const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle)));
    const Vec4V _sina = V4Sin(hangle2);
    const FloatV sina = V4GetX(_sina);
    const FloatV cosa = V4GetY(_sina);
    const Vec3V v = V3Scale(u, sina);
    return V4SetW(Vec4V_From_Vec3V(v), cosa);
}
// Normalize
// Returns q scaled to unit length (treats the quaternion as a 4-vector).
PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q)
{
    return V4Normalize(q);
}
// Returns the 4D Euclidean length of q.
PX_FORCE_INLINE FloatV QuatLength(const QuatV q)
{
    return V4Length(q);
}
// Returns the squared 4D length of q (no square root).
PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q)
{
    return V4LengthSq(q);
}
PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b) // 4D dot product of two quaternions
{
    return V4Dot(a, b);
}
// Returns the conjugate (-x,-y,-z,w); equals the inverse for unit quaternions.
PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q)
{
    return V4SetW(V4Neg(q), V4GetW(q));
}
// Returns the (x,y,z) imaginary part of q as a 3-vector.
PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q)
{
    return Vec3V_From_Vec4V(q);
}
/** brief computes rotation of x-axis */
PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q)
{
    // Scalar reference:
    /*const PxF32 x2 = x*2.0f;
    const PxF32 w2 = w*2.0f;
    return PxVec3( (w * w2) - 1.0f + x*x2,
    (z * w2) + y*x2,
    (-y * w2) + z*x2);*/
    const FloatV two = FLoad(2.0f);
    const FloatV w = V4GetW(q);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV x2 = FMul(V3GetX(u), two);
    const FloatV w2 = FMul(w, two);
    const Vec3V a = V3Scale(u, x2);
    const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u)));
    // Fused multiply-add replaces the separate scale + add of tmp*w2.
    const Vec3V ab = V3ScaleAdd(tmp, w2, a);
    return V3SetX(ab, FSub(V3GetX(ab), FOne()));
}
/** brief computes rotation of y-axis */
PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q)
{
    // Scalar reference:
    /*const PxF32 y2 = y*2.0f;
    const PxF32 w2 = w*2.0f;
    return PxVec3( (-z * w2) + x*y2,
    (w * w2) - 1.0f + y*y2,
    (x * w2) + z*y2);*/
    const FloatV two = FLoad(2.0f);
    const FloatV w = V4GetW(q);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV y2 = FMul(V3GetY(u), two);
    const FloatV w2 = FMul(w, two);
    const Vec3V a = V3Scale(u, y2);
    const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)), w, V3GetX(u));
    // Fused multiply-add replaces the separate scale + add of tmp*w2.
    const Vec3V ab = V3ScaleAdd(tmp, w2, a);
    return V3SetY(ab, FSub(V3GetY(ab), FOne()));
}
/** brief computes rotation of z-axis */
PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q)
{
    // Scalar reference:
    /*const PxF32 z2 = z*2.0f;
    const PxF32 w2 = w*2.0f;
    return PxVec3( (y * w2) + x*z2,
    (-x * w2) + y*z2,
    (w * w2) - 1.0f + z*z2);*/
    const FloatV two = FLoad(2.0f);
    const FloatV w = V4GetW(q);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV z2 = FMul(V3GetZ(u), two);
    const FloatV w2 = FMul(w, two);
    const Vec3V a = V3Scale(u, z2);
    const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w);
    // Fused multiply-add replaces the separate scale + add of tmp*w2.
    const Vec3V ab = V3ScaleAdd(tmp, w2, a);
    return V3SetZ(ab, FSub(V3GetZ(ab), FOne()));
}
// Rotates vector v by unit quaternion q.
PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v)
{
    // Scalar reference:
    /*
    const PxVec3 qv(x,y,z);
    return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2;
    */
    const FloatV two = FLoad(2.0f);
    const FloatV nhalf = FLoad(-0.5f);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV w = V4GetW(q);
    // w*w - 0.5 computed as a fused multiply-add with -0.5.
    const FloatV w2 = FScaleAdd(w, w, nhalf);
    const Vec3V a = V3Scale(v, w2);
    // The remaining two terms are folded into fused scale-adds.
    const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
    return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
// PT: same as QuatRotate but operates on a Vec4V
PX_FORCE_INLINE Vec4V QuatRotate4V(const QuatV q, const Vec4V v)
{
    const FloatV two = FLoad(2.0f);
    const FloatV nhalf = FLoad(-0.5f);
    const Vec4V u = q; // PT: W not cleared here
    const FloatV w = V4GetW(q);
    const FloatV w2 = FScaleAdd(w, w, nhalf);
    const Vec4V a = V4Scale(v, w2); // PT: W has non-zero data here
    const Vec4V temp = V4ScaleAdd(V4Cross(u, v), w, a);
    return V4Scale(V4ScaleAdd(u, V4Dot3(u, v), temp), two); // PT: beware, V4Dot3 has one more instruction here
}
// PT: avoid some multiplies when immediately normalizing a rotated vector
// (the final *2 scale of QuatRotate is dropped since normalization cancels it).
PX_FORCE_INLINE Vec3V QuatRotateAndNormalize(const QuatV q, const Vec3V v)
{
    const FloatV nhalf = FLoad(-0.5f);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV w = V4GetW(q);
    const FloatV w2 = FScaleAdd(w, w, nhalf);
    const Vec3V a = V3Scale(v, w2);
    const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
    return V3Normalize(V3ScaleAdd(u, V3Dot(u, v), temp));
}
// Rotates v by q and adds translation p: returns p + q.rotate(v).
PX_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v)
{
    // p + q.rotate(v)
    const FloatV two = FLoad(2.0f);
    const FloatV nhalf = FLoad(-0.5f);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV w = V4GetW(q);
    // w*w - 0.5 computed as a fused multiply-add with -0.5.
    const FloatV w2 = FScaleAdd(w, w, nhalf);
    const Vec3V a = V3Scale(v, w2);
    // Scalar reference:
    /*const Vec3V b = V3Scale(V3Cross(u, v), w);
    const Vec3V c = V3Scale(u, V3Dot(u, v));
    return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/
    const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
    const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp);
    return V3ScaleAdd(z, two, p);
}
// Rotates v by the inverse of unit quaternion q.
PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v)
{
    // Scalar reference:
    // const PxVec3 qv(x,y,z);
    // return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2;
    const FloatV two = FLoad(2.0f);
    const FloatV nhalf = FLoad(-0.5f);
    const Vec3V u = Vec3V_From_Vec4V(q);
    const FloatV w = V4GetW(q);
    const FloatV w2 = FScaleAdd(w, w, nhalf);
    const Vec3V a = V3Scale(v, w2);
    // Only difference to QuatRotate: the cross term is subtracted (NegScaleSub).
    const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a);
    return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
// Hamilton product a*b: real = ra*rb - dot(va,vb), imag = ra*vb + rb*va + cross(va,vb).
PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b)
{
    const Vec4V imagA = a;
    const Vec4V imagB = b;
    const FloatV rA = V4GetW(a);
    const FloatV rB = V4GetW(b);
    const FloatV real = FSub(FMul(rA, rB), V4Dot3(imagA, imagB));
    const Vec4V v0 = V4Scale(imagA, rB);
    const Vec4V v1 = V4Scale(imagB, rA);
    const Vec4V v2 = V4Cross(imagA, imagB);
    const Vec4V imag = V4Add(V4Add(v0, v1), v2);
    // W lane of 'imag' is garbage; overwrite it with the real part.
    return V4SetW(imag, real);
}
// Component-wise quaternion addition.
PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b)
{
    return V4Add(a, b);
}
// Component-wise negation (represents the same rotation for unit quaternions).
PX_FORCE_INLINE QuatV QuatNeg(const QuatV q)
{
    return V4Neg(q);
}
// Component-wise quaternion subtraction.
PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b)
{
    return V4Sub(a, b);
}
// Scales all four quaternion components by b.
PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b)
{
    return V4Scale(a, b);
}
// Builds a quaternion from an array of four FloatV lanes (x,y,z,w order).
PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray)
{
    return V4Merge(floatVArray);
}
// Builds a quaternion from four individual FloatV lanes.
PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
    return V4Merge(x, y, z, w);
}
// Returns the identity quaternion (0,0,0,1).
PX_FORCE_INLINE QuatV QuatIdentity()
{
    return V4SetW(V4Zero(), FOne());
}
// True if all four components are finite (no NaN/Inf).
PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q)
{
    return isFiniteVec4V(q);
}
#if (PX_LINUX || PX_SWITCH) && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance
#endif
// True if q is finite and within 1e-4 of unit length (strict validation).
PX_FORCE_INLINE bool isValidQuatV(const QuatV q)
{
    const FloatV unitTolerance = FLoad(1e-4f);
    const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
    const BoolV con = FIsGrtr(unitTolerance, tmp);
    return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}
// True if q is finite and within 1e-2 of unit length (looser sanity check).
PX_FORCE_INLINE bool isSaneQuatV(const QuatV q)
{
    const FloatV unitTolerance = FLoad(1e-2f);
    const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
    const BoolV con = FIsGrtr(unitTolerance, tmp);
    return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}
// Guard must match the corresponding 'push' above, which is also enabled for
// PX_SWITCH; otherwise the clang diagnostic stack is left unbalanced there.
#if (PX_LINUX || PX_SWITCH) && PX_CLANG
#pragma clang diagnostic pop
#endif
// Converts a unit quaternion to a column-major 3x3 rotation matrix
// using the standard 1-2(yy+zz) / 2(xy±zw) / ... expansion.
// (A previous all-vector variant was removed; this scalar-lane version
// proved preferable.)
PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q)
{
    const FloatV one = FOne();
    const FloatV x = V4GetX(q);
    const FloatV y = V4GetY(q);
    const FloatV z = V4GetZ(q);
    const FloatV w = V4GetW(q);
    // Doubled components and their products: xx = 2x*x, xy = 2x*y, etc.
    const FloatV x2 = FAdd(x, x);
    const FloatV y2 = FAdd(y, y);
    const FloatV z2 = FAdd(z, z);
    const FloatV xx = FMul(x2, x);
    const FloatV yy = FMul(y2, y);
    const FloatV zz = FMul(z2, z);
    const FloatV xy = FMul(x2, y);
    const FloatV xz = FMul(x2, z);
    const FloatV xw = FMul(x2, w);
    const FloatV yz = FMul(y2, z);
    const FloatV yw = FMul(y2, w);
    const FloatV zw = FMul(z2, w);
    // v = 1 - 2xx, shared by the diagonal terms of columns 1 and 2.
    const FloatV v = FSub(one, xx);
    const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
    const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw));
    const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));
    return Mat33V(column0, column1, column2);
}
// Converts a pure rotation matrix to a unit quaternion, branching on the
// trace (Shepperd-style method): the largest of w/x/y/z is computed first
// for numerical stability, the rest derived from it.
PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a)
{
    const FloatV one = FOne();
    const FloatV zero = FZero();
    const FloatV half = FLoad(0.5f);
    const FloatV two = FLoad(2.0f);
    const FloatV scale = FLoad(0.25f);
    const FloatV a00 = V3GetX(a.col0);
    const FloatV a11 = V3GetY(a.col1);
    const FloatV a22 = V3GetZ(a.col2);
    const FloatV a21 = V3GetZ(a.col1); // row=2, col=1;
    const FloatV a12 = V3GetY(a.col2); // row=1, col=2;
    const FloatV a02 = V3GetX(a.col2); // row=0, col=2;
    const FloatV a20 = V3GetZ(a.col0); // row=2, col=0;
    const FloatV a10 = V3GetY(a.col0); // row=1, col=0;
    const FloatV a01 = V3GetX(a.col1); // row=0, col=1;
    // v holds the antisymmetric differences (drive the imaginary part),
    // g the symmetric sums (used in the small-trace branches).
    const Vec3V vec0 = V3Merge(a21, a02, a10);
    const Vec3V vec1 = V3Merge(a12, a20, a01);
    const Vec3V v = V3Sub(vec0, vec1);
    const Vec3V g = V3Add(vec0, vec1);
    const FloatV trace = FAdd(a00, FAdd(a11, a22));
    if(FAllGrtrOrEq(trace, zero))
    {
        // Trace-dominant case: w is the largest component.
        const FloatV h = FSqrt(FAdd(trace, one));
        const FloatV w = FMul(half, h);
        const FloatV s = FMul(half, FRecip(h));
        const Vec3V u = V3Scale(v, s);
        return V4SetW(Vec4V_From_Vec3V(u), w);
    }
    else
    {
        // Pick the dominant diagonal element to decide which imaginary
        // component to extract first.
        const FloatV ntrace = FNeg(trace);
        const Vec3V d = V3Merge(a00, a11, a22);
        const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d));
        const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d));
        const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace));
        const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace));
        const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace));
        const FloatV t = FSel(con0, t0, FSel(con1, t1, t2));
        const FloatV h = FMul(two, FSqrt(t));
        const FloatV s = FRecip(h);
        const FloatV g0 = FMul(scale, h);
        const Vec3V vs = V3Scale(v, s);
        const Vec3V gs = V3Scale(g, s);
        const FloatV gsx = V3GetX(gs);
        const FloatV gsy = V3GetY(gs);
        const FloatV gsz = V3GetZ(gs);
        // vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s;
        // gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s;
        const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs));
        const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs));
        const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs));
        return V4Sel(con0, v0, V4Sel(con1, v1, v2));
    }
}
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,290 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_VEC_TRANSFORM_H
#define PX_VEC_TRANSFORM_H
#include "foundation/PxVecMath.h"
#include "foundation/PxTransform.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
class PxTransformV
{
public:
QuatV q;
Vec3V p;
// Loads a vectorized transform from a scalar PxTransform (unaligned-safe loads).
PX_FORCE_INLINE PxTransformV(const PxTransform& transform)
{
    // PT: this is now similar to loadTransformU below.
    q = QuatVLoadU(&transform.q.x);
    p = V3LoadU(&transform.p.x);
}
PX_FORCE_INLINE PxTransformV(const Vec3VArg p0 = V3Zero(), const QuatVArg q0 = QuatIdentity()) : q(q0), p(p0)
{
PX_ASSERT(isSaneQuatV(q0));
}
PX_FORCE_INLINE PxTransformV operator*(const PxTransformV& x) const
{
PX_ASSERT(x.isSane());
return transform(x);
}
PX_FORCE_INLINE PxTransformV getInverse() const
{
PX_ASSERT(isFinite());
// return PxTransform(q.rotateInv(-p),q.getConjugate());
return PxTransformV(QuatRotateInv(q, V3Neg(p)), QuatConjugate(q));
}
PX_FORCE_INLINE void invalidate()
{
p = V3Splat(FMax());
q = QuatIdentity();
}
PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input) + p;
return QuatTransform(q, p, input);
}
PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input-p);
return QuatRotateInv(q, V3Sub(input, p));
}
PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input);
return QuatRotate(q, input);
}
// PT: avoid some multiplies when immediately normalizing a rotated vector
PX_FORCE_INLINE Vec3V rotateAndNormalize(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
return QuatRotateAndNormalize(q, input);
}
PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input);
return QuatRotateInv(q, input);
}
//! Transform transform to parent (returns compound transform: first src, then *this)
PX_FORCE_INLINE PxTransformV transform(const PxTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isSane());
// src = [srct, srcr] -> [r*srct + t, r*srcr]
// return PxTransform(q.rotate(src.p) + p, q*src.q);
return PxTransformV(V3Add(QuatRotate(q, src.p), p), QuatMul(q, src.q));
}
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wbitwise-instead-of-logical" // bitwise intentionally chosen for performance
#endif
/**
\brief returns true if finite and q is a unit quaternion
*/
PX_FORCE_INLINE bool isValid() const
{
// return p.isFinite() && q.isFinite() && q.isValid();
return isFiniteVec3V(p) & isFiniteQuatV(q) & isValidQuatV(q);
}
/**
\brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error
vs isValid
*/
PX_FORCE_INLINE bool isSane() const
{
// return isFinite() && q.isSane();
return isFinite() & isSaneQuatV(q);
}
/**
\brief returns true if all elems are finite (not NAN or INF, etc.)
*/
PX_FORCE_INLINE bool isFinite() const
{
// return p.isFinite() && q.isFinite();
return isFiniteVec3V(p) & isFiniteQuatV(q);
}
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif
//! Transform transform from parent (returns compound transform: first src, then this->inverse)
PX_FORCE_INLINE PxTransformV transformInv(const PxTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isFinite());
// src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
/*PxQuat qinv = q.getConjugate();
return PxTransform(qinv.rotate(src.p - p), qinv*src.q);*/
const QuatV qinv = QuatConjugate(q);
const Vec3V v = QuatRotate(qinv, V3Sub(src.p, p));
const QuatV rot = QuatMul(qinv, src.q);
return PxTransformV(v, rot);
}
static PX_FORCE_INLINE PxTransformV createIdentity()
{
return PxTransformV(V3Zero());
}
};
// Build a PxTransformV from a scalar PxTransform using the aligned-load
// variants (V3LoadA/QuatVLoadA) — the transform must satisfy their alignment
// requirements.
PX_FORCE_INLINE PxTransformV loadTransformA(const PxTransform& transform)
{
	const Vec3V position = V3LoadA(&transform.p.x);
	const QuatV rotation = QuatVLoadA(&transform.q.x);
	return PxTransformV(position, rotation);
}
// Build a PxTransformV from a scalar PxTransform using unaligned loads
// (V3LoadU/QuatVLoadU) — safe for any source address.
PX_FORCE_INLINE PxTransformV loadTransformU(const PxTransform& transform)
{
	const Vec3V position = V3LoadU(&transform.p.x);
	const QuatV rotation = QuatVLoadU(&transform.q.x);
	return PxTransformV(position, rotation);
}
// SIMD transform stored as an explicit 3x3 column matrix plus translation.
// Useful when the rotation is applied many times: matrix-vector products
// replace repeated quaternion rotations.
class PxMatTransformV
{
  public:
	Mat33V rot; // rotation as three column vectors
	Vec3V p;    // translation

	//! Identity transform.
	PX_FORCE_INLINE PxMatTransformV()
	{
		p = V3Zero();
		rot = M33Identity();
	}

	PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const Mat33V& _rot)
	{
		p = _p;
		rot = _rot;
	}

	//! Convert from quaternion form by expanding the quaternion into a 3x3 matrix.
	PX_FORCE_INLINE PxMatTransformV(const PxTransformV& other)
	{
		p = other.p;
		QuatGetMat33V(other.q, rot.col0, rot.col1, rot.col2);
	}

	PX_FORCE_INLINE PxMatTransformV(const Vec3VArg _p, const QuatV& quat)
	{
		p = _p;
		QuatGetMat33V(quat, rot.col0, rot.col1, rot.col2);
	}

	PX_FORCE_INLINE Vec3V getCol0() const
	{
		return rot.col0;
	}
	PX_FORCE_INLINE Vec3V getCol1() const
	{
		return rot.col1;
	}
	PX_FORCE_INLINE Vec3V getCol2() const
	{
		return rot.col2;
	}
	PX_FORCE_INLINE void setCol0(const Vec3VArg col0)
	{
		rot.col0 = col0;
	}
	PX_FORCE_INLINE void setCol1(const Vec3VArg col1)
	{
		rot.col1 = col1;
	}
	PX_FORCE_INLINE void setCol2(const Vec3VArg col2)
	{
		rot.col2 = col2;
	}

	//! Point transform: rotate then translate.
	PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
	{
		return V3Add(p, M33MulV3(rot, input));
	}

	//! Inverse point transform; uses the transpose, which inverts the rotation
	//! only when rot is orthonormal.
	PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
	{
		return M33TrnspsMulV3(rot, V3Sub(input, p)); // QuatRotateInv(q, V3Sub(input, p));
	}

	//! Direction transform: rotation only.
	PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
	{
		return M33MulV3(rot, input);
	}

	//! Inverse direction transform via the transpose (valid for orthonormal rot).
	PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
	{
		return M33TrnspsMulV3(rot, input);
	}

	//! Transform transform from parent: first src, then this->inverse.
	PX_FORCE_INLINE PxMatTransformV transformInv(const PxMatTransformV& src) const
	{
		const Vec3V v = M33TrnspsMulV3(rot, V3Sub(src.p, p));
		const Mat33V mat = M33MulM33(M33Trnsps(rot), src.rot);
		return PxMatTransformV(v, mat);
	}
};
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,46 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXAOS_H
#define PXFOUNDATION_PXUNIXAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "foundation/unix/sse2/PxUnixSse2AoS.h"
#elif PX_NEON
#include "foundation/unix/neon/PxUnixNeonAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PXFOUNDATION_PXUNIXAOS_H

View File

@@ -0,0 +1,83 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXFPU_H
#define PXFOUNDATION_PXUNIXFPU_H
#include "foundation/PxPreprocessor.h"
#if PX_LINUX || PX_OSX
#if PX_X86 || PX_X64
#if PX_EMSCRIPTEN
#include <emmintrin.h>
#endif
#include <xmmintrin.h>
#elif PX_NEON
#include <arm_neon.h>
#endif
// Saves the current MXCSR control/status register and switches the FPU into
// the state PhysX expects: all FP exceptions masked, flush-to-zero and
// denormals-are-zero enabled. When 'enable' is false, only asserts (in debug
// builds) that this state is already active. No-op on non-x86 targets and
// under Emscripten.
PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool enable)
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
: mEnabled(enable)
#endif
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
	if(enable)
	{
		mControlWord = _mm_getcsr();
		// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
		_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
	}
	else
	{
		PX_UNUSED(enable);
		// caller promised the required FPU state is already set — verify it
		PX_ASSERT(_mm_getcsr() & _MM_FLUSH_ZERO_ON);
		PX_ASSERT(_mm_getcsr() & (1 << 6));
		PX_ASSERT(_mm_getcsr() & _MM_MASK_MASK);
	}
#endif
}
// Restores the MXCSR value captured by the constructor (only if it actually
// modified the register).
PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard()
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
	if(mEnabled)
	{
		// restore control word and clear exception flags
		// (setting exception state flags cause exceptions on the first following fp operation)
		_mm_setcsr(mControlWord & PxU32(~_MM_EXCEPT_MASK));
	}
#endif
}
#else
#error No SIMD implementation for this unix platform.
#endif // PX_LINUX || PX_OSX
#endif // #ifndef PXFOUNDATION_PXUNIXFPU_H

View File

@@ -0,0 +1,44 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXINLINEAOS_H
#define PXFOUNDATION_PXUNIXINLINEAOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "foundation/unix/sse2/PxUnixSse2InlineAoS.h"
#elif PX_NEON
#include "foundation/unix/neon/PxUnixNeonInlineAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PXFOUNDATION_PXUNIXINLINEAOS_H

View File

@@ -0,0 +1,127 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
#define PSFOUNDATION_PSUNIXINTRINSICS_H
#include "foundation/PxAssert.h"
#include <math.h>
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !(PX_LINUX || PX_APPLE_FAMILY)
#error "This file should only be included by unix builds!!"
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
// Full memory barrier (GCC/Clang __sync builtin): orders all prior loads and
// stores before all subsequent ones, and prevents compiler reordering.
PX_FORCE_INLINE void PxMemoryBarrier()
{
	__sync_synchronize();
}
/*!
Return the index of the highest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxHighestSetBitUnsafe(uint64_t v)
{
	// __builtin_clzl counts leading zeros; the highest set bit of a 64-bit
	// value is therefore 63 minus that count.
	const uint32_t leadingZeros = uint32_t(__builtin_clzl(v));
	return 63u - leadingZeros;
}
/*!
Return the index of the highest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v)
{
	// highest set bit of a 32-bit value = 31 - (leading zero count)
	const uint32_t leadingZeros = uint32_t(__builtin_clz(v));
	return 31u - leadingZeros;
}
/*!
Return the index of the lowest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxLowestSetBitUnsafe(uint64_t v)
{
	// the trailing-zero count is exactly the index of the lowest set bit
	const uint32_t trailingZeros = uint32_t(__builtin_ctzl(v));
	return trailingZeros;
}
/*!
Return the index of the lowest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v)
{
	// the trailing-zero count is exactly the index of the lowest set bit
	const uint32_t trailingZeros = uint32_t(__builtin_ctz(v));
	return trailingZeros;
}
/*!
Returns the number of leading zero bits in \c v. Returns 32 for v=0.
(The previous comment claiming "index of the highest set bit" did not match
the implementation, which returns __builtin_clz(v).)
*/
PX_INLINE uint32_t PxCountLeadingZeros(uint32_t v)
{
	// __builtin_clz is undefined for 0, so that case is handled explicitly
	return v ? uint32_t(__builtin_clz(v)) : 32u;
}
/*!
Prefetch the cache line containing \c ptr+offset into cache (read intent,
high temporal locality). Line size: 64B on x86, 32B on 32-bit ARM.
*/
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
#if PX_CUDA_COMPILER
	// same builtin; only difference is that PX_RESTRICT is omitted for the CUDA compiler
	__builtin_prefetch(reinterpret_cast<const char*>(ptr) + offset, 0, 3);
#else
	__builtin_prefetch(reinterpret_cast<const char* PX_RESTRICT>(ptr) + offset, 0, 3);
#endif
}
/*!
Prefetch \c count bytes starting at \c ptr, issuing one prefetch per
(assumed 64-byte) cache line spanned by the range. count == 0 is a no-op.
*/
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
	// Guard against count == 0: the do/while below would otherwise compute
	// lines == 0 (for a line-aligned ptr), and `--lines` would wrap the
	// unsigned counter, prefetching ~2^64 lines.
	if(count == 0)
		return;
	const char* cp = reinterpret_cast<const char*>(ptr);
	const uint64_t p = size_t(ptr);
	// first and last cache line touched by [ptr, ptr + count)
	const uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
	uint64_t lines = endLine - startLine + 1;
	do
	{
		PxPrefetchLine(cp);
		cp += 64;
	} while(--lines);
}
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif // #ifndef PSFOUNDATION_PSUNIXINTRINSICS_H

View File

@@ -0,0 +1,179 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXINTRINSICS_H
#define PXFOUNDATION_PXUNIXINTRINSICS_H
#include "foundation/PxAssert.h"
#if !(PX_LINUX || PX_APPLE_FAMILY)
#error "This file should only be included by Unix builds!!"
#endif
#if PX_LINUX && !PX_CUDA_COMPILER && !PX_EMSCRIPTEN
// Linux and CUDA compilation does not work with std::isfinite, as it is not marked as CUDA callable
#include <cmath>
#ifndef isfinite
using std::isfinite;
#endif
#endif
#include <math.h>
#include <float.h>
#if !PX_DOXYGEN
namespace physx
{
#endif
// Scalar math/memory primitives for Unix platforms. These are the
// platform-abstraction points the rest of the SDK calls instead of libc
// directly; on this platform most are thin wrappers with no special-casing.
namespace intrinsics
{
//! \brief platform-specific absolute value
PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
{
	return ::fabsf(a);
}

//! \brief platform-specific select float: returns b when a >= 0, else c
PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
{
	return (a >= 0.0f) ? b : c;
}

//! \brief platform-specific sign; note sign(0) == 1, sign(-0) == 1
PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
{
	return (a >= 0.0f) ? 1.0f : -1.0f;
}

//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
{
	return 1.0f / a;
}

//! \brief platform-specific reciprocal estimate (no fast approximation here;
//! identical to recip on this platform)
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
	return 1.0f / a;
}

//! \brief platform-specific square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
{
	return ::sqrtf(a);
}

//! \brief platform-specific reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
{
	return 1.0f / ::sqrtf(a);
}

//! \brief fast reciprocal square root (identical to recipSqrt on this platform)
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
	return 1.0f / ::sqrtf(a);
}

//! \brief platform-specific sine
PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
{
	return ::sinf(a);
}

//! \brief platform-specific cosine
PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
{
	return ::cosf(a);
}

//! \brief platform-specific minimum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
{
	return a < b ? a : b;
}

//! \brief platform-specific maximum
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
{
	return a > b ? a : b;
}

//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
{
	// std::isfinite not recommended as of Feb 2017, since it doesn't work with g++/clang's floating point optimization.
	// Instead inspect the IEEE-754 bits directly: a float is non-finite iff its
	// exponent field is all ones (mask 0x7f800000 after clearing the sign bit).
	union localU { PxU32 i; float f; } floatUnion;
	floatUnion.f = a;
	return !((floatUnion.i & 0x7fffffff) >= 0x7f800000);
}

//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
{
	return !!isfinite(a);
}

/*!
Sets \c count bytes starting at \c dst to zero.
*/
PX_FORCE_INLINE void* memZero(void* dest, size_t count)
{
	return memset(dest, 0, count);
}

/*!
Sets \c count bytes starting at \c dst to \c c.
*/
PX_FORCE_INLINE void* memSet(void* dest, int32_t c, size_t count)
{
	return memset(dest, c, count);
}

/*!
Copies \c count bytes from \c src to \c dst. Use memMove if regions overlap.
*/
PX_FORCE_INLINE void* memCopy(void* dest, const void* src, size_t count)
{
	return memcpy(dest, src, count);
}

/*!
Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
*/
PX_FORCE_INLINE void* memMove(void* dest, const void* src, size_t count)
{
	return memmove(dest, src, count);
}
} // namespace intrinsics
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif // #ifndef PXFOUNDATION_PXUNIXINTRINSICS_H

View File

@@ -0,0 +1,76 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
#define PXFOUNDATION_PXUNIXTRIGCONSTANTS_H
#include "foundation/PxMath.h"
#include "foundation/PxPreprocessor.h"
namespace physx
{
namespace aos
{
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-variable-declarations"
#endif
#endif
// Weakly-linked extern-const globals: every translation unit that includes
// this header references one shared definition instead of duplicating the
// tables (the weak attribute collapses duplicate definitions at link time).
#define PX_GLOBALCONST extern const __attribute__((weak))

// 16-byte-aligned float[4] wrapper so each table can be loaded as a single
// SIMD vector.
PX_ALIGN_PREFIX(16)
struct PX_VECTORF32
{
	float f[4];
} PX_ALIGN_SUFFIX(16);

// Maclaurin-series coefficients for sin(x): 1/1!, -1/3!, 1/5!, -1/7!, ...
// split across three 4-wide tables (12 terms total).
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
// Maclaurin-series coefficients for cos(x): 1/0!, -1/2!, 1/4!, -1/6!, ...
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
// 1/(2*pi) and 2*pi splatted across all four lanes, for range reduction.
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif
#endif
} // namespace aos
} // namespace physx
#endif //PXFOUNDATION_PXUNIXTRIGCONSTANTS_H

View File

@@ -0,0 +1,136 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXNEONAOS_H
#define PXFOUNDATION_PXUNIXNEONAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
// only ARM NEON compatible platforms should reach this
#include <arm_neon.h>
namespace physx
{
namespace aos
{
// Mapping of the aos SIMD abstraction onto ARM NEON register types.
// Note FloatV is a 2-lane float32x2_t, narrower than the 4-lane vector,
// quaternion and bool types.
typedef float32x2_t FloatV;
typedef float32x4_t Vec3V;
typedef float32x4_t Vec4V;
typedef uint32x4_t BoolV;
typedef float32x4_t QuatV;
typedef uint32x4_t VecU32V;
typedef int32x4_t VecI32V;
typedef uint16x8_t VecU16V;
typedef int16x8_t VecI16V;
typedef uint8x16_t VecU8V;

// Argument-passing aliases: SIMD values are passed by reference.
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &

// KS - TODO - make an actual VecCrossV type for NEON
#define VecCrossV Vec3V

typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV &

// 3x3 matrix stored as three 16-byte-aligned SIMD columns.
PX_ALIGN_PREFIX(16)
struct Mat33V
{
	Mat33V()
	{
	}
	Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
	{
	}
	Vec3V PX_ALIGN(16, col0);
	Vec3V PX_ALIGN(16, col1);
	Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);

// 3x4 matrix: three rotation columns plus a fourth column (commonly a
// translation) — NOTE(review): column semantics inferred from shape; confirm
// against usage.
PX_ALIGN_PREFIX(16)
struct Mat34V
{
	Mat34V()
	{
	}
	Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
	{
	}
	Vec3V PX_ALIGN(16, col0);
	Vec3V PX_ALIGN(16, col1);
	Vec3V PX_ALIGN(16, col2);
	Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);

// 4x3 matrix: three 4-component SIMD columns.
PX_ALIGN_PREFIX(16)
struct Mat43V
{
	Mat43V()
	{
	}
	Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
	{
	}
	Vec4V PX_ALIGN(16, col0);
	Vec4V PX_ALIGN(16, col1);
	Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);

// 4x4 matrix: four 4-component SIMD columns.
PX_ALIGN_PREFIX(16)
struct Mat44V
{
	Mat44V()
	{
	}
	Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
	{
	}
	Vec4V PX_ALIGN(16, col0);
	Vec4V PX_ALIGN(16, col1);
	Vec4V PX_ALIGN(16, col2);
	Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXNEONAOS_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,187 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXSSE2AOS_H
#define PXFOUNDATION_PXUNIXSSE2AOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
namespace physx
{
namespace aos
{
#if PX_EMSCRIPTEN
typedef int8_t __int8_t;
typedef int16_t __int16_t;
typedef int32_t __int32_t;
typedef int64_t __int64_t;
typedef uint16_t __uint16_t;
typedef uint32_t __uint32_t;
typedef uint64_t __uint64_t;
#endif
typedef union UnionM128
{
UnionM128()
{
}
UnionM128(__m128 in)
{
m128 = in;
}
UnionM128(__m128i in)
{
m128i = in;
}
operator __m128()
{
return m128;
}
operator __m128() const
{
return m128;
}
float m128_f32[4];
__int8_t m128_i8[16];
__int16_t m128_i16[8];
__int32_t m128_i32[4];
__int64_t m128_i64[2];
__uint16_t m128_u16[8];
__uint32_t m128_u32[4];
__uint64_t m128_u64[2];
__m128 m128;
__m128i m128i;
} UnionM128;
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 QuatV;
typedef __m128i VecI32V;
typedef UnionM128 VecU32V;
typedef UnionM128 VecU16V;
typedef UnionM128 VecI16V;
typedef UnionM128 VecU8V;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
// Pre-shuffled copies of one cross-product operand so that repeated cross
// products against the same vector skip the two shuffles per call.
// Exact lane ordering of mL1/mR1 is established by the prepare/cross ops
// (not visible in this header) — treat as opaque.
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
// Shift count pre-formatted for the _mm_sll/_mm_srl family, which reads
// the count from the low 64 bits of a vector register.
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
// Column-major 3x3 matrix: three Vec3V columns, each 16-byte aligned.
// Default ctor leaves columns uninitialized (deliberate, for performance).
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
// Column-major 3x4 matrix: 3x3 block in col0..col2 plus an extra column
// col3 (commonly the translation — semantics set by the math ops, not here).
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
// Column-major 4x3 matrix: three Vec4V columns.
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
// Column-major 4x4 matrix: four Vec4V columns.
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXSSE2AOS_H

View File

@@ -0,0 +1,719 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
#define PXFOUNDATION_PXUNIXSSE2INLINEAOS_H
namespace physx
{
namespace aos
{
//////////////////////////////////////////////////////////////////////
//Test that Vec3V and FloatV are legal
//////////////////////////////////////////////////////////////////////
#define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f
// A FloatV is well-formed only when the scalar is replicated across all
// four lanes. Verify lanes y, z and w against lane x with exact equality
// (any NaN lane yields false, same as the direct == form would).
PX_FORCE_INLINE static bool isValidFloatV(const FloatV a)
{
const PxF32 lane0 = V4ReadX(a);
const PxF32 lane1 = V4ReadY(a);
const PxF32 lane2 = V4ReadZ(a);
const PxF32 lane3 = V4ReadW(a);
return !(lane0 != lane1 || lane0 != lane2 || lane0 != lane3);
}
}
}
#include "../../PxVecMathSSE.h"
namespace physx
{
namespace aos
{
#define PX_FPCLASS_SNAN 0x0001 /* signaling NaN */
#define PX_FPCLASS_QNAN 0x0002 /* quiet NaN */
#define PX_FPCLASS_NINF 0x0004 /* negative infinity */
#define PX_FPCLASS_PINF 0x0200 /* positive infinity */
namespace internalSimd
{
#if !PX_EMSCRIPTEN
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
#endif
#endif
const PX_ALIGN(16, PxF32 gMaskXYZ[4]) = { physx::PxUnionCast<PxF32>(0xffffffff), physx::PxUnionCast<PxF32>(0xffffffff),
physx::PxUnionCast<PxF32>(0xffffffff), 0 };
#if PX_CLANG
#if PX_LINUX
#pragma clang diagnostic pop
#endif
#endif
#else
// emscripten doesn't like the PxUnionCast data structure
// the following is what windows and xbox does -- using these for emscripten
const PX_ALIGN(16, PxU32 gMaskXYZ[4]) = { 0xffffffff, 0xffffffff, 0xffffffff, 0 };
#endif
}
namespace vecMathTests
{
// True when all four lanes of two boolean vectors match bit-exactly.
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalSimd::BAllTrue4_R(VecI32V_IsEq(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b))) != 0;
}
// True when all four signed-integer lanes compare equal.
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = internalSimd::m128_I2F(_mm_cmpeq_epi32(a, b));
return internalSimd::BAllTrue4_R(c) != 0;
}
// Absolute tolerance used by the near-equality tests below.
#define VECMATH_AOS_EPSILON (1e-3f)
// True when the (replicated) scalars differ by less than the epsilon.
// Only lane x is compared; both inputs are asserted to be valid FloatVs.
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
const FloatV c = FSub(a, b);
const FloatV minError = FLoad(-VECMATH_AOS_EPSILON);
const FloatV maxError = FLoad(VECMATH_AOS_EPSILON);
return _mm_comigt_ss(c, minError) && _mm_comilt_ss(c, maxError);
}
// Per-lane near-equality of x, y, z; each lane of the difference is
// splatted to lane 0 so the scalar comi comparisons can test it.
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
const Vec3V c = V3Sub(a, b);
const Vec3V minError = V3Load(-VECMATH_AOS_EPSILON);
const Vec3V maxError = V3Load(VECMATH_AOS_EPSILON);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError));
}
// Per-lane near-equality of all four lanes, same splat-and-compare scheme.
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c = V4Sub(a, b);
const Vec4V minError = V4Load(-VECMATH_AOS_EPSILON);
const Vec4V maxError = V4Load(VECMATH_AOS_EPSILON);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxError) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), minError) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), maxError));
}
} //vecMathTests
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
// Finiteness check via bit masking: ANDs the value's bits with a constant
// built from the PX_FPCLASS_* flags and requires the result to be all-zero.
// NOTE(review): this tests specific exponent-field bits rather than doing a
// full IEEE classify — presumably a fast conservative check; confirm against
// the other platform implementations before relying on exact NaN/Inf coverage.
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const FloatV vBadNum = FLoad(badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalSimd::FiniteTestEq(vMask, BFFFF()) == 1;
}
// Same bit-mask scheme restricted to lanes x, y, z (w is masked off by BTTTF).
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const Vec3V vBadNum = V3Load(badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return internalSimd::FiniteTestEq(vMask, BFFFF()) == 1;
}
// Same bit-mask scheme applied to all four lanes.
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
/*Vec4V a;
PX_ALIGN(16, PxF32 f[4]);
F32Array_Aligned_From_Vec4V(a, f);
return PxIsFinite(f[0])
&& PxIsFinite(f[1])
&& PxIsFinite(f[2])
&& PxIsFinite(f[3]);*/
PxF32 badNumber =
physx::PxUnionCast<PxF32, PxU32>(PX_FPCLASS_SNAN | PX_FPCLASS_QNAN | PX_FPCLASS_NINF | PX_FPCLASS_PINF);
const Vec4V vBadNum = V4Load(badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return internalSimd::FiniteTestEq(vMask, BFFFF()) == 1;
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
// Load an aligned PxVec3 into a Vec3V; the AND with gMaskXYZ forces the
// unused w lane to zero (Vec3V invariant).
// NOTE(review): the emscripten branch C-casts a PxU32 array to VecI32V& and
// hands it to _mm_and_ps — relies on emscripten's lax intrinsic typing;
// confirm it still compiles on current emsdk.
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
#if !PX_EMSCRIPTEN
return _mm_and_ps(reinterpret_cast<const Vec3V&>(f), V4LoadA(internalSimd::gMaskXYZ));
#else
return _mm_and_ps((Vec3V&)f, (VecI32V&)internalSimd::gMaskXYZ);
#endif
}
// Load an aligned PxVec3 lane by lane ("unsafe": reads only x/y/z, never
// the 4th float beyond the struct, at the cost of a slower set).
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
return _mm_set_ps(0.0f, f.z, f.y, f.x);
}
// Load 3 floats from an aligned float array, zeroing the w lane.
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
#if !PX_EMSCRIPTEN
return _mm_and_ps(V4LoadA(f), V4LoadA(internalSimd::gMaskXYZ));
#else
return _mm_and_ps((Vec3V&)*f, (VecI32V&)internalSimd::gMaskXYZ);
#endif
}
// Store 4 integer lanes to 16-byte-aligned memory (bit-preserving store
// through the float pipeline).
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i)
{
ASSERT_ISALIGNED16(i);
_mm_store_ps(reinterpret_cast<float*>(i), internalSimd::m128_I2F(iv));
}
// Expand 4 bools into a lane mask: true -> 0xffffffff (-1), false -> 0.
PX_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const PX_ALIGN(16, PxI32) b[4] = { -PxI32(f[0]), -PxI32(f[1]), -PxI32(f[2]), -PxI32(f[3]) };
return _mm_load_ps(reinterpret_cast<const float*>(&b));
}
// Store x/y/z of a Vec3V to an aligned PxVec3 via a stack spill (a direct
// 16-byte store would write past the 12-byte struct).
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32) f2[4];
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
// Same as V3StoreA but the destination need not be aligned.
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f)
{
PX_ALIGN(16, PxF32) f2[4];
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
//////////////////////////////////
// FLOATV
//////////////////////////////////
// Absolute value of a replicated scalar: clear the IEEE sign bit in every
// lane. andnot(signMask, a) is bit-identical to and(a, 0x7fffffff).
PX_FORCE_INLINE FloatV FAbs(const FloatV a)
{
ASSERT_ISVALIDFLOATV(a);
PX_ALIGN(16, const PxU32) signMask[4] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
return _mm_andnot_ps(_mm_load_ps(reinterpret_cast<const PxF32*>(signMask)), a);
}
//////////////////////////////////
// VEC3V
//////////////////////////////////
// Canonical basis vectors. _mm_setr_ps lists lanes in x,y,z,w order and
// yields the same register contents as the aligned-table loads it replaces;
// the w lane is zero, preserving the Vec3V invariant.
PX_FORCE_INLINE Vec3V V3UnitX()
{
return _mm_setr_ps(1.0f, 0.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec3V V3UnitY()
{
return _mm_setr_ps(0.0f, 1.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec3V V3UnitZ()
{
return _mm_setr_ps(0.0f, 0.0f, 1.0f, 0.0f);
}
//////////////////////////////////
// VEC4V
//////////////////////////////////
// Canonical 4D basis vectors, built in-register with _mm_setr_ps
// (lane order x,y,z,w) instead of aligned constant-table loads.
PX_FORCE_INLINE Vec4V V4UnitW()
{
return _mm_setr_ps(0.0f, 0.0f, 0.0f, 1.0f);
}
PX_FORCE_INLINE Vec4V V4UnitX()
{
return _mm_setr_ps(1.0f, 0.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec4V V4UnitY()
{
return _mm_setr_ps(0.0f, 1.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec4V V4UnitZ()
{
return _mm_setr_ps(0.0f, 0.0f, 1.0f, 0.0f);
}
// Zero the w lane of a Vec4V by masking with gMaskXYZ (all-ones in x/y/z,
// zero in w). The emscripten branch uses a C-cast of the mask array because
// PxUnionCast is not usable there (see gMaskXYZ definition above).
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
#if !PX_EMSCRIPTEN
return _mm_and_ps(v, V4LoadA(internalSimd::gMaskXYZ));
#else
return _mm_and_ps(v, (VecI32V&)internalSimd::gMaskXYZ);
#endif
}
//////////////////////////////////
// BoolV
//////////////////////////////////
/*
template<int index> PX_FORCE_INLINE BoolV BSplatElement(BoolV a)
{
BoolV result;
result[0] = result[1] = result[2] = result[3] = a[index];
return result;
}
*/
// Broadcast lane `index` of a boolean vector into all four lanes.
// Reads the lane through a float* view of the register spilled to memory,
// then reloads it splatted with V4Load.
template <int index>
BoolV BSplatElement(BoolV a)
{
float* data = reinterpret_cast<float*>(&a);
return V4Load(data[index]);
}
//////////////////////////////////
// MAT33V
//////////////////////////////////
// transpose(a) * b without forming the transpose: each result lane is the
// dot of a column with b (column-dot == row of the transpose).
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
// Explicit 3x3 transpose: gathers the x/y/z lanes across columns into the
// new columns.
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
/*PX_FORCE_INLINE Mat33V PromoteVec3V(const Vec3V v)
{
const BoolV bTFFF = BTFFF();
const BoolV bFTFF = BFTFF();
const BoolV bFFTF = BTFTF();
const Vec3V zero = V3Zero();
return Mat33V(V3Sel(bTFFF, v, zero), V3Sel(bFTFF, v, zero), V3Sel(bFFTF, v, zero));
}*/
//////////////////////////////////
// MAT34V
//////////////////////////////////
// transpose(3x3 part of a) * b: dots b against the first three columns only;
// col3 (translation) is deliberately ignored.
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
const FloatV x = V3Dot(a.col0, b);
const FloatV y = V3Dot(a.col1, b);
const FloatV z = V3Dot(a.col2, b);
return V3Merge(x, y, z);
}
// Transpose of the 3x3 block of a 3x4 matrix; col3 is dropped.
PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
return Mat33V(V3Merge(V3GetX(a.col0), V3GetX(a.col1), V3GetX(a.col2)),
V3Merge(V3GetY(a.col0), V3GetY(a.col1), V3GetY(a.col2)),
V3Merge(V3GetZ(a.col0), V3GetZ(a.col1), V3GetZ(a.col2)));
}
//////////////////////////////////
// MAT44V
//////////////////////////////////
// transpose(a) * b without forming the transpose: one column-dot per lane,
// spilled to an aligned array and re-merged into a Vec4V.
PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
PX_ALIGN(16, FloatV) dotProdArray[4] = { V4Dot(a.col0, b), V4Dot(a.col1, b), V4Dot(a.col2, b), V4Dot(a.col3, b) };
return V4Merge(dotProdArray);
}
// Full 4x4 transpose using the standard SSE helper: copy the four columns
// into registers, transpose them in place, and rebuild the matrix.
// _MM_TRANSPOSE4_PS expands to the same unpacklo/unpackhi lattice the
// hand-written version used, producing identical lane placement.
PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
__m128 r0 = a.col0;
__m128 r1 = a.col1;
__m128 r2 = a.col2;
__m128 r3 = a.col3;
_MM_TRANSPOSE4_PS(r0, r1, r2, r3);
return Mat44V(r0, r1, r2, r3);
}
//////////////////////////////////
// Misc
//////////////////////////////////
/*
// AP: work in progress - use proper SSE intrinsics where possible
PX_FORCE_INLINE VecU16V V4U32PK(VecU32V a, VecU32V b)
{
VecU16V result;
result.m128_u16[0] = PxU16(PxClamp<PxU32>((a).m128_u32[0], 0, 0xFFFF));
result.m128_u16[1] = PxU16(PxClamp<PxU32>((a).m128_u32[1], 0, 0xFFFF));
result.m128_u16[2] = PxU16(PxClamp<PxU32>((a).m128_u32[2], 0, 0xFFFF));
result.m128_u16[3] = PxU16(PxClamp<PxU32>((a).m128_u32[3], 0, 0xFFFF));
result.m128_u16[4] = PxU16(PxClamp<PxU32>((b).m128_u32[0], 0, 0xFFFF));
result.m128_u16[5] = PxU16(PxClamp<PxU32>((b).m128_u32[1], 0, 0xFFFF));
result.m128_u16[6] = PxU16(PxClamp<PxU32>((b).m128_u32[2], 0, 0xFFFF));
result.m128_u16[7] = PxU16(PxClamp<PxU32>((b).m128_u32[3], 0, 0xFFFF));
return result;
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16Or(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_or_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16And(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_and_si128(m128_F2I(a), m128_F2I(b)));
}
*/
/*
PX_FORCE_INLINE VecU16V V4U16Andc(VecU16V a, VecU16V b)
{
return m128_I2F(_mm_andnot_si128(m128_F2I(b), m128_F2I(a)));
}
*/
// Build an integer vector from four scalars (note _mm_set_epi32 takes
// arguments high-lane first, hence the w,z,y,x order).
PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w)
{
return _mm_set_epi32(w, z, y, x);
}
// Broadcast a single integer to all four lanes (loaded through the float
// pipeline, then bit-cast back).
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
return internalSimd::m128_F2I(_mm_load1_ps(reinterpret_cast<const PxF32*>(&i)));
}
// Unaligned load of four integers.
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
return internalSimd::m128_F2I(_mm_loadu_ps(reinterpret_cast<const PxF32*>(i)));
}
// Aligned load of four integers; asserts 16-byte alignment.
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
ASSERT_ISALIGNED16(i);
return internalSimd::m128_F2I(_mm_load_ps(reinterpret_cast<const PxF32*>(i)));
}
// Lane-wise 32-bit integer add.
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return _mm_add_epi32(a, b);
}
// Lane-wise 32-bit integer subtract.
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return _mm_sub_epi32(a, b);
}
// Lane-wise signed a > b, as an all-ones/all-zeros boolean mask.
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_cmpgt_epi32(a, b));
}
// Lane-wise a == b, as an all-ones/all-zeros boolean mask.
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_cmpeq_epi32(a, b));
}
// Lane select: c ? a : b. Same implementation as VecI32V_Sel below;
// both names are kept for API compatibility.
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return _mm_or_si128(_mm_andnot_si128(internalSimd::m128_F2I(c), b), _mm_and_si128(internalSimd::m128_F2I(c), a));
}
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
return _mm_setzero_si128();
}
// Lane select: c ? a : b.
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_si128(_mm_andnot_si128(internalSimd::m128_F2I(c), b), _mm_and_si128(internalSimd::m128_F2I(c), a));
}
// Format a shift count for _mm_sll/_mm_srl: those read the count from the
// low 64 bits, so keep only lane x and zero the rest.
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV s;
s.shift = VecI32V_Sel(BTFFF(), shift, VecI32V_Zero());
return s;
}
// All lanes shifted left by the prepared count.
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return _mm_sll_epi32(a, count.shift);
}
// All lanes shifted right (logical) by the prepared count.
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return _mm_srl_epi32(a, count.shift);
}
// All lanes shifted left by an immediate count.
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
return _mm_slli_epi32(a, PxI32(count));
}
// All lanes shifted right by an immediate count.
// NOTE(review): this overload is an arithmetic shift (_mm_srai, sign-
// extending) while the VecShiftV overload above is logical (_mm_srl) —
// results differ for negative lanes; confirm callers expect this.
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
return _mm_srai_epi32(a, PxI32(count));
}
// Lane-wise bitwise AND.
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return _mm_and_si128(a, b);
}
// Lane-wise bitwise OR.
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return _mm_or_si128(a, b);
}
// Splat lane x into all lanes (shuffle runs through the float domain).
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return internalSimd::m128_F2I(_mm_shuffle_ps(internalSimd::m128_I2F(a), internalSimd::m128_I2F(a), _MM_SHUFFLE(0, 0, 0, 0)));
}
// Splat lane y into all lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return internalSimd::m128_F2I(_mm_shuffle_ps(internalSimd::m128_I2F(a), internalSimd::m128_I2F(a), _MM_SHUFFLE(1, 1, 1, 1)));
}
// Splat lane z into all lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return internalSimd::m128_F2I(_mm_shuffle_ps(internalSimd::m128_I2F(a), internalSimd::m128_I2F(a), _MM_SHUFFLE(2, 2, 2, 2)));
}
// Splat lane w into all lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return internalSimd::m128_F2I(_mm_shuffle_ps(internalSimd::m128_I2F(a), internalSimd::m128_I2F(a), _MM_SHUFFLE(3, 3, 3, 3)));
}
// Store lane x to a scalar (bit-preserving 4-byte store).
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{
_mm_store_ss(reinterpret_cast<PxF32*>(i), internalSimd::m128_I2F(a));
}
// Bit-cast a boolean mask to an integer vector.
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return internalSimd::m128_F2I(a);
}
// Bit-cast a boolean mask to an unsigned vector (implicit via UnionM128).
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
// Assemble a vector from the x lanes of four inputs using two move_ss
// merges plus one shuffle (cheaper than four scalar extracts).
PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg x, const VecI32VArg y, const VecI32VArg z, const VecI32VArg w)
{
const __m128 xw = _mm_move_ss(internalSimd::m128_I2F(y), internalSimd::m128_I2F(x)); // y, y, y, x
const __m128 yz = _mm_move_ss(internalSimd::m128_I2F(z), internalSimd::m128_I2F(w)); // z, z, z, w
return internalSimd::m128_F2I(_mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0)));
}
/*
template<int a> PX_FORCE_INLINE VecI32V V4ISplat()
{
VecI32V result;
result.m128_i32[0] = a;
result.m128_i32[1] = a;
result.m128_i32[2] = a;
result.m128_i32[3] = a;
return result;
}
template<PxU32 a> PX_FORCE_INLINE VecU32V V4USplat()
{
VecU32V result;
result.m128_u32[0] = a;
result.m128_u32[1] = a;
result.m128_u32[2] = a;
result.m128_u32[3] = a;
return result;
}
*/
/*
PX_FORCE_INLINE void V4U16StoreAligned(VecU16V val, VecU16V* address)
{
*address = val;
}
*/
// Aligned store of a VecU32V (plain assignment; the union is 16B-aligned).
PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
/*PX_FORCE_INLINE Vec4V V4LoadAligned(Vec4V* addr)
{
return *addr;
}
PX_FORCE_INLINE Vec4V V4LoadUnaligned(Vec4V* addr)
{
return V4LoadU(reinterpret_cast<float*>(addr));
}*/
// Convert integer lanes to float lanes (value conversion, not a bit-cast).
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V in)
{
return _mm_cvtepi32_ps(in);
}
// Convert float lanes to integer lanes with truncation toward zero.
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return _mm_cvttps_epi32(a);
}
// Bit-level reinterpretations (no value conversion). The VecU32V cases go
// through UnionM128's implicit __m128 conversions.
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return internalSimd::m128_I2F(a);
}
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return internalSimd::m128_F2I(a);
}
// Broadcast lane `index` of an unsigned vector to all lanes, via the
// union's per-lane array view.
template <int index>
PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
VecU32V result;
result.m128_u32[0] = result.m128_u32[1] = result.m128_u32[2] = result.m128_u32[3] = a.m128_u32[index];
return result;
}
// Broadcast lane `index` of a float vector to all lanes, reading the lane
// through a float* view of the spilled register.
template <int index>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
float* data = reinterpret_cast<float*>(&a);
return V4Load(data[index]);
}
/*PX_FORCE_INLINE Vec4V V4Ceil(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(PxCeil(a.m128_f32[0]), PxCeil(a.m128_f32[1]), PxCeil(a.m128_f32[2]), PxCeil(a.m128_f32[3]));
}
PX_FORCE_INLINE Vec4V V4Floor(const Vec4V in)
{
UnionM128 a(in);
return V4LoadXYZW(PxFloor(a.m128_f32[0]), PxFloor(a.m128_f32[1]), PxFloor(a.m128_f32[2]), PxFloor(a.m128_f32[3]));
}
PX_FORCE_INLINE VecU32V V4ConvertToU32VSaturate(const Vec4V in, PxU32 power)
{
PX_ASSERT(power == 0 && "Non-zero power not supported in convertToU32VSaturate");
PX_UNUSED(power); // prevent warning in release builds
PxF32 ffffFFFFasFloat = PxF32(0xFFFF0000);
UnionM128 a(in);
VecU32V result;
result.m128_u32[0] = PxU32(PxClamp<PxF32>((a).m128_f32[0], 0.0f, ffffFFFFasFloat));
result.m128_u32[1] = PxU32(PxClamp<PxF32>((a).m128_f32[1], 0.0f, ffffFFFFasFloat));
result.m128_u32[2] = PxU32(PxClamp<PxF32>((a).m128_f32[2], 0.0f, ffffFFFFasFloat));
result.m128_u32[3] = PxU32(PxClamp<PxF32>((a).m128_f32[3], 0.0f, ffffFFFFasFloat));
return result;
}*/
} // namespace aos
} // namespace physx
#endif // PXFOUNDATION_PXUNIXSSE2INLINEAOS_H

View File

@@ -0,0 +1,143 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_AOS_H
#define PX_WINDOWS_AOS_H
// no includes here! this file should be included from PxAOS.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
namespace aos
{
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 VecU32V;
typedef __m128 VecI32V;
typedef __m128 VecU16V;
typedef __m128 VecI16V;
typedef __m128 QuatV;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
// Pre-shuffled copies of one cross-product operand so repeated cross
// products against the same vector skip the per-call shuffles. Lane order
// of mL1/mR1 is fixed by the prepare/cross ops (not visible here).
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
// Shift count pre-formatted for the _mm_sll/_mm_srl family, which reads
// the count from the low 64 bits of a vector register.
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
// Column-major 3x3 matrix: three Vec3V columns, each 16-byte aligned.
// Default ctor leaves columns uninitialized (deliberate, for performance).
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
// Column-major 3x4 matrix: 3x3 block in col0..col2 plus an extra column
// col3 (commonly the translation — semantics set by the math ops, not here).
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
// Column-major 4x3 matrix: three Vec4V columns.
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
// Column-major 4x4 matrix: four Vec4V columns.
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,63 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_FPU_H
#define PX_WINDOWS_FPU_H
// RAII guard for the SSE MXCSR control register (x86/x64 only; the body is
// compiled out on ARM). When enabled: saves the current MXCSR, masks all FP
// exceptions and turns on flush-to-zero + denormals-are-zero for the guard's
// lifetime. When disabled: only asserts that those bits are already set.
PX_INLINE physx::PxSIMDGuard::PxSIMDGuard(bool enable) : mEnabled(enable)
{
#if !PX_ARM && !PX_A64
if (enable)
{
// Save caller's control word so the destructor can restore it.
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
}
else
{
// Caller claims the required mode is already active — verify it.
PX_ASSERT(_mm_getcsr() & _MM_FLUSH_ZERO_ON);
PX_ASSERT(_mm_getcsr() & (1 << 6));
PX_ASSERT(_mm_getcsr() & _MM_MASK_MASK);
}
#endif
}
// Restores the MXCSR saved by the constructor (only if the guard was
// enabled). Exception status flags are cleared on restore: writing sticky
// exception flags back would raise an exception on the next FP op.
PX_INLINE physx::PxSIMDGuard::~PxSIMDGuard()
{
#if !PX_ARM && !PX_A64
if (mEnabled)
{
// restore control word and clear any exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
}
#endif
}
#endif

View File

@@ -0,0 +1,96 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INCLUDE_H
#define PX_WINDOWS_INCLUDE_H
#ifndef _WIN32
#error "This file should only be included by Windows builds!!"
#endif
#ifdef _WINDOWS_ // windows already included
#error "Only include windows.h through this file!!"
#endif
// We only support >= Windows 7, and we need this for critical section and
// Setting this hides some important APIs (e.g. LoadPackagedLibrary), so don't do it
#define _WIN32_WINNT 0x0601
// turn off as much as we can for windows. All we really need is the thread functions(critical sections/Interlocked*
// etc)
#define NOGDICAPMASKS
#define NOVIRTUALKEYCODES
#define NOWINMESSAGES
#define NOWINSTYLES
#define NOSYSMETRICS
#define NOMENUS
#define NOICONS
#define NOKEYSTATES
#define NOSYSCOMMANDS
#define NORASTEROPS
#define NOSHOWWINDOW
#define NOATOM
#define NOCLIPBOARD
#define NOCOLOR
#define NOCTLMGR
#define NODRAWTEXT
#define NOGDI
#define NOMB
#define NOMEMMGR
#define NOMETAFILE
#define NOMINMAX
#define NOOPENFILE
#define NOSCROLL
#define NOSERVICE
#define NOSOUND
#define NOTEXTMETRIC
#define NOWH
#define NOWINOFFSETS
#define NOCOMM
#define NOKANJI
#define NOHELP
#define NOPROFILER
#define NODEFERWINDOWPOS
#define NOMCX
#define WIN32_LEAN_AND_MEAN
// We need a slightly wider API surface for e.g. MultiByteToWideChar
#define NOUSER
#define NONLS
#define NOMSG
#pragma warning(push)
#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#include <windows.h>
#pragma warning(pop)
#if PX_SSE2
#include <xmmintrin.h>
#endif
#endif

View File

@@ -0,0 +1,610 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INLINE_AOS_H
#define PX_WINDOWS_INLINE_AOS_H
namespace physx
{
namespace aos
{
//////////////////////////////////////////////////////////////////////
//Test that Vec3V and FloatV are legal
//////////////////////////////////////////////////////////////////////
#define FLOAT_COMPONENTS_EQUAL_THRESHOLD 0.01f
// A FloatV is well-formed only when all four SIMD lanes carry the same scalar.
// Any NaN lane makes the equality tests fail, so NaN-carrying values are
// reported as invalid, matching the negated-inequality formulation.
PX_FORCE_INLINE bool isValidFloatV(const FloatV a)
{
	const PxF32 lane0 = V4ReadX(a);
	const PxF32 lane1 = V4ReadY(a);
	const PxF32 lane2 = V4ReadZ(a);
	const PxF32 lane3 = V4ReadW(a);
	return (lane0 == lane1) && (lane0 == lane2) && (lane0 == lane3);
}
}
}
#include "../PxVecMathSSE.h"
namespace physx
{
namespace aos
{
/////////////////////////////////////////////////////////////////////
////FUNCTIONS USED ONLY FOR ASSERTS IN VECTORISED IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// USED ONLY INTERNALLY
//////////////////////////////////////////////////////////////////////
namespace internalSimd
{
// Bit mask that, when AND'ed with a 4-lane vector, preserves the x/y/z lanes
// and clears the w lane (used by V3LoadA and V4ClearW below).
const PX_ALIGN(16, PxU32 gMaskXYZ[4]) = { 0xffffffff, 0xffffffff, 0xffffffff, 0 };
} //internalSimd
namespace vecMathTests
{
// True if every lane of boolean vector a matches the corresponding lane of b.
PX_FORCE_INLINE bool allElementsEqualBoolV(const BoolV a, const BoolV b)
{
return internalSimd::BAllTrue4_R(VecI32V_IsEq(a, b)) != 0;
}
// True if every 32-bit integer lane of a equals the corresponding lane of b.
PX_FORCE_INLINE bool allElementsEqualVecI32V(const VecI32V a, const VecI32V b)
{
BoolV c = internalSimd::m128_I2F(_mm_cmpeq_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
return internalSimd::BAllTrue4_R(c) != 0;
}
// Absolute tolerance used by the near-equality helpers below.
#define VECMATH_AOS_EPSILON (1e-3f)
static const FloatV minFError = FLoad(-VECMATH_AOS_EPSILON);
static const FloatV maxFError = FLoad(VECMATH_AOS_EPSILON);
static const Vec3V minV3Error = V3Load(-VECMATH_AOS_EPSILON);
static const Vec3V maxV3Error = V3Load(VECMATH_AOS_EPSILON);
static const Vec4V minV4Error = V4Load(-VECMATH_AOS_EPSILON);
static const Vec4V maxV4Error = V4Load(VECMATH_AOS_EPSILON);
// True if (a - b) lies strictly inside (-epsilon, epsilon).
// _mm_comigt_ss/_mm_comilt_ss compare only lane x, which is sufficient
// because a valid FloatV has all lanes equal (see ASSERT_ISVALIDFLOATV).
PX_FORCE_INLINE bool allElementsNearEqualFloatV(const FloatV a, const FloatV b)
{
ASSERT_ISVALIDFLOATV(a);
ASSERT_ISVALIDFLOATV(b);
const FloatV c = FSub(a, b);
return _mm_comigt_ss(c, minFError) && _mm_comilt_ss(c, maxFError);
}
// Per-lane near-equality for x/y/z: each lane is splatted into lane x via
// _mm_shuffle_ps so the scalar comiss comparisons can test it.
PX_FORCE_INLINE bool allElementsNearEqualVec3V(const Vec3V a, const Vec3V b)
{
const Vec3V c = V3Sub(a, b);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxV3Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minV3Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxV3Error));
}
// Per-lane near-equality for all four lanes, same splat-then-comiss technique.
PX_FORCE_INLINE bool allElementsNearEqualVec4V(const Vec4V a, const Vec4V b)
{
const Vec4V c = V4Sub(a, b);
return (_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)), maxV4Error) &&
_mm_comigt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), minV4Error) &&
_mm_comilt_ss(_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)), maxV4Error));
}
} //vecMathTests
// True if the scalar carried by a (lane x) is finite (not INF / NaN).
PX_FORCE_INLINE bool isFiniteFloatV(const FloatV a)
{
// Extract lane x and test it with the scalar helper.
PxF32 f;
FStore(a, &f);
return PxIsFinite(f);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const FloatV vBadNum = FloatV_From_F32((PxF32&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
// True if the x/y/z lanes are all finite; the w lane is ignored.
PX_FORCE_INLINE bool isFiniteVec3V(const Vec3V a)
{
// Spill all four lanes to an aligned scratch buffer, then test the first three.
PX_ALIGN(16, PxF32 f[4]);
V4StoreA((Vec4V&)a, f);
return PxIsFinite(f[0]) && PxIsFinite(f[1]) && PxIsFinite(f[2]);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec3V vBadNum = Vec3V_From_F32((PxF32&)badNumber);
const BoolV vMask = BAnd(BAnd(vBadNum, a), BTTTF());
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
// True if all four lanes are finite.
PX_FORCE_INLINE bool isFiniteVec4V(const Vec4V a)
{
PX_ALIGN(16, PxF32 f[4]);
V4StoreA(a, f);
return PxIsFinite(f[0]) && PxIsFinite(f[1]) && PxIsFinite(f[2]) && PxIsFinite(f[3]);
/*
const PxU32 badNumber = (_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF);
const Vec4V vBadNum = Vec4V_From_U32((PxF32&)badNumber);
const BoolV vMask = BAnd(vBadNum, a);
return FiniteTestEq(vMask, BFFFF()) == 1;
*/
}
/////////////////////////////////////////////////////////////////////
////VECTORISED FUNCTION IMPLEMENTATIONS
/////////////////////////////////////////////////////////////////////
// Loads a 16-byte-aligned PxVec3 and clears the (garbage) w lane to zero.
PX_FORCE_INLINE Vec3V V3LoadA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
return _mm_and_ps(_mm_load_ps(&f.x), reinterpret_cast<const Vec4V&>(internalSimd::gMaskXYZ));
}
// w component of result is undefined
// Faster variant of V3LoadA that skips the w-lane masking; the caller must
// not rely on the contents of w.
PX_FORCE_INLINE Vec3V V3LoadUnsafeA(const PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
return _mm_load_ps(&f.x);
}
// Loads 4 aligned floats and forces the w lane to zero.
PX_FORCE_INLINE Vec3V V3LoadA(const PxF32* const f)
{
ASSERT_ISALIGNED16(f);
return V4ClearW(_mm_load_ps(f));
}
// Stores 4 integer lanes to a 16-byte-aligned address (bit-level store via
// the float path, no conversion).
PX_FORCE_INLINE void I4StoreA(const VecI32V iv, PxI32* i)
{
ASSERT_ISALIGNED16(i);
_mm_store_ps((PxF32*)i, iv);
}
// Expands 4 bools into a lane mask: true -> 0xffffffff, false -> 0x0.
PX_FORCE_INLINE BoolV BLoad(const bool* const f)
{
const PX_ALIGN(16, PxU32 b[4]) = { PxU32(-(PxI32)f[0]), PxU32(-(PxI32)f[1]),
PxU32(-(PxI32)f[2]), PxU32(-(PxI32)f[3]) };
return _mm_load_ps((float*)&b);
}
// Stores x/y/z of a to an aligned PxVec3 via an aligned scratch buffer.
PX_FORCE_INLINE void V3StoreA(const Vec3V a, PxVec3& f)
{
ASSERT_ISALIGNED16(&f);
PX_ALIGN(16, PxF32 f2[4]);
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
// Same as V3StoreA but without the alignment requirement on the destination.
PX_FORCE_INLINE void V3StoreU(const Vec3V a, PxVec3& f)
{
PX_ALIGN(16, PxF32 f2[4]);
_mm_store_ps(f2, a);
f = PxVec3(f2[0], f2[1], f2[2]);
}
//////////////////////////////////
// FLOATV
//////////////////////////////////
// Per-lane absolute value: clears the IEEE-754 sign bit of every lane.
PX_FORCE_INLINE FloatV FAbs(const FloatV a)
{
	ASSERT_ISVALIDFLOATV(a);
	// andnot with the sign-bit mask is bit-identical to and-ing with 0x7fffffff.
	PX_ALIGN(16, const static PxU32 signMask[4]) = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	return _mm_andnot_ps(_mm_load_ps(reinterpret_cast<const PxF32*>(signMask)), a);
}
//////////////////////////////////
// VEC3V
//////////////////////////////////
// Canonical basis vectors; w is always zero as required for Vec3V.
PX_FORCE_INLINE Vec3V V3UnitX()
{
	return _mm_setr_ps(1.0f, 0.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec3V V3UnitY()
{
	return _mm_setr_ps(0.0f, 1.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec3V V3UnitZ()
{
	return _mm_setr_ps(0.0f, 0.0f, 1.0f, 0.0f);
}
//////////////////////////////////
// VEC4V
//////////////////////////////////
// 4D canonical basis vectors built directly in a register.
PX_FORCE_INLINE Vec4V V4UnitW()
{
	return _mm_setr_ps(0.0f, 0.0f, 0.0f, 1.0f);
}
PX_FORCE_INLINE Vec4V V4UnitX()
{
	return _mm_setr_ps(1.0f, 0.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec4V V4UnitY()
{
	return _mm_setr_ps(0.0f, 1.0f, 0.0f, 0.0f);
}
PX_FORCE_INLINE Vec4V V4UnitZ()
{
	return _mm_setr_ps(0.0f, 0.0f, 1.0f, 0.0f);
}
// Returns v with the w lane forced to zero.
// Uses a const-correct reinterpret_cast (consistent with V3LoadA) instead of
// a C-style reference cast that silently stripped const from gMaskXYZ.
PX_FORCE_INLINE Vec4V V4ClearW(const Vec4V v)
{
	return _mm_and_ps(v, reinterpret_cast<const Vec4V&>(internalSimd::gMaskXYZ));
}
//////////////////////////////////
// BoolV
//////////////////////////////////
// Broadcasts lane 'index' (0..3) of the boolean vector into all four lanes.
template <int index>
BoolV BSplatElement(BoolV a)
{
return internalSimd::m128_I2F(
_mm_shuffle_epi32(internalSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
//////////////////////////////////
// MAT33V
//////////////////////////////////
// Computes transpose(a) * b: lane-wise products of each column with b are
// transposed so that summing the three vectors yields the per-column dot
// products (dot(col0,b), dot(col1,b), dot(col2,b)).
PX_FORCE_INLINE Vec3V M33TrnspsMulV3(const Mat33V& a, const Vec3V b)
{
Vec3V v0 = V3Mul(a.col0, b);
Vec3V v1 = V3Mul(a.col1, b);
Vec3V v2 = V3Mul(a.col2, b);
V3Transpose(v0, v1, v2);
return V3Add(V3Add(v0, v1), v2);
}
// Returns the transpose of a 3x3 matrix (columns transposed in place on copies).
PX_FORCE_INLINE Mat33V M33Trnsps(const Mat33V& a)
{
Vec3V col0 = a.col0, col1 = a.col1, col2 = a.col2;
V3Transpose(col0, col1, col2);
return Mat33V(col0, col1, col2);
}
//////////////////////////////////
// MAT34V
//////////////////////////////////
// Computes transpose(rotation part of a) * b; the translation column (col3)
// is not used. Same transpose-and-sum technique as M33TrnspsMulV3.
PX_FORCE_INLINE Vec3V M34TrnspsMul33V3(const Mat34V& a, const Vec3V b)
{
Vec3V v0 = V3Mul(a.col0, b);
Vec3V v1 = V3Mul(a.col1, b);
Vec3V v2 = V3Mul(a.col2, b);
V3Transpose(v0, v1, v2);
return V3Add(V3Add(v0, v1), v2);
}
// Returns the transpose of the upper-left 3x3 block of a 3x4 matrix.
PX_FORCE_INLINE Mat33V M34Trnsps33(const Mat34V& a)
{
Vec3V col0 = a.col0, col1 = a.col1, col2 = a.col2;
V3Transpose(col0, col1, col2);
return Mat33V(col0, col1, col2);
}
/*PX_FORCE_INLINE Mat34V M34Inverse(const Mat34V& a)
{
Mat34V aInv;
const BoolV tfft = BTFFT();
const BoolV tttf = BTTTF();
const FloatV zero = V3Zero();
const Vec3V cross01 = V3Cross(a.col0, a.col1);
const Vec3V cross12 = V3Cross(a.col1, a.col2);
const Vec3V cross20 = V3Cross(a.col2, a.col0);
const FloatV dot = V3Dot(cross01, a.col2);
const FloatV invDet = _mm_rcp_ps(dot);
const Vec3V mergeh = _mm_unpacklo_ps(cross12, cross01);
const Vec3V mergel = _mm_unpackhi_ps(cross12, cross01);
Vec3V colInv0 = _mm_unpacklo_ps(mergeh, cross20);
colInv0 = _mm_or_ps(_mm_andnot_ps(tttf, zero), _mm_and_ps(tttf, colInv0));
const Vec3V zppd = _mm_shuffle_ps(mergeh, cross20, _MM_SHUFFLE(3, 0, 0, 2));
const Vec3V pbwp = _mm_shuffle_ps(cross20, mergeh, _MM_SHUFFLE(3, 3, 1, 0));
const Vec3V colInv1 = _mm_or_ps(_mm_andnot_ps(BTFFT(), pbwp), _mm_and_ps(BTFFT(), zppd));
const Vec3V xppd = _mm_shuffle_ps(mergel, cross20, _MM_SHUFFLE(3, 0, 0, 0));
const Vec3V pcyp = _mm_shuffle_ps(cross20, mergel, _MM_SHUFFLE(3, 1, 2, 0));
const Vec3V colInv2 = _mm_or_ps(_mm_andnot_ps(tfft, pcyp), _mm_and_ps(tfft, xppd));
aInv.col0 = _mm_mul_ps(colInv0, invDet);
aInv.col1 = _mm_mul_ps(colInv1, invDet);
aInv.col2 = _mm_mul_ps(colInv2, invDet);
aInv.col3 = M34Mul33V3(aInv, V3Neg(a.col3));
return aInv;
}*/
//////////////////////////////////
// MAT44V
//////////////////////////////////
// Computes transpose(a) * b for a 4x4 matrix: lane-wise column products are
// transposed so the four sums become the per-column dot products with b.
PX_FORCE_INLINE Vec4V M44TrnspsMulV4(const Mat44V& a, const Vec4V b)
{
Vec4V v0 = V4Mul(a.col0, b);
Vec4V v1 = V4Mul(a.col1, b);
Vec4V v2 = V4Mul(a.col2, b);
Vec4V v3 = V4Mul(a.col3, b);
V4Transpose(v0, v1, v2, v3);
return V4Add(V4Add(v0, v1), V4Add(v2, v3));
}
// Returns the transpose of a 4x4 matrix.
PX_FORCE_INLINE Mat44V M44Trnsps(const Mat44V& a)
{
Vec4V col0 = a.col0, col1 = a.col1, col2 = a.col2, col3 = a.col3;
V4Transpose(col0, col1, col2, col3);
return Mat44V(col0, col1, col2, col3);
}
//////////////////////////////////
// Misc
//////////////////////////////////
// Builds an integer vector from 4 scalars (x in lane 0 .. w in lane 3).
PX_FORCE_INLINE VecI32V I4LoadXYZW(const PxI32& x, const PxI32& y, const PxI32& z, const PxI32& w)
{
return internalSimd::m128_I2F(_mm_set_epi32(w, z, y, x));
}
// Broadcasts one integer's bit pattern into all four lanes (reinterpreting
// load through the float path; no int->float conversion happens).
PX_FORCE_INLINE VecI32V I4Load(const PxI32 i)
{
return _mm_load1_ps(reinterpret_cast<const PxF32*>(&i));
}
// Unaligned load of 4 integers (bit-level, via the float load path).
PX_FORCE_INLINE VecI32V I4LoadU(const PxI32* i)
{
return _mm_loadu_ps(reinterpret_cast<const PxF32*>(i));
}
// Aligned load of 4 integers; the address must be 16-byte aligned.
PX_FORCE_INLINE VecI32V I4LoadA(const PxI32* i)
{
ASSERT_ISALIGNED16(i);
return _mm_load_ps(reinterpret_cast<const PxF32*>(i));
}
// Lane-wise 32-bit integer addition (wraps on overflow like _mm_add_epi32).
PX_FORCE_INLINE VecI32V VecI32V_Add(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_add_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Lane-wise 32-bit integer subtraction.
PX_FORCE_INLINE VecI32V VecI32V_Sub(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_sub_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Lane-wise signed greater-than compare; each lane becomes all-ones or zero.
PX_FORCE_INLINE BoolV VecI32V_IsGrtr(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_cmpgt_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Lane-wise integer equality compare; each lane becomes all-ones or zero.
PX_FORCE_INLINE BoolV VecI32V_IsEq(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_cmpeq_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Lane-wise select: lane from a where the mask lane is set, else from b.
PX_FORCE_INLINE VecI32V V4I32Sel(const BoolV c, const VecI32V a, const VecI32V b)
{
return V4U32Sel(c, a, b);
}
// All-zero integer vector (bit pattern identical to the float zero vector).
PX_FORCE_INLINE VecI32V VecI32V_Zero()
{
return V4Zero(); // PT: TODO: unify VecI32V / VecU32V on Windows / Linux, not the same types (?!)
}
// Select a or b by mask c. Asserts that c is all-true or all-false: the
// bitwise blend below is lane-wise, but callers are expected to pass a
// uniform mask here.
PX_FORCE_INLINE VecI32V VecI32V_Sel(const BoolV c, const VecI32VArg a, const VecI32VArg b)
{
PX_ASSERT(vecMathTests::allElementsEqualBoolV(c, BTTTT()) ||
vecMathTests::allElementsEqualBoolV(c, BFFFF()));
return _mm_or_ps(_mm_andnot_ps(c, b), _mm_and_ps(c, a));
}
// Prepares a shift count for VecI32V_LeftShift/RightShift: keeps lane x of
// 'shift' (the lane the _mm_sll/srl intrinsics read the count from) and
// zeroes the remaining lanes via the BTFFF mask.
PX_FORCE_INLINE VecShiftV VecI32V_PrepareShift(const VecI32VArg shift)
{
VecShiftV preparedShift;
preparedShift.shift = _mm_or_ps(_mm_andnot_ps(BTFFF(), VecI32V_Zero()), _mm_and_ps(BTFFF(), shift));
return preparedShift;
}
// Shifts all lanes left by the count held in count.shift (logical shift).
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalSimd::m128_I2F(_mm_sll_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(count.shift)));
}
// Shifts all lanes right by the count held in count.shift.
// NOTE(review): this variant uses a LOGICAL right shift (_mm_srl_epi32) while
// the immediate-count overload below uses an ARITHMETIC shift (_mm_srai_epi32);
// confirm the asymmetry is intended for negative lane values.
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const VecShiftVArg count)
{
return internalSimd::m128_I2F(_mm_srl_epi32(internalSimd::m128_F2I(a), internalSimd::m128_F2I(count.shift)));
}
// Shifts all lanes left by an immediate count (logical shift).
PX_FORCE_INLINE VecI32V VecI32V_LeftShift(const VecI32VArg a, const PxU32 count)
{
return internalSimd::m128_I2F(_mm_slli_epi32(internalSimd::m128_F2I(a), count));
}
// Shifts all lanes right by an immediate count (arithmetic, sign-extending).
PX_FORCE_INLINE VecI32V VecI32V_RightShift(const VecI32VArg a, const PxU32 count)
{
return internalSimd::m128_I2F(_mm_srai_epi32(internalSimd::m128_F2I(a), count));
}
// Lane-wise bitwise AND.
PX_FORCE_INLINE VecI32V VecI32V_And(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_and_si128(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Lane-wise bitwise OR.
PX_FORCE_INLINE VecI32V VecI32V_Or(const VecI32VArg a, const VecI32VArg b)
{
return internalSimd::m128_I2F(_mm_or_si128(internalSimd::m128_F2I(a), internalSimd::m128_F2I(b)));
}
// Broadcasts lane x into all four lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetX(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0));
}
// Broadcasts lane y into all four lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetY(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1));
}
// Broadcasts lane z into all four lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetZ(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2));
}
// Broadcasts lane w into all four lanes.
PX_FORCE_INLINE VecI32V VecI32V_GetW(const VecI32VArg a)
{
return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3));
}
// Stores lane x of a to *i as a raw 32-bit value.
PX_FORCE_INLINE void PxI32_From_VecI32V(const VecI32VArg a, PxI32* i)
{
_mm_store_ss(reinterpret_cast<PxF32*>(i), a);
}
// BoolV and VecI32V share the same underlying register type here, so these
// conversions are identity reinterprets.
PX_FORCE_INLINE VecI32V VecI32V_From_BoolV(const BoolVArg a)
{
return a;
}
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
PX_FORCE_INLINE VecU32V VecU32V_From_BoolV(const BoolVArg a)
{
return a;
}
// Builds (a.x, b.y, c.z, d.w): _mm_move_ss grabs the low lane from one
// operand and the high lanes from the other, then a shuffle picks the final
// lane arrangement.
PX_FORCE_INLINE VecI32V VecI32V_Merge(const VecI32VArg a, const VecI32VArg b, const VecI32VArg c, const VecI32VArg d)
{
const __m128 xw = _mm_move_ss(b, a); // y, y, y, x
const __m128 yz = _mm_move_ss(c, d); // z, z, z, w
return _mm_shuffle_ps(xw, yz, _MM_SHUFFLE(0, 2, 1, 0));
}
// Stores a whole vector to an aligned address via plain assignment.
PX_FORCE_INLINE void V4U32StoreAligned(VecU32V val, VecU32V* address)
{
*address = val;
}
// Numeric conversion: each signed integer lane becomes the equivalent float.
PX_FORCE_INLINE Vec4V Vec4V_From_VecI32V(VecI32V a)
{
return _mm_cvtepi32_ps(internalSimd::m128_F2I(a));
}
// Numeric conversion with truncation toward zero (cvtt).
PX_FORCE_INLINE VecI32V VecI32V_From_Vec4V(Vec4V a)
{
return internalSimd::m128_I2F(_mm_cvttps_epi32(a));
}
// The remaining casts are pure bit reinterprets - no value conversion.
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecU32V(VecU32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE Vec4V Vec4V_ReinterpretFrom_VecI32V(VecI32V a)
{
return Vec4V(a);
}
PX_FORCE_INLINE VecU32V VecU32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecU32V(a);
}
PX_FORCE_INLINE VecI32V VecI32V_ReinterpretFrom_Vec4V(Vec4V a)
{
return VecI32V(a);
}
// Broadcasts lane 'index' (0..3) of an unsigned integer vector to all lanes.
template <int index>
PX_FORCE_INLINE VecU32V V4U32SplatElement(VecU32V a)
{
return internalSimd::m128_I2F(_mm_shuffle_epi32(internalSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
// Broadcasts lane 'index' (0..3) of a float vector to all lanes (bit-level
// shuffle through the integer domain).
template <int index>
PX_FORCE_INLINE Vec4V V4SplatElement(Vec4V a)
{
return internalSimd::m128_I2F(_mm_shuffle_epi32(internalSimd::m128_F2I(a), _MM_SHUFFLE(index, index, index, index)));
}
/*PX_FORCE_INLINE Vec4V V4ConvertFromI32V(const VecI32V in)
{
return _mm_cvtepi32_ps(internalSimd::m128_F2I(in));
}*/
} // namespace aos
} // namespace physx
#endif

View File

@@ -0,0 +1,201 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_INTRINSICS_H
#define PX_WINDOWS_INTRINSICS_H
#include "foundation/PxAssert.h"
#include <string.h>
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !PX_WINDOWS_FAMILY
#error "This file should only be included by Windows builds!!"
#endif
#pragma intrinsic(memcmp)
#pragma intrinsic(memcpy)
#pragma intrinsic(memset)
#pragma warning(push)
//'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#pragma warning(disable : 4668)
#if PX_VC == 10
#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
#endif
#include <intrin.h>
#pragma warning(pop)
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#include <math.h>
#pragma warning(pop)
#include <float.h>
// do not include for ARM target
#if !PX_ARM && !PX_A64
#include <mmintrin.h>
#endif
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#if !PX_DOXYGEN
namespace physx
{
#endif
/*
* Implements a memory barrier
*/
PX_FORCE_INLINE void PxMemoryBarrier()
{
// NOTE(review): _ReadWriteBarrier is an MSVC compiler-reordering barrier,
// not a hardware fence - confirm callers do not require a full CPU fence
// (e.g. MemoryBarrier()/_mm_mfence) here.
_ReadWriteBarrier();
/* long Barrier;
__asm {
xchg Barrier, eax
}*/
}
/*!
Returns the index of the highest set bit. Not valid for zero arg.
*/
PX_FORCE_INLINE uint32_t PxHighestSetBitUnsafe(uint64_t v)
{
	// Zero-initialize so the meta-data build (which compiles out the
	// intrinsic) does not return an uninitialized value.
	unsigned long retval = 0;
#ifndef PX_GENERATE_META_DATA
	_BitScanReverse64(&retval, v);
#endif
	return retval;
}
/*!
Returns the index of the highest set bit. Not valid for zero arg.
*/
PX_FORCE_INLINE uint32_t PxHighestSetBitUnsafe(uint32_t v)
{
// _BitScanReverse leaves retval unchanged when v == 0, hence "unsafe".
unsigned long retval;
_BitScanReverse(&retval, v);
return retval;
}
/*!
Returns the index of the lowest set bit. Undefined for zero arg.
*/
PX_FORCE_INLINE uint32_t PxLowestSetBitUnsafe(uint64_t v)
{
	// Zero-initialize so the meta-data build (which compiles out the
	// intrinsic) does not return an uninitialized value.
	unsigned long retval = 0;
#ifndef PX_GENERATE_META_DATA
	_BitScanForward64(&retval, v);
#endif
	return retval;
}
/*!
Returns the index of the lowest set bit. Undefined for zero arg.
*/
PX_FORCE_INLINE uint32_t PxLowestSetBitUnsafe(uint32_t v)
{
// _BitScanForward leaves retval unchanged when v == 0, hence "unsafe".
unsigned long retval;
_BitScanForward(&retval, v);
return retval;
}
/*!
Returns the number of leading zeros in v. Returns 32 for v=0.
*/
PX_FORCE_INLINE uint32_t PxCountLeadingZeros(uint32_t v)
{
	// Guard clause: the bit-scan intrinsic is undefined for a zero input.
	if(!v)
		return 32;
	unsigned long msbIndex = (unsigned long)-1;
	_BitScanReverse(&msbIndex, v);
	return 31 - msbIndex;
}
/*!
Prefetch aligned cache size around \c ptr+offset.
*/
#if !PX_ARM && !PX_A64
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
// cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches.
// However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line
/*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/
// We get slightly better performance prefetching to non-temporal addresses instead of all cache levels
_mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA);
}
#else
// ARM variant: uses the compiler prefetch builtin instead of SSE.
PX_FORCE_INLINE void PxPrefetchLine(const void* ptr, uint32_t offset = 0)
{
// arm does have 32b cache line size
__prefetch(((const char*)ptr + offset));
}
#endif
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if !PX_ARM
// Computes how many 64-byte cache lines [ptr, ptr+count) spans and prefetches
// each of them. With the default count=1 exactly one line is fetched.
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
PxPrefetchLine(cp);
cp += 64;
} while(--lines);
}
#else
// ARM variant: same loop with the 32-byte line size noted in PxPrefetchLine.
PX_FORCE_INLINE void PxPrefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint32_t p = size_t(ptr);
uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5;
uint32_t lines = endLine - startLine + 1;
do
{
PxPrefetchLine(cp);
cp += 32;
} while(--lines);
}
#endif
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,178 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_MATH_INTRINSICS_H
#define PX_WINDOWS_MATH_INTRINSICS_H
#include "foundation/PxAssert.h"
#if !PX_WINDOWS_FAMILY
#error "This file should only be included by Windows builds!!"
#endif
#include <math.h>
#include <float.h>
#if !PX_DOXYGEN
namespace physx
{
namespace intrinsics
{
#endif
//! \brief platform-specific absolute value
PX_CUDA_CALLABLE PX_FORCE_INLINE float abs(float a)
{
return ::fabsf(a);
}
//! \brief platform-specific select float: returns b when a >= 0, else c
PX_CUDA_CALLABLE PX_FORCE_INLINE float fsel(float a, float b, float c)
{
return (a >= 0.0f) ? b : c;
}
//! \brief platform-specific sign: +1 for a >= 0 (including +/-0), -1 otherwise
PX_CUDA_CALLABLE PX_FORCE_INLINE float sign(float a)
{
return (a >= 0.0f) ? 1.0f : -1.0f;
}
//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recip(float a)
{
return 1.0f / a;
}
//! \brief platform-specific reciprocal estimate
//! (on this platform it is the exact division, not a hardware estimate)
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float sqrt(float a)
{
return ::sqrtf(a);
}
//! \brief platform-specific reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrt(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific reciprocal square root estimate
//! (on this platform it is the exact computation, not a hardware estimate)
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific sine (radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE float sin(float a)
{
return ::sinf(a);
}
//! \brief platform-specific cosine (radians)
PX_CUDA_CALLABLE PX_FORCE_INLINE float cos(float a)
{
return ::cosf(a);
}
//! \brief platform-specific minimum; returns b when the operands are
//! unordered (NaN) or equal, matching the (a < b ? a : b) formulation.
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMin(float a, float b)
{
	if(a < b)
		return a;
	return b;
}
//! \brief platform-specific maximum; returns b when the operands are
//! unordered (NaN) or equal, matching the (a > b ? a : b) formulation.
PX_CUDA_CALLABLE PX_FORCE_INLINE float selectMax(float a, float b)
{
	if(a > b)
		return a;
	return b;
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(float a)
{
#if PX_CUDA_COMPILER
return !!isfinite(a);
#else
// Host path classifies via the MSVC _fpclass() categories.
return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
#endif
}
//! \brief platform-specific finiteness check (not INF or NAN)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite(double a)
{
#if PX_CUDA_COMPILER
return !!isfinite(a);
#else
return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
#endif
}
/*!
Sets \c count bytes starting at \c dest to zero.
*/
PX_FORCE_INLINE void* memZero(void* dest, size_t count)
{
return memset(dest, 0, count);
}
/*!
Sets \c count bytes starting at \c dest to \c c.
*/
PX_FORCE_INLINE void* memSet(void* dest, int32_t c, size_t count)
{
return memset(dest, c, count);
}
/*!
Copies \c count bytes from \c src to \c dest. Use memMove if regions overlap.
*/
PX_FORCE_INLINE void* memCopy(void* dest, const void* src, size_t count)
{
return memcpy(dest, src, count);
}
/*!
Copies \c count bytes from \c src to \c dest. Supports overlapping regions.
*/
PX_FORCE_INLINE void* memMove(void* dest, const void* src, size_t count)
{
return memmove(dest, src, count);
}
#if !PX_DOXYGEN
} // namespace intrinsics
} // namespace physx
#endif
#endif

View File

@@ -0,0 +1,60 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_WINDOWS_TRIG_CONSTANTS_H
#define PX_WINDOWS_TRIG_CONSTANTS_H
namespace physx
{
namespace aos
{
// Emits each constant as a COMDAT-folded definition so this header can be
// included from multiple translation units without duplicate symbols.
#define PX_GLOBALCONST extern const __declspec(selectany)
// 16-byte-aligned quad of floats, suitable for aligned SIMD loads.
__declspec(align(16)) struct PX_VECTORF32
{
float f[4];
};
// Polynomial coefficients for sine, split across three vectors
// (4 coefficients per vector, in increasing order).
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
// Polynomial coefficients for cosine, same layout as the sine tables.
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
// Splatted 1/(2*pi) and 2*pi, used for range reduction of angles.
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
} // namespace aos
} // namespace physx
#endif