feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletion

View File

@@ -0,0 +1,52 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef CUDA_CONTEXT_MANAGER_H
#define CUDA_CONTEXT_MANAGER_H
#include "foundation/PxPreprocessor.h"
#if PX_SUPPORT_GPU_PHYSX
namespace physx
{
class PxCudaContextManager;
class PxCudaContextManagerDesc;
class PxErrorCallback;
/**
\brief Creates the CUDA context manager for PhysX and APEX.

\param[in] desc              Settings describing the context manager to create.
\param[in] errorCallback     Callback that receives errors raised during creation and use.
\param[in] launchSynchronous Set to true for CUDA to report the actual point of failure
                             (synchronous launches; slower, intended for debugging).
\return The new context manager. NOTE(review): failure behavior (presumably NULL) is not
        visible from this header -- confirm in the implementation before relying on it.
*/
PxCudaContextManager* createCudaContextManager(const PxCudaContextManagerDesc& desc, PxErrorCallback& errorCallback, bool launchSynchronous);
}
#endif // PX_SUPPORT_GPU_PHYSX
#endif // CUDA_CONTEXT_MANAGER_H

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// NOTE(review): the previous guard name "__CUDA_KERNEL_WRANGLER__" begins with a
// double underscore, an identifier reserved for the implementation in C++
// ([lex.name]); renamed to a conforming guard macro.
#ifndef CUDA_KERNEL_WRANGLER_H
#define CUDA_KERNEL_WRANGLER_H
#include "foundation/PxPreprocessor.h"
// Make sure this header is safe for inclusion in headers that are shared with device code.
#if !PX_CUDA_COMPILER
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
#pragma clang diagnostic ignored "-Wdisabled-macro-expansion"
#endif
#include <cuda.h>
#if PX_LINUX && PX_CLANG
#pragma clang diagnostic pop
#endif
namespace physx
{
class PxCudaContextManager;
class PxCudaContext;
/**
\brief Caches CUfunction handles for a fixed list of kernel names.

The constructor receives the kernel names and their count; hadError() reports whether
an error occurred during setup. NOTE(review): the actual lookup logic lives in the
.cpp -- the comments below only describe what this declaration shows.
*/
class KernelWrangler : public PxUserAllocated
{
PX_NOCOPY(KernelWrangler)
public:
// funcNames: array of numFuncs kernel-name strings. NOTE(review): a matching pointer
// member (mKernelNames) exists, so the array is presumably borrowed, not copied --
// it must then outlive this object; confirm against the constructor definition.
KernelWrangler(PxCudaContextManager& cudaContextManager, PxErrorCallback& errorCallback, const char** funcNames, uint16_t numFuncs);
virtual ~KernelWrangler() {}
// Returns the cached handle at funcIndex. Asserts the handle is non-null in debug
// builds; funcIndex itself is not range-checked here.
PX_FORCE_INLINE CUfunction getCuFunction(uint16_t funcIndex) const
{
CUfunction func = mCuFunctions[ funcIndex ];
PX_ASSERT(func);
return func;
}
// Name of the kernel at funcIndex (defined in the .cpp).
const char* getCuFunctionName(uint16_t funcIndex) const;
// True if an error was recorded (mError); set by the implementation in the .cpp.
PX_FORCE_INLINE bool hadError() const { return mError; }
protected:
bool mError;	// error flag exposed through hadError()
const char** mKernelNames;	// kernel-name array passed to the constructor
PxArray<CUfunction> mCuFunctions;	// cached handles, indexed by funcIndex
PxCudaContextManager& mCudaContextManager;
PxCudaContext* mCudaContext;
PxErrorCallback& mErrorCallback;
};
}
#endif
#endif

View File

@@ -0,0 +1,168 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
#ifndef PXG_MEMORY_TRACKER_H
#define PXG_MEMORY_TRACKER_H
#include "foundation/PxAllocator.h"
#include "foundation/PxErrors.h"
#include "foundation/PxFoundation.h"
#include "foundation/PxErrorCallback.h"
#include "foundation/PxMutex.h"
#include "foundation/PxMemory.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxString.h"
#include "foundation/PxAssert.h"
#include <stdio.h>
// usage:
//
// create a static MemTracker object in your allocator .cpp
// use registerMemory/unregisterMemory in your allocation/deallocation functions.
//
// please wrap all tracking code in PX_DEBUG to avoid contaminating release builds.
//
// One tracked allocation record: the pointer, whether it is device memory, its size,
// and the source location (file/line) of the allocation site.
struct AllocInfo
{
const void* mPtr;	// tracked allocation address
bool mIsGpuPointer;	// true if this records device (GPU) memory
physx::PxU64 mNumBytes;	// allocation size in bytes
const char* mFileName;	// allocation-site file name (borrowed string, not owned)
physx::PxI32 mLineNumber;	// allocation-site line number
AllocInfo(const void* ptr, bool isGpuPointer, physx::PxU64 numBytes, const char* fileName, physx::PxI32 lineNumber) :
mPtr(ptr), mIsGpuPointer(isGpuPointer), mNumBytes(numBytes), mFileName(fileName), mLineNumber(lineNumber)
{
}
// Memberwise copy. Fixed to return *this: the previous version returned void,
// which broke assignment chaining and deviated from the conventional
// copy-assignment signature (callers that ignored the result are unaffected).
PX_FORCE_INLINE AllocInfo& operator = (const AllocInfo& other)
{
mPtr = other.mPtr;
mIsGpuPointer = other.mIsGpuPointer;
mNumBytes = other.mNumBytes;
mFileName = other.mFileName;
mLineNumber = other.mLineNumber;
return *this;
}
};
// Leak tracker for host and device allocations.
//
// Usage: create one static MemTracker in your allocator .cpp and call
// registerMemory()/unregisterMemory() from the allocation/deallocation paths;
// wrap all tracking code in PX_DEBUG to keep it out of release builds.
// All public member functions are serialized by an internal mutex.
class MemTracker
{
AllocInfo* mMemBlockList;	// unordered array of live allocation records (owned)
physx::PxU32 mCapacity;	// AllocInfo slots allocated in mMemBlockList
physx::PxU32 mNumElementsInUse;	// slots currently holding a live record
physx::PxRawAllocator mAllocator;	// raw allocator so the tracker itself is not tracked
physx::PxMutexT<physx::PxRawAllocator> mMutex;	// guards all mutable state above
// The tracker owns mMemBlockList; an implicit copy would double-free it.
// Copying is forbidden (declared private, never defined).
MemTracker(const MemTracker&);
MemTracker& operator=(const MemTracker&);
// Doubles the capacity of mMemBlockList, preserving existing records.
// Caller must hold mMutex.
void doubleSize()
{
mCapacity = 2 * mCapacity;
AllocInfo* mNewPtr = (AllocInfo*)mAllocator.allocate(mCapacity * sizeof(AllocInfo), PX_FL);
physx::PxMemCopy(reinterpret_cast<void*>(mNewPtr), reinterpret_cast<const void*>(mMemBlockList), mNumElementsInUse * sizeof(AllocInfo));
mAllocator.deallocate(mMemBlockList);
mMemBlockList = mNewPtr;
}
public:
MemTracker()
{
mCapacity = 64;
mMemBlockList = (AllocInfo*)mAllocator.allocate(mCapacity * sizeof(AllocInfo), PX_FL);
mNumElementsInUse = 0;
}
// Records a live allocation. filename/lineNumber identify the allocation site
// (pass PX_FL); filename is borrowed and must outlive the registration.
void registerMemory(void* ptr, bool isGpuMemory, physx::PxU64 numBytes, const char* filename, physx::PxI32 lineNumber)
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mNumElementsInUse == mCapacity)
doubleSize();
mMemBlockList[mNumElementsInUse] = AllocInfo(ptr, isGpuMemory, numBytes, filename, lineNumber);
++mNumElementsInUse;
}
// Removes a previously registered allocation (linear search, swap-with-last
// removal, so registration order is not preserved). Returns false if the
// (ptr, isGpuMemory) pair was never registered or was already removed.
bool unregisterMemory(void* ptr, bool isGpuMemory)
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mMemBlockList)
for (physx::PxU32 i = 0; i < mNumElementsInUse; ++i)
{
if (mMemBlockList[i].mPtr == ptr && mMemBlockList[i].mIsGpuPointer == isGpuMemory)
{
mMemBlockList[i] = mMemBlockList[mNumElementsInUse - 1];
--mNumElementsInUse;
return true;
}
}
return false;
}
// Reports every allocation still registered; called automatically from the
// destructor. Routes through the foundation error callback when available,
// otherwise falls back to printf.
void checkForLeaks()
{
physx::PxMutexT<physx::PxRawAllocator>::ScopedLock lock(mMutex);
if (mMemBlockList)
{
for (physx::PxU32 i = 0; i < mNumElementsInUse; ++i)
{
const AllocInfo& info = mMemBlockList[i];
// Format-string fix: PxU64 is not size_t, so the old "%zu" was only
// correct on LP64 targets (UB elsewhere). Cast to unsigned long long
// for "%llu" and pass the bool/PxI32 arguments as the exact integer
// types the "%u"/"%i" specifiers expect.
if(PxIsFoundationValid()) // error callback requires foundation
{
char msg[512];
physx::Pxsnprintf(msg, 512, "Memory not freed: Ptr: %p, numBytes: %llu, file: %s, line: %i isDeviceMem %u\n", info.mPtr, (unsigned long long)info.mNumBytes, info.mFileName, int(info.mLineNumber), unsigned(info.mIsGpuPointer));
PxGetErrorCallback()->reportError(physx::PxErrorCode::eINTERNAL_ERROR, msg, PX_FL);
}
else
{
printf("Memory not freed: Ptr: %p, numBytes: %llu, file: %s, line: %i isDeviceMem %u\n", info.mPtr, (unsigned long long)info.mNumBytes, info.mFileName, int(info.mLineNumber), unsigned(info.mIsGpuPointer));
}
}
// assert to make tests fail.
//if (mNumElementsInUse > 0)
// PX_ALWAYS_ASSERT();
}
}
// Reports leaks, then releases the tracking array.
~MemTracker()
{
checkForLeaks();
if (mMemBlockList)
{
mAllocator.deallocate(mMemBlockList);
mMemBlockList = NULL;
}
}
};
#endif