feat(physics): wire physx sdk into build

This commit is contained in:
2026-04-15 12:22:15 +08:00
parent 5bf258df6d
commit 31f40e2cbb
2044 changed files with 752623 additions and 1 deletions

View File

@@ -0,0 +1,779 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "GuAABBPruner.h"
#include "GuPrunerMergeData.h"
#include "GuCallbackAdapter.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuQuery.h"
#include "CmVisualization.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
AABBPruner::AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode) :
mAABBTree (NULL),
mNewTree (NULL),
mNbCachedBoxes (0),
mNbCalls (0),
mTimeStamp (0),
mBucketPruner (contextID, cpType, &mPool),
mProgress (BUILD_NOT_STARTED),
mRebuildRateHint (100),
mAdaptiveRebuildTerm(0),
mNbObjectsPerNode (nbObjectsPerNode),
mBuildStrategy (buildStrategy),
mPool (contextID, TRANSFORM_CACHE_GLOBAL),
mIncrementalRebuild (incrementalRebuild),
mUncommittedChanges (false),
mNeedsNewTree (false),
mNewTreeFixups ("AABBPruner::mNewTreeFixups")
{
PX_ASSERT(nbObjectsPerNode<16);
}
AABBPruner::~AABBPruner()
{
release();
}
bool AABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool hasPruningStructure)
{
PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mPool.mContextID);
if(!count)
return true;
// no need to do refitMarked for added objects since they are not in the tree
// if we have provided pruning structure, we will merge it, the changes will be applied after the objects has been addded
if(!hasPruningStructure || !mAABBTree)
mUncommittedChanges = true;
// PT: TODO: 'addObjects' for bucket pruner too. Not urgent since we always call the function with count=1 at the moment
const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);
// Bucket pruner is only used while the dynamic pruner is rebuilding
// For the static pruner a full rebuild will happen in commit() every time we modify something, this is not true if
// pruning structure was provided. The objects tree will be merged directly into the static tree. No rebuild will be triggered.
if(mIncrementalRebuild && mAABBTree)
{
PX_PROFILE_ZONE("SceneQuery.bucketPrunerAddObjects", mPool.mContextID);
mNeedsNewTree = true; // each add forces a tree rebuild
// if a pruner structure is provided, we dont move the new objects into bucket pruner
// the pruning structure will be merged into the bucket pruner
if(!hasPruningStructure)
{
for(PxU32 i=0;i<valid;i++)
{
// PT: poolIndex fetched in vain for bucket pruner companion...
// Since the incremental tree references the same pool we could just retrieve the poolIndex there, from the handle...
const PrunerHandle handle = results[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
mBucketPruner.addObject(data[i], handle, bounds[i], transforms[i], mTimeStamp, poolIndex);
}
}
}
return valid==count;
}
void AABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mPool.mContextID);
if(!count)
return;
mUncommittedChanges = true;
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true; // each update forces a tree rebuild
const PxBounds3* currentBounds = mPool.getCurrentWorldBoxes();
const PxTransform* currentTransforms = mPool.getTransforms();
const PrunerPayload* data = mPool.getObjects();
const bool addToRefit = mProgress == BUILD_NEW_MAPPING || mProgress == BUILD_FULL_REFIT || mProgress==BUILD_LAST_FRAME;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle handle = handles[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex != INVALID_NODE_ID) // this means it's in the current tree still and hasn't been removed
mAABBTree->markNodeForRefit(treeNodeIndex);
else // otherwise it means it should be in the bucket pruner
{
PX_ASSERT(&data[poolIndex]==&mPool.getPayloadData(handle));
bool found = mBucketPruner.updateObject(currentBounds[poolIndex], currentTransforms[poolIndex], data[poolIndex], handle, poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
if(addToRefit)
mToRefit.pushBack(poolIndex);
}
}
}
void AABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mPool.mContextID);
if(!count)
return;
mUncommittedChanges = true;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
// copy the payload/userdata before removing it since we need to know the payload/userdata to remove it from the bucket pruner
const PrunerPayload removedData = mPool.getPayloadData(h);
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex]; // already removed from pool but still in tree map
const PrunerPayload swappedData = mPool.getObjects()[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID) // can be invalid if removed
{
mAABBTree->markNodeForRefit(treeNodeIndex); // mark the spot as blank
mBucketPruner.swapIndex(poolIndex, swappedData, poolRelocatedLastIndex); // if swapped index is in bucket pruner
}
else
{
bool status = mBucketPruner.removeObject(removedData, h, poolIndex, swappedData, poolRelocatedLastIndex);
// PT: removed assert to avoid crashing all UTs
//PX_ASSERT(status);
PX_UNUSED(status);
}
mTreeMap.invalidate(poolIndex, poolRelocatedLastIndex, *mAABBTree);
if(mNewTree)
mNewTreeFixups.pushBack(NewTreeFixup(poolIndex, poolRelocatedLastIndex));
}
}
if (mPool.getNbActiveObjects()==0)
{
// this is just to make sure we release all the internal data once all the objects are out of the pruner
// since this is the only place we know that and we don't want to keep memory reserved
release();
// Pruner API requires a commit before the next query, even if we ended up removing the entire tree here. This
// forces that to happen.
mUncommittedChanges = true;
}
}
bool AABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXCORE:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.overlap(queryVolume, pcbArgName);
return again;
}
bool AABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.sweep(queryVolume, unitDir, inOutDistance, pcbArgName);
return again;
}
bool AABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.raycast(origin, unitDir, inOutDistance, pcbArgName);
return again;
}
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
void AABBPruner::purge()
{
release();
mUncommittedChanges = true; // this ensures a commit() must happen before any query
}
void AABBPruner::setRebuildRateHint(PxU32 nbStepsForRebuild)
{
PX_ASSERT(nbStepsForRebuild > 3);
mRebuildRateHint = (nbStepsForRebuild-3); // looks like a magic number to account for the rebuild pipeline latency
mAdaptiveRebuildTerm = 0;
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
void AABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mPool.mContextID);
if(!mUncommittedChanges && (mProgress != BUILD_FINISHED))
// Q: seems like this is both for refit and finalization so is this is correct?
// i.e. in a situation when we started rebuilding a tree and didn't add anything since
// who is going to set mUncommittedChanges to true?
// A: it's set in buildStep at final stage, so that finalization is forced.
// Seems a bit difficult to follow and verify correctness.
return;
mUncommittedChanges = false;
if(!mAABBTree || !mIncrementalRebuild)
{
if(!mIncrementalRebuild && mAABBTree)
PxGetFoundation().error(PxErrorCode::ePERF_WARNING, PX_FL, "SceneQuery static AABB Tree rebuilt, because a shape attached to a static actor was added, removed or moved, and PxSceneQueryDesc::staticStructure is set to eSTATIC_AABB_TREE.");
fullRebuildAABBTree();
return;
}
// Note: it is not safe to call AABBPruner::build() here
// because the first thread will perform one step of the incremental update,
// continue raycasting, while the second thread performs the next step in
// the incremental update
// Calling Refit() below is safe. It will call
// StaticPruner::build() when necessary. Both will early
// exit if the tree is already up to date, if it is not already, then we
// must be the first thread performing raycasts on a dirty tree and other
// scene query threads will be locked out by the write lock in
// PrunerManager::flushUpdates()
if (mProgress != BUILD_FINISHED)
{
// Calling refit because the second tree is not ready to be swapped in (mProgress != BUILD_FINISHED)
// Generally speaking as long as things keep moving the second build will never catch up with true state
refitUpdatedAndRemoved();
}
else
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalize", mPool.mContextID);
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeSwitch", mPool.mContextID);
PX_DELETE(mAABBTree); // delete the old tree
mCachedBoxes.release();
mProgress = BUILD_NOT_STARTED; // reset the build state to initial
// Adjust adaptive term to get closer to specified rebuild rate.
// perform an even division correction to make sure the rebuild rate adds up
if (mNbCalls > mRebuildRateHint)
mAdaptiveRebuildTerm++;
else if (mNbCalls < mRebuildRateHint)
mAdaptiveRebuildTerm--;
// Switch trees
#if PX_DEBUG
mNewTree->validate();
#endif
mAABBTree = mNewTree; // set current tree to progressively rebuilt tree
mNewTree = NULL; // clear out the progressively rebuild tree pointer
mNodeAllocator.release();
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);
// rebuild the tree map to match the current (newly built) tree
mTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mAABBTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree that finished rebuilding.
// AP: the problem here is while we are rebuilding the tree there are ongoing modifications to the current tree
// but the background build has a cached copy of all the AABBs at the time it was started
// (and will produce indices referencing those)
// Things that can happen in the meantime: update, remove, add, commit
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
{
// PT: we're not doing a full refit after this point anymore, so the remaining deleted objects must be manually marked for
// refit (otherwise their AABB in the tree would remain valid, leading to crashes when the corresponding index is 0xffffffff).
// We must do this before invalidating the corresponding tree nodes in the map, obviously (otherwise we'd be reading node
// indices that we already invalidated).
const PoolIndex poolIndex = r->removedIndex;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
mTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mAABBTree);
}
mNewTreeFixups.clear(); // clear out the fixups since we just applied them all
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalRefit", mPool.mContextID);
const PxU32 size = mToRefit.size();
for(PxU32 i=0;i<size;i++)
{
const PoolIndex poolIndex = mToRefit[i];
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
}
mToRefit.clear();
refitUpdatedAndRemoved();
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeRemoveObjects", mPool.mContextID);
PxU32 nbRemovedPairs = mBucketPruner.removeMarkedObjects(mTimeStamp-1);
PX_UNUSED(nbRemovedPairs);
mNeedsNewTree = mBucketPruner.getNbObjects()>0;
}
}
updateBucketPruner();
}
void AABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
if(mIncrementalRebuild)
mBucketPruner.shiftOrigin(shift);
if(mNewTree)
mNewTree->shiftOrigin(shift);
}
void AABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 secondaryColor) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
visualizeTree(out, primaryColor, mAABBTree);
// Render added objects not yet in the tree
out << PxTransform(PxIdentity);
out << PxU32(PxDebugColor::eARGB_WHITE);
if(mIncrementalRebuild && mBucketPruner.getNbObjects())
mBucketPruner.visualize(out, secondaryColor);
}
bool AABBPruner::buildStep(bool synchronousCall)
{
PX_PROFILE_ZONE("SceneQuery.prunerBuildStep", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
if(!synchronousCall || !prepareBuild())
return false;
}
else if(mProgress==BUILD_INIT)
{
mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 0, 0);
mProgress = BUILD_IN_PROGRESS;
mNbCalls = 0;
// Use a heuristic to estimate the number of work units needed for rebuilding the tree.
// The general idea is to use the number of work units of the previous tree to build the new tree.
// This works fine as long as the number of leaves remains more or less the same for the old and the
// new tree. If that is not the case, this estimate can be way off and the work units per step will
// be either much too small or too large. Hence, in that case we will try to estimate the number of work
// units based on the number of leaves of the new tree as follows:
//
// - Assume new tree with n leaves is perfectly-balanced
// - Compute the depth of perfectly-balanced tree with n leaves
// - Estimate number of working units for the new tree
const PxU32 depth = PxILog2(mBuilder.mNbPrimitives); // Note: This is the depth without counting the leaf layer
const PxU32 estimatedNbWorkUnits = depth * mBuilder.mNbPrimitives; // Estimated number of work units for new tree
const PxU32 estimatedNbWorkUnitsOld = mAABBTree ? mAABBTree->getTotalPrims() : 0;
if ((estimatedNbWorkUnits <= (estimatedNbWorkUnitsOld << 1)) && (estimatedNbWorkUnits >= (estimatedNbWorkUnitsOld >> 1)))
// The two estimates do not differ by more than a factor 2
mTotalWorkUnits = estimatedNbWorkUnitsOld;
else
{
mAdaptiveRebuildTerm = 0;
mTotalWorkUnits = estimatedNbWorkUnits;
}
const PxI32 totalWorkUnits = PxI32(mTotalWorkUnits + (mAdaptiveRebuildTerm * mBuilder.mNbPrimitives));
mTotalWorkUnits = PxU32(PxMax(totalWorkUnits, 0));
}
else if(mProgress==BUILD_IN_PROGRESS)
{
mNbCalls++;
const PxU32 Limit = 1 + (mTotalWorkUnits / mRebuildRateHint);
// looks like progressiveRebuild returns 0 when finished
if(!mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 1, Limit))
{
// Done
mProgress = BUILD_NEW_MAPPING;
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
else if(mProgress==BUILD_NEW_MAPPING)
{
mNbCalls++;
mProgress = BUILD_FULL_REFIT;
// PT: we can't call fullRefit without creating the new mapping first: the refit function will fetch boxes from
// the pool using "primitive indices" captured in the tree. But some of these indices may have been invalidated
// if objects got removed while the tree was built. So we need to invalidate the corresponding nodes before refit,
// that way the #prims will be zero and the code won't fetch a wrong box (which may now below to a different object).
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);
if(mNewTreeFixups.size())
{
mNewTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mNewTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree.
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
mNewTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mNewTree);
mNewTreeFixups.clear();
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
}
else if(mProgress==BUILD_FULL_REFIT)
{
mNbCalls++;
mProgress = BUILD_LAST_FRAME;
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFullRefit", mPool.mContextID);
// We need to refit the new tree because objects may have moved while we were building it.
mNewTree->fullRefit(mPool.getCurrentWorldBoxes());
}
}
else if(mProgress==BUILD_LAST_FRAME)
{
mProgress = BUILD_FINISHED;
}
// This is required to be set because commit handles both refit and a portion of build finalization (why?)
// This is overly conservative also only necessary in case there were no updates at all to the tree since the last tree swap
// It also overly conservative in a sense that it could be set only if mProgress was just set to BUILD_FINISHED
// If run asynchronously from a different thread, we touched just the new AABB build phase, we should not mark the main tree as dirty
if(synchronousCall)
mUncommittedChanges = true;
return mProgress==BUILD_FINISHED;
}
return false;
}
bool AABBPruner::prepareBuild()
{
PX_PROFILE_ZONE("SceneQuery.prepareBuild", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return false;
mNodeAllocator.release();
PX_DELETE(mNewTree);
mNewTree = PX_NEW(AABBTree);
mNbCachedBoxes = nbObjects;
mCachedBoxes.init(nbObjects, mPool.getCurrentWorldBoxes());
// PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
// current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
// and those ones will not be part of it. These new objects will be marked with the new timestamp
// value (mTimeStamp+1), and we can use these different values to remove the proper objects from
// the bucket pruner (when switching to the new tree).
mTimeStamp++;
// notify the incremental pruner to swap trees (for incremental pruner companion)
mBucketPruner.timeStampChange();
mBuilder.reset();
mBuilder.mNbPrimitives = mNbCachedBoxes;
mBuilder.mBounds = &mCachedBoxes;
mBuilder.mLimit = mNbObjectsPerNode;
mBuilder.mBuildStrategy = mBuildStrategy;
mBuildStats.reset();
// start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
PX_ASSERT(mNewTreeFixups.size()==0);
mProgress = BUILD_INIT;
}
}
else
return false;
return true;
}
/**
* Builds an AABB-tree for objects in the pruning pool.
* \return true if success
*/
bool AABBPruner::fullRebuildAABBTree()
{
PX_PROFILE_ZONE("SceneQuery.prunerFullRebuildAABBTree", mPool.mContextID);
// Release possibly already existing tree
PX_DELETE(mAABBTree);
// Don't bother building an AABB-tree if there isn't a single static object
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return true;
bool Status;
{
// Create a new tree
mAABBTree = PX_NEW(AABBTree);
Status = mAABBTree->build(AABBTreeBuildParams(mNbObjectsPerNode, nbObjects, &mPool.getCurrentAABBTreeBounds(), mBuildStrategy), mNodeAllocator);
}
// No need for the tree map for static pruner
if(mIncrementalRebuild)
mTreeMap.initMap(PxMax(nbObjects, mNbCachedBoxes), *mAABBTree);
return Status;
}
// called in the end of commit(), but only if mIncrementalRebuild is true
void AABBPruner::updateBucketPruner()
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateBucketPruner", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
mBucketPruner.build();
}
void AABBPruner::release() // this can be called from purge()
{
mBucketPruner.release();
mTimeStamp = 0;
mTreeMap.release();
mNewTreeMap.release();
mCachedBoxes.release();
mBuilder.reset();
mNodeAllocator.release();
PX_DELETE(mNewTree);
PX_DELETE(mAABBTree);
mNbCachedBoxes = 0;
mProgress = BUILD_NOT_STARTED;
mNewTreeFixups.clear();
mUncommittedChanges = false;
}
// Refit current tree
void AABBPruner::refitUpdatedAndRemoved()
{
PX_PROFILE_ZONE("SceneQuery.prunerRefitUpdatedAndRemoved", mPool.mContextID);
PX_ASSERT(mIncrementalRebuild);
AABBTree* tree = getAABBTree();
if(!tree)
return;
#if PX_DEBUG
tree->validate();
#endif
//### missing a way to skip work if not needed
const PxU32 nbObjects = mPool.getNbActiveObjects();
// At this point there still can be objects in the tree that are blanked out so it's an optimization shortcut (not required)
if(!nbObjects)
return;
mBucketPruner.refitMarkedNodes(mPool.getCurrentWorldBoxes());
tree->refitMarkedNodes(mPool.getCurrentWorldBoxes());
}
void AABBPruner::merge(const void* mergeParams)
{
const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
if(!pruningStructure.mAABBTreeNodes)
return;
if(mAABBTree)
{
// index in pruning pool, where new objects were added
const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// create tree from given nodes and indices
AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
if(!mIncrementalRebuild)
{
// merge tree directly
mAABBTree->mergeTree(aabbTreeMergeParams);
}
else
{
mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
}
}
}
void AABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mAABBTree && mAABBTree->getNodes())
bounds = mAABBTree->getNodes()->mBV;
else
bounds.setEmpty();
if(mIncrementalRebuild && mBucketPruner.getNbObjects())
{
PxBounds3 extBounds;
mBucketPruner.getGlobalBounds(extBounds);
bounds.include(extBounds);
}
}

View File

@@ -0,0 +1,264 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABB_PRUNER_H
#define GU_AABB_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuExtendedBucketPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "GuAABBTree.h"
#include "GuAABBTreeUpdateMap.h"
#include "GuAABBTreeBuildStats.h"
namespace physx
{
namespace Gu
{
// PT: we build the new tree over a number of frames/states, in order to limit perf spikes in 'updatePruningTrees'.
// The states are as follows:
//
// BUILD_NOT_STARTED (1 frame, AABBPruner):
//
// This is the initial state, before the new (AABBTree) build even starts. In this frame/state, we perform the AABBPruner-related
// memory allocations:
// - the new AABB tree is allocated
// - the array of cached bounding boxes is allocated and filled
//
// BUILD_INIT (1 frame, AABBTree):
//
// This is the first frame in which the new tree gets built. It deserves its own special state since various things happen in the
// first frame, that do no happen in subsequent frames. Basically most initial AABBTree-related allocations happen here (but no
// build step per se).
//
// BUILD_IN_PROGRESS (N frames, AABBTree):
//
// This is the core build function, actually building the tree. This should be mostly allocation-free, except here and there when
// building non-complete trees, and during the last call when the tree is finally built.
//
// BUILD_NEW_MAPPING (1 frame, AABBPruner):
//
// After the new AABBTree is built, we recreate an AABBTreeUpdateMap for the new tree, and use it to invalidate nodes whose objects
// have been removed during the build.
//
// We need to do that before doing a full refit in the next stage/frame. If we don't do that, the refit code will fetch a wrong box,
// that may very well belong to an entirely new object.
//
// Note that this mapping/update map (mNewTreeMap) is temporary, and only needed for the next stage.
//
// BUILD_FULL_REFIT (1 frame, AABBPruner):
//
// Once the new update map is available, we fully refit the new tree. AABBs of moved objects get updated. AABBs of removed objects
// become empty.
//
// BUILD_LAST_FRAME (1 frame, AABBPruner):
//
// This is an artificial frame used to delay the tree switching code. The switch happens as soon as we reach the BUILD_FINISHED
// state, but we don't want to execute BUILD_FULL_REFIT and the switch in the same frame. This extra BUILD_LAST_FRAME stage buys
// us one frame, i.e. we have one frame in which we do BUILD_FULL_REFIT, and in the next frame we'll do both BUILD_LAST_FRAME /
// BUILD_FINISHED / the switch.
//
// BUILD_FINISHED (1 frame, AABBPruner):
//
// Several things happen in this 'finalization' frame/stage:
// - We switch the trees (old one is deleted, cached boxes are deleted, new tree pointer is setup)
// - A new (final) update map is created (mTreeMap). The map is used to invalidate objects that may have been removed during
// the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames. The nodes containing these removed objects are marked for refit.
// - Nodes containing objects that have moved during the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames are marked for refit.
// - We do a partial refit on the new tree, to take these final changes into account. This small partial refit is usually much
// cheaper than the full refit we previously performed here.
// - We remove old objects from the bucket pruner
//
enum BuildStatus
{
BUILD_NOT_STARTED,
BUILD_INIT,
BUILD_IN_PROGRESS,
BUILD_NEW_MAPPING,
BUILD_FULL_REFIT,
BUILD_LAST_FRAME,
BUILD_FINISHED,
BUILD_FORCE_DWORD = 0xffffffff
};
// This class implements the Pruner interface for internal SQ use with some additional specialized functions
// The underlying data structure is a binary AABB tree
// AABBPruner supports insertions, removals and updates for dynamic objects
// The tree is either entirely rebuilt in a single frame (static pruner) or progressively rebuilt over multiple frames (dynamic pruner)
// The rebuild happens on a copy of the tree
// the copy is then swapped with current tree at the time commit() is called (only if mBuildState is BUILD_FINISHED),
// otherwise commit() will perform a refit operation applying any pending changes to the current tree
// While the tree is being rebuilt a temporary data structure (BucketPruner) is also kept in sync and used to speed up
// queries on updated objects that are not yet in either old or new tree.
// The requirements on the order of calls:
// commit() is required to be called before any queries to apply modifications
// queries can be issued on multiple threads after commit is called
// commit, buildStep, add/remove/update have to be called from the same thread or otherwise strictly serialized by external code
// and cannot be issued while a query is running
class AABBPruner : public DynamicPruner
{
PX_NOCOPY(AABBPruner)
public:
PX_PHYSX_COMMON_API AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy=BVH_SPLATTER_POINTS, PxU32 nbObjectsPerNode=4); // true is equivalent to former dynamic pruner
virtual ~AABBPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
virtual bool isDynamic() const { return mIncrementalRebuild; }
//~Pruner
// DynamicPruner
virtual void setRebuildRateHint(PxU32 nbStepsForRebuild); // Besides the actual rebuild steps, 3 additional steps are needed.
virtual bool buildStep(bool synchronousCall = true); // returns true if finished
virtual bool prepareBuild(); // returns true if new tree is needed
//~DynamicPruner
// direct access for test code
PX_FORCE_INLINE PxU32 getNbAddedObjects() const { return mBucketPruner.getNbObjects(); }
PX_FORCE_INLINE const AABBTree* getAABBTree() const { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE AABBTree* getAABBTree() { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE void setAABBTree(AABBTree* tree) { mAABBTree = tree; }
PX_FORCE_INLINE const AABBTree* hasAABBTree() const { return mAABBTree; }
PX_FORCE_INLINE BuildStatus getBuildStatus() const { return mProgress; }
// local functions
// private:
NodeAllocator mNodeAllocator;
AABBTree* mAABBTree; // current active tree
AABBTreeBuildParams mBuilder; // this class deals with the details of the actual tree building
BuildStats mBuildStats;
// tree with build in progress, assigned to mAABBTree in commit, when mProgress is BUILD_FINISHED
// created in buildStep(), BUILD_NOT_STARTED
// This is non-null when there is a tree rebuild going on in progress
// and thus also indicates that we have to start saving the fixups
AABBTree* mNewTree;
// during rebuild the pool might change so we need a copy of boxes for the tree build
AABBTreeBounds mCachedBoxes;
PxU32 mNbCachedBoxes;
// incremented in commit(), serves as a progress counter for rebuild
PxU32 mNbCalls;
// PT: incremented each time we start building a new tree (i.e. effectively identifies a given tree)
// Timestamp is passed to bucket pruner to mark objects added there, linking them to a specific tree.
// When switching to the new tree, timestamp is used to remove old objects (now in the new tree) from
// the bucket pruner.
PxU32 mTimeStamp;
// this pruner is used for queries on objects that are not in the current tree yet
// includes both the objects in the tree being rebuilt and all the objects added later
ExtendedBucketPruner mBucketPruner;
BuildStatus mProgress; // current state of second tree build progress
// Fraction (as in 1/Nth) of the total number of primitives
// that should be processed per step by the AABB builder
// so if this value is 1, all primitives will be rebuilt, 2 => 1/2 of primitives per step etc.
// see also mNbCalls, mNbCalls varies from 0 to mRebuildRateHint-1
PxU32 mRebuildRateHint;
// Estimate for how much work has to be done to rebuild the tree.
PxU32 mTotalWorkUnits;
// Term to correct the work unit estimate if the rebuild rate is not matched
PxI32 mAdaptiveRebuildTerm;
const PxU32 mNbObjectsPerNode;
const BVHBuildStrategy mBuildStrategy;
PruningPool mPool; // Pool of AABBs
// maps pruning pool indices to aabb tree indices
// maps to INVALID_NODE_ID if the pool entry was removed or "pool index is outside input domain"
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID)
// So:
// treeNodeIndex = mTreeMap.operator[](poolIndex)
// aabbTree->treeNodes[treeNodeIndex].primitives[0] == poolIndex
AABBTreeUpdateMap mTreeMap;
// Temporary update map, see BuildStatus notes above for details
AABBTreeUpdateMap mNewTreeMap;
// This is only set once in the constructor and is equivalent to isDynamicTree
// if it set to false then a 1-shot rebuild is performed in commit()
// bucket pruner is only used with incremental rebuild
const bool mIncrementalRebuild;
// A rebuild can be triggered even when the Pruner is not dirty
// mUncommittedChanges is set to true in add, remove, update and buildStep
// mUncommittedChanges is set to false in commit
// mUncommittedChanges has to be false (commit() has to be called) in order to run a query as defined by the
// mUncommittedChanges is not set to true in add, when pruning structure is provided. Scene query shapes
// are merged to current AABB tree directly
// Pruner higher level API
bool mUncommittedChanges;
// A new AABB tree is built if an object was added, removed or updated
// Changing objects during a build will trigger another rebuild right afterwards
// this is set to true if a new tree has to be created again after the current rebuild is done
bool mNeedsNewTree;
// This struct is used to record modifications made to the pruner state
// while a tree is building in the background
// this is so we can apply the modifications to the tree at the time of completion
// the recorded fixup information is: removedIndex (in ::remove()) and
// lastIndexMoved which is the last index in the pruner array
// (since the way we remove from PruningPool is by swapping last into removed slot,
// we need to apply a fixup so that it syncs up that operation in the new tree)
struct NewTreeFixup
{
PX_FORCE_INLINE NewTreeFixup(PxU32 removedIndex_, PxU32 relocatedLastIndex_)
: removedIndex(removedIndex_), relocatedLastIndex(relocatedLastIndex_) {}
PxU32 removedIndex;
PxU32 relocatedLastIndex;
};
PxArray<NewTreeFixup> mNewTreeFixups;
PxArray<PoolIndex> mToRefit;
// Internal methods
bool fullRebuildAABBTree(); // full rebuild function, used with static pruner mode
void release();
void refitUpdatedAndRemoved();
void updateBucketPruner();
};
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,580 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_H
#define GU_AABBTREE_H
#include "foundation/PxMemory.h"
#include "foundation/PxArray.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "GuAABBTreeQuery.h"
#include "GuDistancePointTriangle.h"
#include "GuDistancePointTetrahedron.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
struct SAH_Buffers;
class NodeAllocator;
struct BuildStats;
class AABBTreeBounds;
// PT: TODO: sometimes we export member functions, sometimes we export the whole class. What's the story here?
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
//! Contains AABB-tree build parameters
class PX_PHYSX_COMMON_API AABBTreeBuildParams : public PxUserAllocated
{
public:
AABBTreeBuildParams(PxU32 limit = 1, PxU32 nb_prims = 0, const AABBTreeBounds* bounds = NULL, BVHBuildStrategy bs = BVH_SPLATTER_POINTS) :
mLimit (limit),
mNbPrimitives (nb_prims),
mBounds (bounds),
mCache (NULL),
mBuildStrategy (bs)
{
}
~AABBTreeBuildParams()
{
reset();
}
PX_FORCE_INLINE void reset()
{
mLimit = mNbPrimitives = 0;
mBounds = NULL;
PX_FREE(mCache);
}
PxU32 mLimit; //!< Limit number of primitives / node. If limit is 1, build a complete tree (2*N-1 nodes)
PxU32 mNbPrimitives; //!< Number of (source) primitives.
const AABBTreeBounds* mBounds; //!< Shortcut to an app-controlled array of AABBs.
mutable PxVec3* mCache; //!< Cache for AABB centers - managed by build code.
BVHBuildStrategy mBuildStrategy;
};
//! AABB tree node used for building
class PX_PHYSX_COMMON_API AABBTreeBuildNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE AABBTreeBuildNode() {}
PX_FORCE_INLINE ~AABBTreeBuildNode() {}
PX_FORCE_INLINE const PxBounds3& getAABB() const { return mBV; }
PX_FORCE_INLINE const AABBTreeBuildNode* getPos() const { return mPos; }
PX_FORCE_INLINE const AABBTreeBuildNode* getNeg() const { const AABBTreeBuildNode* P = mPos; return P ? P + 1 : NULL; }
PX_FORCE_INLINE bool isLeaf() const { return !getPos(); }
PxBounds3 mBV; //!< Global bounding-volume enclosing all the node-related primitives
const AABBTreeBuildNode* mPos; //!< "Positive" & "Negative" children
PxU32 mNodeIndex; //!< Index of node-related primitives (in the tree's mIndices array)
PxU32 mNbPrimitives; //!< Number of primitives for this node
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val) { mNbPrimitives = val; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + mNodeIndex; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + mNodeIndex; }
void subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void subdivideSAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
};
//! For complete trees we can predict the final number of nodes and preallocate them. For incomplete trees we can't.
//! But we don't want to allocate nodes one by one (which would be quite slow), so we use this helper class to
//! allocate N nodes at once, while minimizing the amount of nodes allocated for nothing. An initial amount of
//! nodes is estimated using the max number for a complete tree, and the user-defined number of primitives per leaf.
//! In ideal cases this estimated number will be quite close to the final number of nodes. When that number is not
//! enough though, slabs of N=1024 extra nodes are allocated until the build is complete.
class PX_PHYSX_COMMON_API NodeAllocator : public PxUserAllocated
{
public:
NodeAllocator();
~NodeAllocator();
void release();
void init(PxU32 nbPrimitives, PxU32 limit);
AABBTreeBuildNode* getBiNode();
AABBTreeBuildNode* mPool;
struct Slab
{
PX_FORCE_INLINE Slab() {}
PX_FORCE_INLINE Slab(AABBTreeBuildNode* pool, PxU32 nbUsedNodes, PxU32 maxNbNodes) : mPool(pool), mNbUsedNodes(nbUsedNodes), mMaxNbNodes(maxNbNodes) {}
AABBTreeBuildNode* mPool;
PxU32 mNbUsedNodes;
PxU32 mMaxNbNodes;
};
PxArray<Slab> mSlabs;
PxU32 mCurrentSlabIndex;
PxU32 mTotalNbNodes;
};
#if PX_VC
#pragma warning(pop)
#endif
/*
* \brief Builds AABBtree from given parameters.
* \param params [in/out] AABBTree build params
* \param nodeAllocator [in/out] Node allocator
* \param stats [out] Statistics
* \return Indices buffer allocated during build, or NULL if failed
*/
PX_PHYSX_COMMON_API PxU32* buildAABBTree(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
// PT: TODO: explain how users should call these functions and maybe revisit this
PX_PHYSX_COMMON_API void flattenTree(const NodeAllocator& nodeAllocator, BVHNode* dest, const PxU32* remap = NULL);
PX_PHYSX_COMMON_API void buildAABBTree(PxU32 nbBounds, const AABBTreeBounds& bounds, PxArray<BVHNode>& tree);
PxU32 reshuffle(PxU32 nb, PxU32* const PX_RESTRICT prims, const PxVec3* PX_RESTRICT centers, float splitValue, PxU32 axis);
class BitArray
{
public:
BitArray() : mBits(NULL), mSize(0) {}
BitArray(PxU32 nb_bits) { init(nb_bits); }
~BitArray() { PX_FREE(mBits); }
bool init(PxU32 nb_bits);
// Data management
PX_FORCE_INLINE void setBit(PxU32 bit_number)
{
mBits[bit_number>>5] |= 1<<(bit_number&31);
}
PX_FORCE_INLINE void clearBit(PxU32 bit_number)
{
mBits[bit_number>>5] &= ~(1<<(bit_number&31));
}
PX_FORCE_INLINE void toggleBit(PxU32 bit_number)
{
mBits[bit_number>>5] ^= 1<<(bit_number&31);
}
PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); }
PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); }
void resize(PxU32 maxBitNumber);
// Data access
PX_FORCE_INLINE PxIntBool isSet(PxU32 bit_number) const
{
return PxIntBool(mBits[bit_number>>5] & (1<<(bit_number&31)));
}
PX_FORCE_INLINE const PxU32* getBits() const { return mBits; }
PX_FORCE_INLINE PxU32 getSize() const { return mSize; }
protected:
PxU32* mBits; //!< Array of bits
PxU32 mSize; //!< Size of the array in dwords
};
//! Contains AABB-tree merge parameters
class AABBTreeMergeData
{
public:
AABBTreeMergeData(PxU32 nbNodes, const BVHNode* nodes, PxU32 nbIndices, const PxU32* indices, PxU32 indicesOffset) :
mNbNodes(nbNodes), mNodes(nodes), mNbIndices(nbIndices), mIndices(indices), mIndicesOffset(indicesOffset)
{
}
~AABBTreeMergeData() {}
PX_FORCE_INLINE const BVHNode& getRootNode() const { return *mNodes; }
public:
PxU32 mNbNodes; //!< Number of nodes of AABB tree merge
const BVHNode* mNodes; //!< Nodes of AABB tree merge
PxU32 mNbIndices; //!< Number of indices of AABB tree merge
const PxU32* mIndices; //!< Indices of AABB tree merge
PxU32 mIndicesOffset; //!< Indices offset from pruning pool
};
// Progressive building
class FIFOStack;
//~Progressive building
// PT: base class used to share some data and code between Gu::AABBtree and Gu::BVH. This is WIP and subject to change.
// Design dictated by refactoring necessities rather than a grand vision of something.
class BVHCoreData : public PxUserAllocated
{
public:
BVHCoreData() : mNbIndices(0), mNbNodes(0), mNodes(NULL), mIndices(NULL) {}
PX_FORCE_INLINE PxU32 getNbIndices() const { return mNbIndices; }
PX_FORCE_INLINE const PxU32* getIndices() const { return mIndices; }
PX_FORCE_INLINE PxU32* getIndices() { return mIndices; }
PX_FORCE_INLINE void setIndices(PxU32* indices) { mIndices = indices; }
PX_FORCE_INLINE PxU32 getNbNodes() const { return mNbNodes; }
PX_FORCE_INLINE const BVHNode* getNodes() const { return mNodes; }
PX_FORCE_INLINE BVHNode* getNodes() { return mNodes; }
PX_PHYSX_COMMON_API void fullRefit(const PxBounds3* boxes);
// PT: I'm leaving the above accessors here to avoid refactoring the SQ code using them, but members became public.
PxU32 mNbIndices; //!< Nb indices
PxU32 mNbNodes; //!< Number of nodes in the tree.
BVHNode* mNodes; //!< Linear pool of nodes.
PxU32* mIndices; //!< Indices in the app list. Indices are reorganized during build (permutation).
};
class BVHPartialRefitData : public BVHCoreData
{
public:
PX_PHYSX_COMMON_API BVHPartialRefitData();
PX_PHYSX_COMMON_API ~BVHPartialRefitData();
PX_PHYSX_COMMON_API void releasePartialRefitData(bool clearRefitMap);
// adds node[index] to a list of nodes to refit when refitMarkedNodes is called
// Note that this includes updating the hierarchy up the chain
PX_PHYSX_COMMON_API void markNodeForRefit(TreeNodeIndex nodeIndex);
PX_PHYSX_COMMON_API void refitMarkedNodes(const PxBounds3* boxes);
PX_FORCE_INLINE PxU32* getUpdateMap() { return mUpdateMap; }
protected:
PxU32* mParentIndices; //!< PT: hot/cold split, keep parent data in separate array
PxU32* mUpdateMap; //!< PT: Local index to tree node index
BitArray mRefitBitmask; //!< bit is set for each node index in markForRefit
PxU32 mRefitHighestSetWord;
PxU32* getParentIndices();
public:
void createUpdateMap(PxU32 nbObjects);
};
//! AABB-tree, N primitives/leaf
// PT: TODO: each PX_PHYSX_COMMON_API is a cross-DLL call, should we split that class in Gu/Sq parts to minimize this?
class AABBTree : public BVHPartialRefitData
{
public:
PX_PHYSX_COMMON_API AABBTree();
PX_PHYSX_COMMON_API ~AABBTree();
// Build
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator);
// Progressive building
PX_PHYSX_COMMON_API PxU32 progressiveBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats, PxU32 progress, PxU32 limit);
//~Progressive building
PX_PHYSX_COMMON_API void release(bool clearRefitMap=true);
// Merge tree with another one
PX_PHYSX_COMMON_API void mergeTree(const AABBTreeMergeData& tree);
// Initialize tree from given merge data
PX_PHYSX_COMMON_API void initTree(const AABBTreeMergeData& tree);
// Data access
PX_FORCE_INLINE PxU32 getTotalPrims() const { return mTotalPrims; }
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// Shift indices of the tree by offset. Used for merged trees, when initial indices needs to be shifted to match indices in current pruning pool
PX_PHYSX_COMMON_API void shiftIndices(PxU32 offset);
#if PX_DEBUG
void validate() {}
#endif
private:
PxU32 mTotalPrims; //!< Copy of final BuildStats::mTotalPrims
// Progressive building
FIFOStack* mStack;
//~Progressive building
bool buildInit(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
void buildEnd(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, const BuildStats& stats);
// tree merge
void mergeRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void mergeRuntimeLeaf(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& tree);
void traverseRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 nodeIndex);
};
struct TinyBVH
{
PxArray<Gu::BVHNode> mTree;
PX_PHYSX_COMMON_API static void constructFromTriangles(const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points,
TinyBVH& result, PxF32 enlargement = 1e-4f);
PX_PHYSX_COMMON_API static void constructFromTetrahedra(const PxU32* tetrahedra, const PxU32 numTetrahedra, const PxVec3* points,
TinyBVH& result, PxF32 enlargement = 1e-4f);
template<typename T>
void Traverse(T& traversalController, PxI32 rootNodeIndex = 0)
{
Gu::traverseBVH(mTree.begin(), traversalController, rootNodeIndex);
}
};
class ClosestDistanceToTetmeshTraversalController
{
private:
PxReal mClosestDistanceSquared;
const PxU32* mTetrahedra;
const PxVec3* mPoints;
const Gu::BVHNode* mNodes;
PxVec3 mQueryPoint;
PxVec3 mClosestPoint;
PxI32 mClosestTetId;
public:
PX_FORCE_INLINE ClosestDistanceToTetmeshTraversalController() {}
PX_FORCE_INLINE ClosestDistanceToTetmeshTraversalController(const PxU32* tetrahedra, const PxVec3* points, Gu::BVHNode* nodes) :
mClosestDistanceSquared(PX_MAX_F32), mTetrahedra(tetrahedra), mPoints(points), mNodes(nodes), mQueryPoint(0.0f), mClosestPoint(0.0f), mClosestTetId(-1)
{
initialize(tetrahedra, points, nodes);
}
void initialize(const PxU32* tetrahedra, const PxVec3* points, Gu::BVHNode* nodes)
{
mTetrahedra = tetrahedra;
mPoints = points;
mNodes = nodes;
mQueryPoint = PxVec3(0.0f);
mClosestPoint = PxVec3(0.0f);
mClosestTetId = -1;
mClosestDistanceSquared = PX_MAX_F32;
}
PX_FORCE_INLINE void setQueryPoint(const PxVec3& queryPoint)
{
mQueryPoint = queryPoint;
mClosestDistanceSquared = PX_MAX_F32;
mClosestPoint = PxVec3(0.0f);
mClosestTetId = -1;
}
PX_FORCE_INLINE const PxVec3& getClosestPoint() const
{
return mClosestPoint;
}
PX_FORCE_INLINE PxReal distancePointBoxSquared(const PxBounds3& box, const PxVec3& point)
{
PxVec3 closestPt = box.minimum.maximum(box.maximum.minimum(point));
return (closestPt - point).magnitudeSquared();
}
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
{
if (distancePointBoxSquared(node.mBV, mQueryPoint) >= mClosestDistanceSquared)
return Gu::TraversalControl::eDontGoDeeper;
if (node.isLeaf())
{
const PxI32 j = node.getPrimitiveIndex();
const PxU32* tet = &mTetrahedra[4 * j];
PxVec3 closest = closestPtPointTetrahedronWithInsideCheck(mQueryPoint,
mPoints[tet[0]], mPoints[tet[1]], mPoints[tet[2]], mPoints[tet[3]]);
PxReal d2 = (closest - mQueryPoint).magnitudeSquared();
if (d2 < mClosestDistanceSquared)
{
mClosestDistanceSquared = d2;
mClosestTetId = j;
mClosestPoint = closest;
}
if (d2 == 0.0f)
return Gu::TraversalControl::eAbort;
return Gu::TraversalControl::eDontGoDeeper;
}
const Gu::BVHNode& nodePos = mNodes[node.getPosIndex()];
const PxReal distSquaredPos = distancePointBoxSquared(nodePos.mBV, mQueryPoint);
const Gu::BVHNode& nodeNeg = mNodes[node.getNegIndex()];
const PxReal distSquaredNeg = distancePointBoxSquared(nodeNeg.mBV, mQueryPoint);
if (distSquaredPos < distSquaredNeg)
{
if (distSquaredPos < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeper;
}
else
{
if (distSquaredNeg < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeperNegFirst;
}
return Gu::TraversalControl::eDontGoDeeper;
}
PxI32 getClosestTetId() const { return mClosestTetId; }
void setClosestStart(const PxReal closestDistanceSquared, PxI32 closestTetrahedron, const PxVec3& closestPoint)
{
mClosestDistanceSquared = closestDistanceSquared;
mClosestTetId = closestTetrahedron;
mClosestPoint = closestPoint;
}
private:
PX_NOCOPY(ClosestDistanceToTetmeshTraversalController)
};
class ClosestDistanceToTrimeshTraversalController
{
private:
PxReal mClosestDistanceSquared;
const PxU32* mTriangles;
const PxVec3* mPoints;
const Gu::BVHNode* mNodes;
PxVec3 mQueryPoint;
PxVec3 mClosestPoint;
PxI32 mClosestTriId;
public:
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController() {}
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes) :
mClosestDistanceSquared(PX_MAX_F32), mTriangles(triangles), mPoints(points), mNodes(nodes), mQueryPoint(0.0f), mClosestPoint(0.0f), mClosestTriId(-1)
{
initialize(triangles, points, nodes);
}
void initialize(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes)
{
mTriangles = triangles;
mPoints = points;
mNodes = nodes;
mQueryPoint = PxVec3(0.0f);
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
mClosestDistanceSquared = PX_MAX_F32;
}
PX_FORCE_INLINE void setQueryPoint(const PxVec3& queryPoint)
{
mQueryPoint = queryPoint;
mClosestDistanceSquared = PX_MAX_F32;
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
}
PX_FORCE_INLINE const PxVec3& getClosestPoint() const
{
return mClosestPoint;
}
PX_FORCE_INLINE PxReal distancePointBoxSquared(const PxBounds3& box, const PxVec3& point)
{
PxVec3 closestPt = box.minimum.maximum(box.maximum.minimum(point));
return (closestPt - point).magnitudeSquared();
}
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
{
if (distancePointBoxSquared(node.mBV, mQueryPoint) >= mClosestDistanceSquared)
return Gu::TraversalControl::eDontGoDeeper;
if (node.isLeaf())
{
const PxI32 j = node.getPrimitiveIndex();
const PxU32* tri = &mTriangles[3 * j];
aos::FloatV t1, t2;
aos::Vec3V q = V3LoadU(mQueryPoint);
aos::Vec3V a = V3LoadU(mPoints[tri[0]]);
aos::Vec3V b = V3LoadU(mPoints[tri[1]]);
aos::Vec3V c = V3LoadU(mPoints[tri[2]]);
aos::Vec3V cp;
aos::FloatV d = Gu::distancePointTriangleSquared2UnitBox(q, a, b, c, t1, t2, cp);
PxReal d2;
FStore(d, &d2);
PxVec3 closest;
V3StoreU(cp, closest);
//const PxVec3 closest = closestPtPointTriangle2UnitBox(mQueryPoint, mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]]);
//PxReal d2 = (closest - mQueryPoint).magnitudeSquared();
if (d2 < mClosestDistanceSquared)
{
mClosestDistanceSquared = d2;
mClosestTriId = j;
mClosestPoint = closest;
}
return Gu::TraversalControl::eDontGoDeeper;
}
const Gu::BVHNode& nodePos = mNodes[node.getPosIndex()];
const PxReal distSquaredPos = distancePointBoxSquared(nodePos.mBV, mQueryPoint);
const Gu::BVHNode& nodeNeg = mNodes[node.getNegIndex()];
const PxReal distSquaredNeg = distancePointBoxSquared(nodeNeg.mBV, mQueryPoint);
if (distSquaredPos < distSquaredNeg)
{
if (distSquaredPos < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeper;
}
else
{
if (distSquaredNeg < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeperNegFirst;
}
return Gu::TraversalControl::eDontGoDeeper;
}
PxI32 getClosestTriId() const { return mClosestTriId; }
void setClosestStart(const PxReal closestDistanceSquared, PxI32 closestTriangle, const PxVec3& closestPoint)
{
mClosestDistanceSquared = closestDistanceSquared;
mClosestTriId = closestTriangle;
mClosestPoint = closestPoint;
}
private:
PX_NOCOPY(ClosestDistanceToTrimeshTraversalController)
};
} // namespace Gu
}
#endif // GU_AABBTREE_H

View File

@@ -0,0 +1,70 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_BOUNDS_H
#define GU_AABBTREE_BOUNDS_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
class PxBounds3;
namespace Gu
{
class PX_PHYSX_COMMON_API AABBTreeBounds
{
public:
AABBTreeBounds() : mBounds(NULL), mUserAllocated(false) {}
~AABBTreeBounds() { release(); }
void init(PxU32 nbBounds, const PxBounds3* bounds=NULL);
void resize(PxU32 newSize, PxU32 previousSize);
void release();
PX_FORCE_INLINE PxBounds3* getBounds() { return mBounds; }
PX_FORCE_INLINE const PxBounds3* getBounds() const { return mBounds; }
PX_FORCE_INLINE void moveFrom(AABBTreeBounds& source)
{
mBounds = source.mBounds;
source.mBounds = NULL;
}
PX_FORCE_INLINE void takeOwnership() { mUserAllocated = true; }
PX_FORCE_INLINE bool ownsMemory() const { return mUserAllocated==false; }
PX_FORCE_INLINE void setBounds(PxBounds3* bounds) { mBounds = bounds; mUserAllocated=true; }
private:
PxBounds3* mBounds;
PxU32 mUserAllocated;
};
} // namespace Gu
}
#endif // GU_AABBTREE_BOUNDS_H

View File

@@ -0,0 +1,58 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_BUILD_STATS_H
#define GU_AABBTREE_BUILD_STATS_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
//! Contains AABB-tree build statistics
struct PX_PHYSX_COMMON_API BuildStats
{
BuildStats() : mCount(0), mTotalPrims(0) {}
PxU32 mCount; //!< Number of nodes created
PxU32 mTotalPrims; //!< Total accumulated number of primitives. Should be much higher than the source
//!< number of prims, since it accumulates all prims covered by each node (i.e. internal
//!< nodes too, not just leaf ones)
// PT: everything's public so consider dropping these
PX_FORCE_INLINE void reset() { mCount = mTotalPrims = 0; }
PX_FORCE_INLINE void setCount(PxU32 nb) { mCount = nb; }
PX_FORCE_INLINE void increaseCount(PxU32 nb) { mCount += nb; }
PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
};
} // namespace Gu
}
#endif // GU_AABBTREE_BUILD_STATS_H

View File

@@ -0,0 +1,98 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_NODE_H
#define GU_AABBTREE_NODE_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct BVHNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE BVHNode() {}
PX_FORCE_INLINE ~BVHNode() {}
PX_FORCE_INLINE PxU32 isLeaf() const { return mData&1; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + (mData>>5); }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + (mData>>5); }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return mData>>5; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return (mData>>1)&15; }
PX_FORCE_INLINE PxU32 getPosIndex() const { return mData>>1; }
PX_FORCE_INLINE PxU32 getNegIndex() const { return (mData>>1) + 1; }
PX_FORCE_INLINE const BVHNode* getPos(const BVHNode* base) const { return base + (mData>>1); }
PX_FORCE_INLINE const BVHNode* getNeg(const BVHNode* base) const { const BVHNode* P = getPos(base); return P ? P+1 : NULL; }
PX_FORCE_INLINE BVHNode* getPos(BVHNode* base) { return base + (mData >> 1); }
PX_FORCE_INLINE BVHNode* getNeg(BVHNode* base) { BVHNode* P = getPos(base); return P ? P + 1 : NULL; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return (mData>>1)&15; }
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val)
{
PX_ASSERT(val<16);
PxU32 data = mData & ~(15<<1);
data |= val<<1;
mData = data;
}
PX_FORCE_INLINE void getAABBCenterExtentsV(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V(V4Scale(V4Sub(maxV, minV), halfV));
*center = Vec3V_From_Vec4V(V4Scale(V4Add(maxV, minV), halfV));
}
PX_FORCE_INLINE void getAABBCenterExtentsV2(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
*extents = Vec3V_From_Vec4V(V4Sub(maxV, minV));
*center = Vec3V_From_Vec4V(V4Add(maxV, minV));
}
PxBounds3 mBV; // Global bounding-volume enclosing all the node-related primitives
PxU32 mData; // 27 bits node or prim index|4 bits #prims|1 bit leaf
};
} // namespace Gu
}
#endif // GU_AABBTREE_NODE_H

View File

@@ -0,0 +1,303 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREEQUERY_H
#define GU_AABBTREEQUERY_H
#include "GuBVHTestsSIMD.h"
#include "GuAABBTreeBounds.h"
#include "foundation/PxInlineArray.h"
#include "GuAABBTreeNode.h"
namespace physx
{
namespace Gu
{
#define RAW_TRAVERSAL_STACK_SIZE 256
//////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE void getBoundsTimesTwo(Vec4V& center, Vec4V& extents, const PxBounds3* bounds, PxU32 poolIndex)
{
const PxBounds3* objectBounds = bounds + poolIndex;
// PT: it's safe to V4LoadU because the pointer comes from the AABBTreeBounds class
const Vec4V minV = V4LoadU(&objectBounds->minimum.x);
const Vec4V maxV = V4LoadU(&objectBounds->maximum.x);
center = V4Add(maxV, minV);
extents = V4Sub(maxV, minV);
}
//////////////////////////////////////////////////////////////////////////
template<const bool tHasIndices, typename Test, typename Node, typename QueryCallback>
static PX_FORCE_INLINE bool doOverlapLeafTest(const Test& test, const Node* node, const PxBounds3* bounds, const PxU32* indices, QueryCallback& visitor)
{
PxU32 nbPrims = node->getNbPrimitives();
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds, primIndex);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
const Vec4V extents_ = V4Scale(extents2, halfV);
const Vec4V center_ = V4Scale(center2, halfV);
if(!test(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
if(!visitor.invoke(primIndex))
return false;
}
return true;
}
template<const bool tHasIndices, typename Test, typename Tree, typename Node, typename QueryCallback>
class AABBTreeOverlap
{
public:
bool operator()(const AABBTreeBounds& treeBounds, const Tree& tree, const Test& test, QueryCallback& visitor)
{
const PxBounds3* bounds = treeBounds.getBounds();
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const Node* node = stack[--stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV(&center, &extents);
while(test(center, extents))
{
if(node->isLeaf())
{
if(!doOverlapLeafTest<tHasIndices, Test, Node>(test, node, bounds, tree.getIndices(), visitor))
return false;
break;
}
const Node* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
node->getAABBCenterExtentsV(&center, &extents);
}
}
return true;
}
};
//////////////////////////////////////////////////////////////////////////
template <const bool tInflate, const bool tHasIndices, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
static PX_FORCE_INLINE bool doLeafTest( const Node* node, Gu::RayAABBTest& test, const PxBounds3* bounds, const PxU32* indices, PxReal& maxDist, QueryCallback& pcb)
{
PxU32 nbPrims = node->getNbPrimitives();
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
Vec4V center_, extents_;
getBoundsTimesTwo(center_, extents_, bounds, primIndex);
if(!test.check<tInflate>(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
// PT:
// - 'maxDist' is the current best distance. It can be seen as a "maximum allowed distance" (as passed to the
// template by users initially) but also as the "current minimum impact distance", so the name is misleading.
// Either way this is where we write & communicate the final/best impact distance to users.
//
// - the invoke function also takes a distance parameter, and this one is in/out. In input we must pass the
// current best distance to the leaf node, so that subsequent leaf-level queries can cull things away as
// much as possible. In output users return a shrunk distance value if they found a hit. We need to pass a
// copy of 'maxDist' ('md') since it would be too dangerous to rely on the arbitrary user code to always do
// the right thing. In particular if we'd pass 'maxDist' to invoke directly, and the called code would NOT
// respect the passed max value, it could potentially return a hit further than the best 'maxDist'. At which
// point the '(md < oldMaxDist)' test would fail but the damage would have already been done ('maxDist' would
// have already been overwritten with a larger value than before). Hence, we need 'md'.
//
// - now 'oldMaxDist' however is more subtle. In theory we wouldn't need it and we could just use '(md < maxDist)'
// in the test below. But that opens the door to subtle bugs: 'maxDist' is a reference to some value somewhere
// in the user's code, and we call the same user in invoke. It turns out that the invoke code can access and
// modify 'maxDist' on their side, even if we do not pass it to invoke. It's basically the same problem as
// before, but much more difficult to see. It does happen with the current PhysX implementations of the invoke
// functions: they modify the 'md' that we send them, but *also* 'maxDist' without the code below knowing
// about it. So the subsequent test fails again because md == maxDist. A potential solution would have been to
// work on a local copy of 'maxDist' in operator(), only writing out the final distance when returning from the
// function. Another solution used below is to introduce that local copy just here in the leaf code: that's
// where 'oldMaxDist' comes from.
PxReal oldMaxDist = maxDist;
PxReal md = maxDist;
if(!pcb.invoke(md, primIndex))
return false;
if(md < oldMaxDist)
{
maxDist = md;
test.setDistance(md);
}
}
return true;
}
//////////////////////////////////////////////////////////////////////////
template <const bool tInflate, const bool tHasIndices, typename Tree, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
class AABBTreeRaycast
{
public:
bool operator()(
const AABBTreeBounds& treeBounds, const Tree& tree,
const PxVec3& origin, const PxVec3& unitDir, PxReal& maxDist, const PxVec3& inflation,
QueryCallback& pcb)
{
const PxBounds3* bounds = treeBounds.getBounds();
// PT: we will pass center*2 and extents*2 to the ray-box code, to save some work per-box
// So we initialize the test with values multiplied by 2 as well, to get correct results
Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, maxDist, inflation*2.0f);
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex--)
{
const Node* node = stack[stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV2(&center, &extents);
if(test.check<tInflate>(center, extents)) // TODO: try timestamp ray shortening to skip this
{
while(!node->isLeaf())
{
const Node* children = node->getPos(nodeBase);
Vec3V c0, e0;
children[0].getAABBCenterExtentsV2(&c0, &e0);
const PxU32 b0 = test.check<tInflate>(c0, e0);
Vec3V c1, e1;
children[1].getAABBCenterExtentsV2(&c1, &e1);
const PxU32 b1 = test.check<tInflate>(c1, e1);
if(b0 && b1) // if both intersect, push the one with the further center on the stack for later
{
// & 1 because FAllGrtr behavior differs across platforms
const PxU32 bit = FAllGrtr(V3Dot(V3Sub(c1, c0), test.mDir), FZero()) & 1;
stack[stackIndex++] = children + bit;
node = children + (1 - bit);
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
}
else if(b0)
node = children;
else if(b1)
node = children + 1;
else
goto skip_leaf_code;
}
if(!doLeafTest<tInflate, tHasIndices, Node>(node, test, bounds, tree.getIndices(), maxDist, pcb))
return false;
skip_leaf_code:;
}
}
return true;
}
};
struct TraversalControl
{
enum Enum {
eDontGoDeeper,
eGoDeeper,
eGoDeeperNegFirst,
eAbort
};
};
template<typename T>
void traverseBVH(const Gu::BVHNode* nodes, T& traversalController, PxI32 rootNodeIndex = 0)
{
PxI32 index = rootNodeIndex;
PxInlineArray<PxI32, RAW_TRAVERSAL_STACK_SIZE> todoStack;
while (true)
{
const Gu::BVHNode& a = nodes[index];
TraversalControl::Enum control = traversalController.analyze(a, index);
if (control == TraversalControl::eAbort)
return;
if (!a.isLeaf() && (control == TraversalControl::eGoDeeper || control == TraversalControl::eGoDeeperNegFirst))
{
if (control == TraversalControl::eGoDeeperNegFirst)
{
todoStack.pushBack(a.getPosIndex());
index = a.getNegIndex(); //index gets processed next - assign negative index to it
}
else
{
todoStack.pushBack(a.getNegIndex());
index = a.getPosIndex(); //index gets processed next - assign positive index to it
}
continue;
}
if (todoStack.empty()) break;
index = todoStack.popBack();
}
}
}
}
#endif // SQ_AABBTREEQUERY_H

View File

@@ -0,0 +1,197 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuAABBTreeUpdateMap.h"
#include "GuAABBTree.h"
#include "GuAABBTreeNode.h"
using namespace physx;
using namespace Gu;
static const PxU32 SHRINK_THRESHOLD = 1024;
void AABBTreeUpdateMap::initMap(PxU32 nbObjects, const AABBTree& tree)
{
if(!nbObjects)
{
release();
return;
}
// Memory management
{
const PxU32 mapSize = nbObjects;
const PxU32 targetCapacity = mapSize + (mapSize>>2);
PxU32 currentCapacity = mMapping.capacity();
if( ( targetCapacity < (currentCapacity>>1) ) && ( (currentCapacity-targetCapacity) > SHRINK_THRESHOLD ) )
{
// trigger reallocation of a smaller array, there is enough memory to save
currentCapacity = 0;
}
if(mapSize > currentCapacity)
{
// the mapping values are invalid and reset below in any case
// so there is no need to copy the values at all
mMapping.reset();
mMapping.reserve(targetCapacity); // since size is 0, reserve will also just allocate
}
mMapping.forceSize_Unsafe(mapSize);
for(PxU32 i=0;i<mapSize;i++)
mMapping[i] = INVALID_NODE_ID;
}
const PxU32 nbNodes = tree.getNbNodes();
const BVHNode* nodes = tree.getNodes();
const PxU32* indices = tree.getIndices();
for(TreeNodeIndex i=0;i<nbNodes;i++)
{
if(nodes[i].isLeaf())
{
const PxU32 nbPrims = nodes[i].getNbRuntimePrimitives();
// PT: with multiple primitives per node, several mapping entries will point to the same node.
PX_ASSERT(nbPrims<16);
for(PxU32 j=0;j<nbPrims;j++)
{
const PxU32 index = nodes[i].getPrimitives(indices)[j];
PX_ASSERT(index<nbObjects);
mMapping[index] = i;
}
}
}
}
void AABBTreeUpdateMap::invalidate(PoolIndex prunerIndex0, PoolIndex prunerIndex1, AABBTree& tree)
{
// prunerIndex0 and prunerIndex1 are both indices into the pool, not handles
// prunerIndex0 is the index in the pruning pool for the node that was just removed
// prunerIndex1 is the index in the pruning pool for the node
const TreeNodeIndex nodeIndex0 = prunerIndex0<mMapping.size() ? mMapping[prunerIndex0] : INVALID_NODE_ID;
const TreeNodeIndex nodeIndex1 = prunerIndex1<mMapping.size() ? mMapping[prunerIndex1] : INVALID_NODE_ID;
//printf("map invalidate pi0:%x ni0:%x\t",prunerIndex0,nodeIndex0);
//printf(" replace with pi1:%x ni1:%x\n",prunerIndex1,nodeIndex1);
// if nodeIndex0 exists:
// invalidate node 0
// invalidate map prunerIndex0
// if nodeIndex1 exists:
// point node 1 to prunerIndex0
// map prunerIndex0 to node 1
// invalidate map prunerIndex1
// eventually:
// - node 0 is invalid
// - prunerIndex0 is mapped to node 1 or
// is not mapped if prunerIndex1 is not mapped
// is not mapped if prunerIndex0==prunerIndex1
// - node 1 points to prunerIndex0 or
// is invalid if prunerIndex1 is not mapped
// is invalid if prunerIndex0==prunerIndex1
// - prunerIndex1 is not mapped
BVHNode* nodes = tree.getNodes();
if(nodeIndex0!=INVALID_NODE_ID)
{
PX_ASSERT(nodeIndex0 < tree.getNbNodes());
PX_ASSERT(nodes[nodeIndex0].isLeaf());
BVHNode* node0 = nodes + nodeIndex0;
const PxU32 nbPrims = node0->getNbRuntimePrimitives();
PX_ASSERT(nbPrims < 16);
// retrieve the primitives pointer
PxU32* primitives = node0->getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// PT: look for desired pool index in the leaf
bool foundIt = false;
for(PxU32 i=0;i<nbPrims;i++)
{
PX_ASSERT(mMapping[primitives[i]] == nodeIndex0); // PT: all primitives should point to the same leaf node
if(prunerIndex0 == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims-1;
node0->setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
mMapping[prunerIndex0] = INVALID_NODE_ID; // invalidate the node index for pool 0
// PT: swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if(last!=i)
PxSwap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
if (nodeIndex1!=INVALID_NODE_ID)
{
// PT: with multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
// case there is nothing to do.
if(prunerIndex0!=prunerIndex1)
{
PX_ASSERT(nodeIndex1 < tree.getNbNodes());
PX_ASSERT(nodes[nodeIndex1].isLeaf());
BVHNode* node1 = nodes + nodeIndex1;
const PxU32 nbPrims = node1->getNbRuntimePrimitives();
PX_ASSERT(nbPrims < 16);
// retrieve the primitives pointer
PxU32* primitives = node1->getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// PT: look for desired pool index in the leaf
bool foundIt = false;
for(PxU32 i=0;i<nbPrims;i++)
{
PX_ASSERT(mMapping[primitives[i]] == nodeIndex1); // PT: all primitives should point to the same leaf node
if(prunerIndex1 == primitives[i])
{
foundIt = true;
primitives[i] = prunerIndex0; // point node 1 to the pool object moved to ID 0
mMapping[prunerIndex0] = nodeIndex1; // pool 0 is pointed at by node 1 now
mMapping[prunerIndex1] = INVALID_NODE_ID; // pool 1 is no longer stored in the tree
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
}
}

View File

@@ -0,0 +1,81 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABB_TREE_UPDATE_MAP_H
#define GU_AABB_TREE_UPDATE_MAP_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "foundation/PxArray.h"
namespace physx
{
namespace Gu
{
class AABBTree;
// Maps pruning pool indices to AABB-tree indices (i.e. locates the object's box in the aabb-tree nodes pool)
//
// The map spans pool indices from 0..N-1, where N is the number of pool entries when the map was created from a tree.
//
// It maps:
// to node indices in the range 0..M-1, where M is the number of nodes in the tree the map was created from,
// or to INVALID_NODE_ID if the pool entry was removed or pool index is outside input domain.
//
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID) is true at all times.
class AABBTreeUpdateMap
{
public:
AABBTreeUpdateMap() {}
~AABBTreeUpdateMap() {}
void release()
{
mMapping.reset();
}
// indices offset used when indices are shifted from objects (used for merged trees)
PX_PHYSX_COMMON_API void initMap(PxU32 numPoolObjects, const AABBTree& tree);
PX_PHYSX_COMMON_API void invalidate(PoolIndex poolIndex, PoolIndex replacementPoolIndex, AABBTree& tree);
PX_FORCE_INLINE TreeNodeIndex operator[](PxU32 poolIndex) const
{
return poolIndex < mMapping.size() ? mMapping[poolIndex] : INVALID_NODE_ID;
}
private:
// maps from prunerIndex (index in the PruningPool) to treeNode index
// this will only map to leaf tree nodes
PxArray<TreeNodeIndex> mMapping;
};
}
}
#endif

View File

@@ -0,0 +1,141 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuActorShapeMap.h"
#include "foundation/PxMemory.h"
using namespace physx;
using namespace Gu;
namespace physx
{
namespace Gu
{
/*PX_FORCE_INLINE*/ uint32_t PxComputeHash(const ActorShapeMap::ActorShape& owner)
{
PX_ASSERT(!(size_t(owner.mActor)&3));
PX_ASSERT(!(size_t(owner.mShape)&3));
const uint32_t id0 = uint32_t(size_t(owner.mActor)>>2);
const uint32_t id1 = uint32_t(size_t(owner.mShape)>>2);
const uint64_t mix = (uint64_t(id0)<<32)|uint64_t(id1);
return ::PxComputeHash(mix);
}
}
}
ActorShapeMap::ActorShapeMap() : mCacheSize(0), mCache(NULL)
{
}
ActorShapeMap::~ActorShapeMap()
{
PX_FREE(mCache);
}
void ActorShapeMap::resizeCache(PxU32 index)
{
PxU32 size = mCacheSize ? mCacheSize*2 : 64;
const PxU32 minSize = index+1;
if(minSize>size)
size = minSize*2;
Cache* items = PX_ALLOCATE(Cache, size, "Cache");
if(mCache)
PxMemCopy(items, mCache, mCacheSize*sizeof(Cache));
PxMemZero(items+mCacheSize, (size-mCacheSize)*sizeof(Cache));
PX_FREE(mCache);
mCache = items;
mCacheSize = size;
}
bool ActorShapeMap::add(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData actorShapeData)
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(actorIndex>=mCacheSize)
resizeCache(actorIndex);
//if(!mCache[actorIndex].mActor)
if(!mCache[actorIndex].mShape)
{
//mCache[actorIndex].mActor = actor;
mCache[actorIndex].mShape = shape;
mCache[actorIndex].mData = actorShapeData;
return true;
}
//PX_ASSERT(mCache[actorIndex].mActor==actor);
PX_ASSERT(mCache[actorIndex].mShape);
if(mCache[actorIndex].mShape==shape)
{
mCache[actorIndex].mData = actorShapeData;
return false;
}
}
return mDatabase.insert(ActorShape(actor, shape), actorShapeData);
}
bool ActorShapeMap::remove(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData* removed)
{
if(actorIndex!=PX_INVALID_INDEX)
{
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
if(mCache[actorIndex].mShape==shape)
{
//mCache[actorIndex].mActor = NULL;
mCache[actorIndex].mShape = NULL;
PX_ASSERT(!mDatabase.erase(ActorShape(actor, shape)));
if(removed)
*removed = mCache[actorIndex].mData;
return true;
}
}
PxHashMap<ActorShape, ActorShapeData>::Entry removedEntry;
const bool found = mDatabase.erase(ActorShape(actor, shape), removedEntry);
if(found && removed)
*removed = removedEntry.second;
return found;
}
ActorShapeData ActorShapeMap::find(PxU32 actorIndex, const void* actor, const void* shape) const
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(mCache[actorIndex].mShape==shape)
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
{
return mCache[actorIndex].mData;
}
}
const PxHashMap<ActorShape, ActorShapeData>::Entry* e = mDatabase.find(ActorShape(actor, shape));
PX_ASSERT(e);
return e->second;
}

View File

@@ -0,0 +1,906 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxFoundation.h"
#include "foundation/PxFPU.h"
#include "foundation/PxPlane.h"
#include "geometry/PxGeometryInternal.h"
#include "GuBVH.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTreeBuildStats.h"
#include "GuMeshFactory.h"
#include "GuQuery.h"
#include "CmSerialize.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
// PT: these two functions moved from cooking
bool BVHData::build(PxU32 nbBounds, const void* boundsData, PxU32 boundsStride, float enlargement, PxU32 nbPrimsPerLeaf, BVHBuildStrategy bs)
{
if(!nbBounds || !boundsData || boundsStride<sizeof(PxBounds3) || enlargement<0.0f || nbPrimsPerLeaf>=16)
return false;
mBounds.init(nbBounds);
if(nbBounds)
{
const PxU8* sB = reinterpret_cast<const PxU8*>(boundsData);
for(PxU32 i=0; i<nbBounds-1; i++)
{
inflateBounds<true>(mBounds.getBounds()[i], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
sB += boundsStride;
}
inflateBounds<false>(mBounds.getBounds()[nbBounds-1], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
}
mNbIndices = nbBounds;
// build the BVH
BuildStats stats;
NodeAllocator nodeAllocator;
mIndices = buildAABBTree(AABBTreeBuildParams(nbPrimsPerLeaf, nbBounds, &mBounds, bs), nodeAllocator, stats);
if(!mIndices)
return false;
// store the computed hierarchy
mNbNodes = stats.getCount();
mNodes = PX_ALLOCATE(BVHNode, mNbNodes, "AABB tree nodes");
PX_ASSERT(mNbNodes==nodeAllocator.mTotalNbNodes);
// store the results into BVHNode list
if(nbPrimsPerLeaf==1)
{
// PT: with 1 prim/leaf we don't need the remap table anymore, we can just store the prim index in each tree node directly.
flattenTree(nodeAllocator, mNodes, mIndices);
PX_FREE(mIndices);
}
else
flattenTree(nodeAllocator, mNodes);
return true;
}
// A.B. move to load code
#define PX_BVH_STRUCTURE_VERSION 1
bool BVHData::save(PxOutputStream& stream, bool endian) const
{
// write header
if(!writeHeader('B', 'V', 'H', 'S', PX_BVH_STRUCTURE_VERSION, endian, stream))
return false;
// write mData members
writeDword(mNbIndices, endian, stream);
writeDword(mNbNodes, endian, stream);
// write indices and bounds
for(PxU32 i=0; i<mNbIndices; i++)
writeDword(mIndices[i], endian, stream);
const PxBounds3* bounds = mBounds.getBounds();
for(PxU32 i=0; i<mNbIndices; i++)
{
writeFloatBuffer(&bounds[i].minimum.x, 3, endian, stream);
writeFloatBuffer(&bounds[i].maximum.x, 3, endian, stream);
}
// write nodes
for(PxU32 i=0; i<mNbNodes; i++)
{
writeDword(mNodes[i].mData, endian, stream);
writeFloatBuffer(&mNodes[i].mBV.minimum.x, 3, endian, stream);
writeFloatBuffer(&mNodes[i].mBV.maximum.x, 3, endian, stream);
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
// PT: temporary for Kit
BVH::BVH(const PxBVHInternalData& data) :
PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlags(0)),
mMeshFactory (NULL)
{
mData.mNbIndices = data.mNbIndices;
mData.mNbNodes = data.mNbNodes;
mData.mIndices = data.mIndices;
mData.mNodes = reinterpret_cast<BVHNode*>(data.mNodes);
mData.mBounds.setBounds(reinterpret_cast<PxBounds3*>(data.mBounds));
}
bool BVH::getInternalData(PxBVHInternalData& data, bool takeOwnership) const
{
data.mNbIndices = mData.mNbIndices;
data.mNbNodes = mData.mNbNodes;
data.mNodeSize = sizeof(BVHNode);
data.mNodes = mData.mNodes;
data.mIndices = mData.mIndices;
data.mBounds = const_cast<PxBounds3*>(mData.mBounds.getBounds());
if(takeOwnership)
const_cast<BVH*>(this)->mData.mBounds.takeOwnership();
return true;
}
bool physx::PxGetBVHInternalData(PxBVHInternalData& data, const PxBVH& bvh, bool takeOwnership)
{
return static_cast<const BVH&>(bvh).getInternalData(data, takeOwnership);
}
//~ PT: temporary for Kit
BVH::BVH(MeshFactory* factory) :
PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE),
mMeshFactory (factory)
{
}
BVH::BVH(MeshFactory* factory, BVHData& bvhData) :
PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE),
mMeshFactory (factory),
mData (bvhData)
{
}
BVH::~BVH()
{
}
bool BVH::init(PxU32 nbPrims, AABBTreeBounds* bounds, const void* boundsData, PxU32 stride, BVHBuildStrategy bs, PxU32 nbPrimsPerLeaf, float enlargement)
{
if(!nbPrims)
return false;
if(bounds)
{
mData.mBounds.moveFrom(*bounds);
}
else
{
mData.mBounds.init(nbPrims);
PxBounds3* dst = mData.mBounds.getBounds();
if(stride==sizeof(PxBounds3))
{
PxMemCopy(dst, boundsData, sizeof(PxBounds3)*nbPrims);
}
else
{
if(nbPrims)
{
const PxU8* sB = reinterpret_cast<const PxU8*>(boundsData);
for(PxU32 i=0; i<nbPrims-1; i++)
{
inflateBounds<true>(mData.mBounds.getBounds()[i], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
sB += stride;
}
inflateBounds<false>(mData.mBounds.getBounds()[nbPrims-1], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
}
}
}
mData.mNbIndices = nbPrims;
// build the BVH
BuildStats stats;
NodeAllocator nodeAllocator;
mData.mIndices = buildAABBTree(AABBTreeBuildParams(nbPrimsPerLeaf, nbPrims, &mData.mBounds, bs), nodeAllocator, stats);
if(!mData.mIndices)
return false;
// store the computed hierarchy
mData.mNbNodes = stats.getCount();
mData.mNodes = PX_ALLOCATE(BVHNode, mData.mNbNodes, "AABB tree nodes");
PX_ASSERT(mData.mNbNodes==nodeAllocator.mTotalNbNodes);
// store the results into BVHNode list
if(nbPrimsPerLeaf==1)
{
// PT: with 1 prim/leaf we don't need the remap table anymore, we can just store the prim index in each tree node directly.
flattenTree(nodeAllocator, mData.mNodes, mData.mIndices);
PX_FREE(mData.mIndices);
}
else
flattenTree(nodeAllocator, mData.mNodes);
return true;
}
bool BVH::load(PxInputStream& stream)
{
// Import header
PxU32 version;
bool mismatch;
if(!readHeader('B', 'V', 'H', 'S', version, mismatch, stream))
return false;
// read numVolumes, numNodes together
//ReadDwordBuffer(&mData.mNbIndices, 2, mismatch, stream);
mData.mNbIndices = readDword(mismatch, stream);
mData.mNbNodes = readDword(mismatch, stream);
// read indices
mData.mIndices = PX_ALLOCATE(PxU32, mData.mNbIndices, "BVH indices");
ReadDwordBuffer(mData.mIndices, mData.mNbIndices, mismatch, stream);
// read bounds
mData.mBounds.init(mData.mNbIndices);
readFloatBuffer(&mData.mBounds.getBounds()->minimum.x, mData.mNbIndices*(3 + 3), mismatch, stream);
// read nodes
mData.mNodes = PX_ALLOCATE(BVHNode, mData.mNbNodes, "BVH nodes");
for(PxU32 i = 0; i < mData.mNbNodes; i++)
{
ReadDwordBuffer(&mData.mNodes[i].mData, 1, mismatch, stream);
readFloatBuffer(&mData.mNodes[i].mBV.minimum.x, 3 + 3, mismatch, stream);
}
return true;
}
void BVH::release()
{
decRefCount();
}
void BVH::onRefCountZero()
{
::onRefCountZero(this, mMeshFactory, false, "PxBVH::release: double deletion detected!");
}
namespace
{
struct BVHTree
{
PX_FORCE_INLINE BVHTree(const BVHData& data) : mRootNode(data.mNodes), mIndices(data.mIndices) {}
const BVHNode* getNodes() const { return mRootNode; }
const PxU32* getIndices() const { return mIndices; }
const BVHNode* mRootNode;
const PxU32* mIndices;
};
}
namespace
{
struct RaycastAdapter
{
RaycastAdapter(PxBVH::RaycastCallback& cb) : mCallback(cb), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
{
if(mAbort || !mCallback.reportHit(index, distance))
{
mAbort = true;
return false;
}
return true;
}
PxBVH::RaycastCallback& mCallback;
bool mAbort;
PX_NOCOPY(RaycastAdapter)
};
}
bool BVH::raycast(const PxVec3& origin, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)
RaycastAdapter ra(cb);
if(mData.mIndices)
return AABBTreeRaycast<false, true, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), origin, unitDir, distance, PxVec3(0.0f), ra);
else
return AABBTreeRaycast<false, false, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), origin, unitDir, distance, PxVec3(0.0f), ra);
}
namespace
{
struct OverlapAdapter
{
OverlapAdapter(PxBVH::OverlapCallback& cb) : mCallback(cb), mAbort(false) {}
PX_FORCE_INLINE bool invoke(PxU32 index)
{
if(mAbort || !mCallback.reportHit(index))
{
mAbort = true;
return false;
}
return true;
}
PxBVH::OverlapCallback& mCallback;
bool mAbort;
PX_NOCOPY(OverlapAdapter)
};
}
bool BVH::overlap(const ShapeData& queryVolume, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)
OverlapAdapter oa(cb);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
if(mData.mIndices)
return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
if(mData.mIndices)
return AABBTreeOverlap<true, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
}
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, 1.0f);
if(mData.mIndices)
return AABBTreeOverlap<true, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
if(mData.mIndices)
return AABBTreeOverlap<true, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
if(mData.mIndices)
return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
return false;
}
bool BVH::overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
const ShapeData queryVolume(geom, pose, 0.0f);
return overlap(queryVolume, cb, flags);
}
bool BVH::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
RaycastAdapter ra(cb);
if(mData.mIndices)
return AABBTreeRaycast<true, true, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), aabb.getCenter(), unitDir, distance, aabb.getExtents(), ra);
else
return AABBTreeRaycast<true, false, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), aabb.getCenter(), unitDir, distance, aabb.getExtents(), ra);
}
bool BVH::sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
const ShapeData queryVolume(geom, pose, 0.0f);
return sweep(queryVolume, unitDir, distance, cb, flags);
}
namespace
{
PX_FORCE_INLINE bool planesAABBOverlap(const PxVec3& m, const PxVec3& d, const PxPlane* p, PxU32& outClipMask, PxU32 inClipMask)
{
PxU32 mask = 1;
PxU32 tmpOutClipMask = 0;
while(mask<=inClipMask)
{
if(inClipMask & mask)
{
const float NP = d.x*fabsf(p->n.x) + d.y*fabsf(p->n.y) + d.z*fabsf(p->n.z);
const float MP = m.x*p->n.x + m.y*p->n.y + m.z*p->n.z + p->d;
if(NP < MP)
return false;
if((-NP) < MP)
tmpOutClipMask |= mask;
}
mask+=mask;
p++;
}
outClipMask = tmpOutClipMask;
return true;
}
struct FrustumTest
{
FrustumTest(PxU32 nbPlanes, const PxPlane* planes) : mPlanes(planes), mMask((1<<nbPlanes)-1), mNbPlanes(nbPlanes), mOutClipMask(0)
{
}
PX_FORCE_INLINE PxIntBool operator()(const Vec3V boxCenter, const Vec3V boxExtents) const
{
// PT: TODO: rewrite all this in SIMD
PxVec3 center, extents;
V3StoreU(boxCenter, center);
V3StoreU(boxExtents, extents);
if(!planesAABBOverlap(center, extents, mPlanes, mOutClipMask, mMask))
return PxIntFalse;
// PT: unfortunately the AABBTreeOverlap template doesn't support this case where we know we can
// immediately dump the rest of the tree (i.e. the old "containment tests" in Opcode). We might
// want to revisit this at some point.
//
// In fact it's worse than this: we lost the necessary data to make this quick, in "flattenTree"
// when going from AABBTreeBuildNodes to BVHNodes. The BVHNodes lost the primitive-related info
// for internal (non-leaf) nodes so we cannot just dump a list of primitives when an internal
// node is fully visible (like we did in Opcode 1.x). Best we can do is keep traversing the tree
// and skip VFC tests.
//if(!outClipMask)
return PxIntTrue;
}
const PxPlane* mPlanes;
const PxU32 mMask;
const PxU32 mNbPlanes;
mutable PxU32 mOutClipMask;
PX_NOCOPY(FrustumTest)
};
}
static bool dumpNode(OverlapAdapter& oa, const BVHNode* const nodeBase, const BVHNode* node0, const PxU32* indices)
{
PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
stack[0] = node0;
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const BVHNode* node = stack[--stackIndex];
while(1)
{
if(node->isLeaf())
{
PxU32 nbPrims = node->getNbPrimitives();
const PxU32* prims = indices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = indices ? *prims++ : node->getPrimitiveIndex();
if(!oa.invoke(primIndex))
return false;
}
break;
}
else
{
const BVHNode* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
}
}
}
return true;
}
bool BVH::cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)
OverlapAdapter oa(cb);
const FrustumTest test(nbPlanes, planes);
if(0)
{
// PT: this vanilla codepath is slower
if(mData.mIndices)
return AABBTreeOverlap<true, FrustumTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
else
return AABBTreeOverlap<false, FrustumTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
}
else
{
const PxBounds3* bounds = mData.mBounds.getBounds();
const bool hasIndices = mData.mIndices!=NULL;
PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const BVHNode* const nodeBase = mData.mNodes;
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const BVHNode* node = stack[--stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV(&center, &extents);
while(test(center, extents))
{
if(!test.mOutClipMask)
{
if(!dumpNode(oa, nodeBase, node, mData.mIndices))
return false;
break;
}
else
{
if(node->isLeaf())
{
PxU32 nbPrims = node->getNbPrimitives();
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = hasIndices ? node->getPrimitives(mData.mIndices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = hasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds, primIndex);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
const Vec4V extents_ = V4Scale(extents2, halfV);
const Vec4V center_ = V4Scale(center2, halfV);
if(!test(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
if(!oa.invoke(primIndex))
return false;
}
break;
}
const BVHNode* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
node->getAABBCenterExtentsV(&center, &extents);
}
}
}
return true;
}
}
void BVH::refit()
{
mData.fullRefit(mData.mBounds.getBounds());
}
bool BVH::updateBoundsInternal(PxU32 localIndex, const PxBounds3& newBounds)
{
if(localIndex>=mData.mNbIndices)
return false;
PxBounds3* bounds = mData.mBounds.getBounds();
bounds[localIndex] = newBounds;
// Lazy-create update map
if(!mData.getUpdateMap())
mData.createUpdateMap(mData.mNbIndices);
PxU32* mMapping = mData.getUpdateMap();
if(mMapping)
{
const PxU32 treeNodeIndex = mMapping[localIndex];
if(treeNodeIndex!=0xffffffff)
{
mData.markNodeForRefit(treeNodeIndex);
return true;
}
}
return false;
}
bool BVH::updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds)
{
return updateBoundsInternal(boundsIndex, newBounds);
}
void BVH::partialRefit()
{
mData.refitMarkedNodes(mData.mBounds.getBounds());
}
bool BVH::traverse(TraversalCallback& cb) const
{
// PT: copy-pasted from AABBTreeOverlap and modified
PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const BVHNode* const nodeBase = mData.getNodes();
stack[0] = nodeBase;
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const BVHNode* node = stack[--stackIndex];
while(cb.visitNode(node->mBV))
{
if(node->isLeaf())
{
if(mData.getIndices())
{
if(!cb.reportLeaf(node->getNbPrimitives(), node->getPrimitives(mData.getIndices())))
return false;
}
else
{
PX_ASSERT(node->getNbPrimitives()==1);
const PxU32 primIndex = node->getPrimitiveIndex();
if(!cb.reportLeaf(node->getNbPrimitives(), &primIndex))
return false;
}
break;
}
const BVHNode* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
}
}
return true;
}
#include "geometry/PxMeshQuery.h"
#define GU_BVH_STACK_SIZE 1024 // Default size of local stacks for non-recursive traversals.
static bool doLeafVsLeaf(PxReportCallback<PxGeomIndexPair>& callback, const BVHNode* node0, const PxBounds3* bounds0, const PxU32* indices0,
const BVHNode* node1, const PxBounds3* bounds1, const PxU32* indices1,
bool& abort)
{
PxGeomIndexPair* dst = callback.mBuffer;
PxU32 capacity = callback.mCapacity;
PxU32 currentSize = callback.mSize;
PX_ASSERT(currentSize<capacity);
bool foundHit = false;
abort = false;
const FloatV halfV = FLoad(0.5f);
PxU32 nbPrims0 = node0->getNbPrimitives();
const PxU32* prims0 = indices0 ? node0->getPrimitives(indices0) : NULL;
while(nbPrims0--)
{
const PxU32 primIndex0 = prims0 ? *prims0++ : node0->getPrimitiveIndex();
Vec3V center0, extents0;
{
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds0, primIndex0);
extents0 = Vec3V_From_Vec4V(V4Scale(extents2, halfV));
center0 = Vec3V_From_Vec4V(V4Scale(center2, halfV));
}
PxU32 nbPrims1 = node1->getNbPrimitives();
const PxU32* prims1 = indices1 ? node1->getPrimitives(indices1) : NULL;
while(nbPrims1--)
{
const PxU32 primIndex1 = prims1 ? *prims1++ : node1->getPrimitiveIndex();
Vec3V center1, extents1;
{
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds1, primIndex1);
extents1 = Vec3V_From_Vec4V(V4Scale(extents2, halfV));
center1 = Vec3V_From_Vec4V(V4Scale(center2, halfV));
}
if(PxIntBool(V3AllGrtrOrEq(V3Add(extents0, extents1), V3Abs(V3Sub(center1, center0)))))
{
foundHit = true;
// PT: TODO: refactor callback management code with BVH34
dst[currentSize].id0 = primIndex0;
dst[currentSize].id1 = primIndex1;
currentSize++;
if(currentSize==capacity)
{
callback.mSize = 0;
if(!callback.flushResults(currentSize, dst))
{
abort = true;
return foundHit;
}
dst = callback.mBuffer;
capacity = callback.mCapacity;
currentSize = callback.mSize;
}
}
}
}
callback.mSize = currentSize;
return foundHit;
}
static PX_FORCE_INLINE void pushChildren(PxGeomIndexPair* stack, PxU32& nb, PxU32 a, PxU32 b, PxU32 c, PxU32 d)
{
stack[nb].id0 = a;
stack[nb].id1 = b;
nb++;
stack[nb].id0 = c;
stack[nb].id1 = d;
nb++;
}
static PX_NOINLINE bool abortQuery(PxReportCallback<PxGeomIndexPair>& callback, bool& abort)
{
abort = true;
callback.mSize = 0;
return true;
}
static bool BVH_BVH(PxReportCallback<PxGeomIndexPair>& callback, const BVH& tree0, const BVH& tree1, bool& _abort)
{
const BVHNode* PX_RESTRICT node0 = tree0.getNodes();
const BVHNode* PX_RESTRICT node1 = tree1.getNodes();
PX_ASSERT(node0 && node1);
const PxBounds3* bounds0 = tree0.getData().mBounds.getBounds();
const PxBounds3* bounds1 = tree1.getData().mBounds.getBounds();
const PxU32* indices0 = tree0.getIndices();
const PxU32* indices1 = tree1.getIndices();
{
PxU32 nb=1;
PxGeomIndexPair stack[GU_BVH_STACK_SIZE];
stack[0].id0 = 0;
stack[0].id1 = 0;
bool status = false;
const BVHNode* const root0 = node0;
const BVHNode* const root1 = node1;
do
{
const PxGeomIndexPair& childData = stack[--nb];
node0 = root0 + childData.id0;
node1 = root1 + childData.id1;
if(node0->mBV.intersects(node1->mBV))
{
const PxU32 isLeaf0 = node0->isLeaf();
const PxU32 isLeaf1 = node1->isLeaf();
if(isLeaf0)
{
if(isLeaf1)
{
bool abort;
if(doLeafVsLeaf(callback, node0, bounds0, indices0, node1, bounds1, indices1, abort))
status = true;
if(abort)
return abortQuery(callback, _abort);
}
else
{
const PxU32 posIndex1 = node1->getPosIndex();
pushChildren(stack, nb, childData.id0, posIndex1, childData.id0, posIndex1 + 1);
}
}
else if(isLeaf1)
{
const PxU32 posIndex0 = node0->getPosIndex();
pushChildren(stack, nb, posIndex0, childData.id1, posIndex0 + 1, childData.id1);
}
else
{
const PxU32 posIndex0 = node0->getPosIndex();
const PxU32 posIndex1 = node1->getPosIndex();
pushChildren(stack, nb, posIndex0, posIndex1, posIndex0, posIndex1 + 1);
pushChildren(stack, nb, posIndex0 + 1, posIndex1, posIndex0 + 1, posIndex1 + 1);
}
}
}while(nb);
return status;
}
}
bool physx::PxFindOverlap(PxReportCallback<PxGeomIndexPair>& callback, const PxBVH& bvh0, const PxBVH& bvh1)
{
PX_SIMD_GUARD
// PT: TODO: refactor callback management code with BVH34
PxGeomIndexPair stackBuffer[256];
bool mustResetBuffer;
if(callback.mBuffer)
{
PX_ASSERT(callback.mCapacity);
mustResetBuffer = false;
}
else
{
callback.mBuffer = stackBuffer;
PX_ASSERT(callback.mCapacity<=256);
if(callback.mCapacity==0 || callback.mCapacity>256)
{
callback.mCapacity = 256;
}
callback.mSize = 0;
mustResetBuffer = true;
}
bool abort = false;
const bool status = BVH_BVH(callback, static_cast<const BVH&>(bvh0), static_cast<const BVH&>(bvh1), abort);
if(!abort)
{
const PxU32 currentSize = callback.mSize;
if(currentSize)
{
callback.mSize = 0;
callback.flushResults(currentSize, callback.mBuffer);
}
}
if(mustResetBuffer)
callback.mBuffer = NULL;
return status;
}

View File

@@ -0,0 +1,140 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_H
#define GU_BVH_H
#include "geometry/PxBVH.h"
#include "CmRefCountable.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
#include "GuAABBTreeBounds.h"
#include "GuAABBTree.h"
namespace physx
{
struct PxBVHInternalData;
class PxInputStream;
namespace Gu
{
class MeshFactory;
struct BVHNode;
class ShapeData;
class BVHData : public BVHPartialRefitData
{
public:
BVHData() {}
BVHData(BVHData& other)
{
mNbIndices = other.mNbIndices;
mNbNodes = other.mNbNodes;
mIndices = other.mIndices;
mNodes = other.mNodes;
mBounds.moveFrom(other.mBounds);
other.mIndices = NULL;
other.mNodes = NULL;
}
~BVHData()
{
if(mBounds.ownsMemory())
{
mBounds.release();
PX_FREE(mIndices);
PX_FREE(mNodes); // PT: TODO: fix this, unify with AABBTree version
}
mNbNodes = 0;
mNbIndices = 0;
}
PX_PHYSX_COMMON_API bool build(PxU32 nbBounds, const void* boundsData, PxU32 boundsStride, float enlargement, PxU32 numPrimsPerLeaf, BVHBuildStrategy bs);
PX_PHYSX_COMMON_API bool save(PxOutputStream& stream, bool endian) const;
AABBTreeBounds mBounds;
};
/**
\brief Represents a BVH.
*/
class BVH : public PxBVH, public PxUserAllocated, public Cm::RefCountable
{
public:
// PT: TODO: revisit these PX_PHYSX_COMMON_API calls. At the end of the day the issue is that things like PxUserAllocated aren't exported.
PX_PHYSX_COMMON_API BVH(MeshFactory* factory);
PX_PHYSX_COMMON_API BVH(MeshFactory* factory, BVHData& data);
PX_PHYSX_COMMON_API BVH(const PxBVHInternalData& data);
virtual ~BVH();
PX_PHYSX_COMMON_API bool init(PxU32 nbPrims, AABBTreeBounds* bounds, const void* boundsData, PxU32 stride, BVHBuildStrategy bs, PxU32 nbPrimsPerLeaf, float enlargement);
bool load(PxInputStream& desc);
void release();
// PxBVH
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual bool cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
virtual PxU32 getNbBounds() const PX_OVERRIDE { return mData.mNbIndices; }
virtual const PxBounds3* getBounds() const PX_OVERRIDE { return mData.mBounds.getBounds(); }
virtual void refit() PX_OVERRIDE;
virtual bool updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds) PX_OVERRIDE;
virtual void partialRefit() PX_OVERRIDE;
virtual bool traverse(TraversalCallback& cb) const PX_OVERRIDE;
//~PxBVH
// Cm::RefCountable
virtual void onRefCountZero() PX_OVERRIDE;
//~Cm::RefCountable
PX_FORCE_INLINE const BVHNode* getNodes() const { return mData.mNodes; }
PX_FORCE_INLINE const PxU32* getIndices() const { return mData.mIndices; }
PX_FORCE_INLINE const BVHData& getData() const { return mData; }
bool getInternalData(PxBVHInternalData&, bool) const;
bool updateBoundsInternal(PxU32 localIndex, const PxBounds3& bounds);
// PT: alternative implementations directly working on shape data
bool overlap(const ShapeData& shapeData, OverlapCallback& cb, PxGeometryQueryFlags flags) const;
bool sweep(const ShapeData& shapeData, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const;
private:
MeshFactory* mMeshFactory;
BVHData mData;
};
}
}
#endif

View File

@@ -0,0 +1,257 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_TESTS_SIMD_H
#define GU_BVH_TESTS_SIMD_H
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxVecMath.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct RayAABBTest
{
PX_FORCE_INLINE RayAABBTest(const PxVec3& origin_, const PxVec3& unitDir_, PxReal maxDist, const PxVec3& inflation_)
: mOrigin(V3LoadU(origin_))
, mDir(V3LoadU(unitDir_))
, mDirYZX(V3PermYZX(mDir))
, mInflation(V3LoadU(inflation_))
, mAbsDir(V3Abs(mDir))
, mAbsDirYZX(V3PermYZX(mAbsDir))
{
const PxVec3 ext = maxDist >= PX_MAX_F32 ? PxVec3( unitDir_.x == 0 ? origin_.x : PxSign(unitDir_.x)*PX_MAX_F32,
unitDir_.y == 0 ? origin_.y : PxSign(unitDir_.y)*PX_MAX_F32,
unitDir_.z == 0 ? origin_.z : PxSign(unitDir_.z)*PX_MAX_F32)
: origin_ + unitDir_ * maxDist;
mRayMin = V3Min(mOrigin, V3LoadU(ext));
mRayMax = V3Max(mOrigin, V3LoadU(ext));
}
PX_FORCE_INLINE void setDistance(PxReal distance)
{
const Vec3V ext = V3ScaleAdd(mDir, FLoad(distance), mOrigin);
mRayMin = V3Min(mOrigin, ext);
mRayMax = V3Max(mOrigin, ext);
}
template<bool TInflate>
PX_FORCE_INLINE PxU32 check(const Vec3V center, const Vec3V extents) const
{
const Vec3V iExt = TInflate ? V3Add(extents, mInflation) : extents;
// coordinate axes
const Vec3V nodeMax = V3Add(center, iExt);
const Vec3V nodeMin = V3Sub(center, iExt);
// cross axes
const Vec3V offset = V3Sub(mOrigin, center);
const Vec3V offsetYZX = V3PermYZX(offset);
const Vec3V iExtYZX = V3PermYZX(iExt);
const Vec3V f = V3NegMulSub(mDirYZX, offset, V3Mul(mDir, offsetYZX));
const Vec3V g = V3MulAdd(iExt, mAbsDirYZX, V3Mul(iExtYZX, mAbsDir));
const BoolV
maskA = V3IsGrtrOrEq(nodeMax, mRayMin),
maskB = V3IsGrtrOrEq(mRayMax, nodeMin),
maskC = V3IsGrtrOrEq(g, V3Abs(f));
const BoolV andABCMasks = BAnd(BAnd(maskA, maskB), maskC);
return BAllEqTTTT(andABCMasks);
}
const Vec3V mOrigin, mDir, mDirYZX, mInflation, mAbsDir, mAbsDirYZX;
Vec3V mRayMin, mRayMax;
protected:
RayAABBTest& operator=(const RayAABBTest&);
};
// probably not worth having a SIMD version of this unless the traversal passes Vec3Vs
struct AABBAABBTest
{
PX_FORCE_INLINE AABBAABBTest(const PxTransform&t, const PxBoxGeometry&b)
: mCenter(V3LoadU(t.p))
, mExtents(V3LoadU(b.halfExtents))
{ }
PX_FORCE_INLINE AABBAABBTest(const PxBounds3& b)
: mCenter(V3LoadU(b.getCenter()))
, mExtents(V3LoadU(b.getExtents()))
{ }
PX_FORCE_INLINE PxIntBool operator()(const Vec3V center, const Vec3V extents) const
{
//PxVec3 c; PxVec3_From_Vec3V(center, c);
//PxVec3 e; PxVec3_From_Vec3V(extents, e);
//if(PxAbs(c.x - mCenter.x) > mExtents.x + e.x) return IntFalse;
//if(PxAbs(c.y - mCenter.y) > mExtents.y + e.y) return IntFalse;
//if(PxAbs(c.z - mCenter.z) > mExtents.z + e.z) return IntFalse;
//return IntTrue;
return PxIntBool(V3AllGrtrOrEq(V3Add(mExtents, extents), V3Abs(V3Sub(center, mCenter))));
}
private:
AABBAABBTest& operator=(const AABBAABBTest&);
const Vec3V mCenter, mExtents;
};
struct SphereAABBTest
{
PX_FORCE_INLINE SphereAABBTest(const PxTransform& t, const PxSphereGeometry& s)
: mCenter(V3LoadU(t.p))
, mRadius2(FLoad(s.radius * s.radius))
{}
PX_FORCE_INLINE SphereAABBTest(const PxVec3& center, PxF32 radius)
: mCenter(V3LoadU(center))
, mRadius2(FLoad(radius * radius))
{}
PX_FORCE_INLINE PxIntBool operator()(const Vec3V boxCenter, const Vec3V boxExtents) const
{
const Vec3V offset = V3Sub(mCenter, boxCenter);
const Vec3V closest = V3Clamp(offset, V3Neg(boxExtents), boxExtents);
const Vec3V d = V3Sub(offset, closest);
return PxIntBool(BAllEqTTTT(FIsGrtrOrEq(mRadius2, V3Dot(d, d))));
}
private:
SphereAABBTest& operator=(const SphereAABBTest&);
const Vec3V mCenter;
const FloatV mRadius2;
};
// The Opcode capsule-AABB traversal test seems to be *exactly* the same as the ray-box test inflated by the capsule radius (so not a true capsule/box test)
// and the code for the ray-box test is better. TODO: check the zero length case and use the sphere traversal if this one fails.
// (OTOH it's not that hard to adapt the Ray-AABB test to a capsule test)
struct CapsuleAABBTest: private RayAABBTest
{
PX_FORCE_INLINE CapsuleAABBTest(const PxVec3& origin, const PxVec3& unitDir, const PxReal length, const PxVec3& inflation)
: RayAABBTest(origin, unitDir, length, inflation)
{}
PX_FORCE_INLINE PxIntBool operator()(const Vec3VArg center, const Vec3VArg extents) const
{
return PxIntBool(RayAABBTest::check<true>(center, extents));
}
};
template<bool fullTest>
struct OBBAABBTests
{
OBBAABBTests(const PxVec3& pos, const PxMat33& rot, const PxVec3& halfExtentsInflated)
{
const Vec3V eps = V3Load(1e-6f);
mT = V3LoadU(pos);
mExtents = V3LoadU(halfExtentsInflated);
// storing the transpose matrices yields a simpler SIMD test
mRT = Mat33V_From_PxMat33(rot.getTranspose());
mART = Mat33V(V3Add(V3Abs(mRT.col0), eps), V3Add(V3Abs(mRT.col1), eps), V3Add(V3Abs(mRT.col2), eps));
mBB_xyz = M33TrnspsMulV3(mART, mExtents);
if(fullTest)
{
const Vec3V eYZX = V3PermYZX(mExtents), eZXY = V3PermZXY(mExtents);
mBB_123 = V3MulAdd(eYZX, V3PermZXY(mART.col0), V3Mul(eZXY, V3PermYZX(mART.col0)));
mBB_456 = V3MulAdd(eYZX, V3PermZXY(mART.col1), V3Mul(eZXY, V3PermYZX(mART.col1)));
mBB_789 = V3MulAdd(eYZX, V3PermZXY(mART.col2), V3Mul(eZXY, V3PermYZX(mART.col2)));
}
}
// TODO: force inline it?
PxIntBool operator()(const Vec3V center, const Vec3V extents) const
{
const Vec3V t = V3Sub(mT, center);
// class I - axes of AABB
if(V3OutOfBounds(t, V3Add(extents, mBB_xyz)))
return PxIntFalse;
const Vec3V rX = mRT.col0, rY = mRT.col1, rZ = mRT.col2;
const Vec3V arX = mART.col0, arY = mART.col1, arZ = mART.col2;
const FloatV eX = V3GetX(extents), eY = V3GetY(extents), eZ = V3GetZ(extents);
const FloatV tX = V3GetX(t), tY = V3GetY(t), tZ = V3GetZ(t);
// class II - axes of OBB
{
const Vec3V v = V3ScaleAdd(rZ, tZ, V3ScaleAdd(rY, tY, V3Scale(rX, tX)));
const Vec3V v2 = V3ScaleAdd(arZ, eZ, V3ScaleAdd(arY, eY, V3ScaleAdd(arX, eX, mExtents)));
if(V3OutOfBounds(v, v2))
return PxIntFalse;
}
if(!fullTest)
return PxIntTrue;
// class III - edge cross products. Almost all OBB tests early-out with type I or type II,
// so early-outs here probably aren't useful (TODO: profile)
const Vec3V va = V3NegScaleSub(rZ, tY, V3Scale(rY, tZ));
const Vec3V va2 = V3ScaleAdd(arY, eZ, V3ScaleAdd(arZ, eY, mBB_123));
const BoolV ba = BOr(V3IsGrtr(va, va2), V3IsGrtr(V3Neg(va2), va));
const Vec3V vb = V3NegScaleSub(rX, tZ, V3Scale(rZ, tX));
const Vec3V vb2 = V3ScaleAdd(arX, eZ, V3ScaleAdd(arZ, eX, mBB_456));
const BoolV bb = BOr(V3IsGrtr(vb, vb2), V3IsGrtr(V3Neg(vb2), vb));
const Vec3V vc = V3NegScaleSub(rY, tX, V3Scale(rX, tY));
const Vec3V vc2 = V3ScaleAdd(arX, eY, V3ScaleAdd(arY, eX, mBB_789));
const BoolV bc = BOr(V3IsGrtr(vc, vc2), V3IsGrtr(V3Neg(vc2), vc));
return PxIntBool(BAllEqFFFF(BOr(ba, BOr(bb,bc))));
}
Vec3V mExtents; // extents of OBB
Vec3V mT; // translation of OBB
Mat33V mRT; // transpose of rotation matrix of OBB
Mat33V mART; // transpose of mRT, padded by epsilon
Vec3V mBB_xyz; // extents of OBB along coordinate axes
Vec3V mBB_123; // projections of extents onto edge-cross axes
Vec3V mBB_456;
Vec3V mBB_789;
};
typedef OBBAABBTests<true> OBBAABBTest;
}
}
#endif

View File

@@ -0,0 +1,630 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuBounds.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "GuInternal.h"
#include "CmUtils.h"
#include "GuConvexMesh.h"
#include "GuConvexMeshData.h"
#include "GuTriangleMesh.h"
#include "GuTetrahedronMesh.h"
#include "GuHeightFieldData.h"
#include "GuHeightField.h"
#include "GuConvexUtilsInternal.h"
#include "GuBoxConversion.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
using namespace physx;
using namespace Gu;
using namespace aos;
// Compute global box for current node. The box is stored in mBV.
void Gu::computeGlobalBox(PxBounds3& bounds, PxU32 nbPrims, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT primitives)
{
PX_ASSERT(boxes);
PX_ASSERT(primitives);
PX_ASSERT(nbPrims);
Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
for (PxU32 i=1; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
minV = V4Min(minV, V4LoadU(&boxes[index].minimum.x));
maxV = V4Max(maxV, V4LoadU(&boxes[index].maximum.x));
}
StoreBounds(bounds, minV, maxV);
}
void Gu::computeBoundsAroundVertices(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* PX_RESTRICT verts)
{
// PT: we can safely V4LoadU the first N-1 vertices. We must V3LoadU the last vertex, to make sure we don't read
// invalid memory. Since we have to special-case that last vertex anyway, we reuse that code to also initialize
// the minV/maxV values (bypassing the need for a 'setEmpty()' initialization).
if(!nbVerts)
{
bounds.setEmpty();
return;
}
PxU32 nbSafe = nbVerts-1;
// PT: read last (unsafe) vertex using V3LoadU, initialize minV/maxV
const Vec4V lastVertexV = Vec4V_From_Vec3V(V3LoadU(&verts[nbSafe].x));
Vec4V minV = lastVertexV;
Vec4V maxV = lastVertexV;
// PT: read N-1 first (safe) vertices using V4LoadU
while(nbSafe--)
{
const Vec4V vertexV = V4LoadU(&verts->x);
verts++;
minV = V4Min(minV, vertexV);
maxV = V4Max(maxV, vertexV);
}
StoreBounds(bounds, minV, maxV);
}
void Gu::computeLocalBoundsAndGeomEpsilon(const PxVec3* vertices, PxU32 nbVerties, PxBounds3& localBounds, PxReal& geomEpsilon)
{
computeBoundsAroundVertices(localBounds, nbVerties, vertices);
// Derive a good geometric epsilon from local bounds. We must do this before bounds extrusion for heightfields.
//
// From Charles Bloom:
// "Epsilon must be big enough so that the consistency condition abs(D(Hit))
// <= Epsilon is satisfied for all queries. You want the smallest epsilon
// you can have that meets that constraint. Normal floats have a 24 bit
// mantissa. When you do any float addition, you may have round-off error
// that makes the result off by roughly 2^-24 * result. Our result is
// scaled by the position values. If our world is strictly required to be
// in a box of world size W (each coordinate in -W to W), then the maximum
// error is 2^-24 * W. Thus Epsilon must be at least >= 2^-24 * W. If
// you're doing coordinate transforms, that may scale your error up by some
// amount, so you'll need a bigger epsilon. In general something like
// 2^-22*W is reasonable. If you allow scaled transforms, it needs to be
// something like 2^-22*W*MAX_SCALE."
// PT: TODO: runtime checkings for this
PxReal eps = 0.0f;
for (PxU32 i = 0; i < 3; i++)
eps = PxMax(eps, PxMax(PxAbs(localBounds.maximum[i]), PxAbs(localBounds.minimum[i])));
eps *= powf(2.0f, -22.0f);
geomEpsilon = eps;
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxMat33& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
c = rot.transform(bounds.mCenter) + pos;
ext = Cm::basisExtent(rot.column0, rot.column1, rot.column2, bounds.mExtents);
}
// PT: this one may have duplicates in GuBV4_BoxSweep_Internal.h & GuBV4_Raycast.cpp
static PX_FORCE_INLINE Vec4V multiply3x3V(const Vec4V p, const PxMat33Padded& mat_Padded)
{
Vec4V ResV = V4Scale(V4LoadU(&mat_Padded.column0.x), V4GetX(p));
ResV = V4Add(ResV, V4Scale(V4LoadU(&mat_Padded.column1.x), V4GetY(p)));
ResV = V4Add(ResV, V4Scale(V4LoadU(&mat_Padded.column2.x), V4GetZ(p)));
return ResV;
}
static PX_FORCE_INLINE void transformNoEmptyTestV(PxVec3p& c, PxVec3p& ext, const PxMat33Padded& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
const Vec4V boundsCenterV = V4LoadU(&bounds.mCenter.x); // PT: this load is safe since extents follow center in the class
// PT: unfortunately we can't V4LoadU 'pos' directly (it can come directly from users!). So we have to live with this for now:
const Vec4V posV = Vec4V_From_Vec3V(V3LoadU(&pos.x));
// PT: but eventually we'd like to use the "unsafe" version (e.g. by switching p&q in PxTransform), which would save 6 instructions on Win32
const Vec4V cV = V4Add(multiply3x3V(boundsCenterV, rot), posV);
// const Vec4V cV = V4Add(multiply3x3V(boundsCenterV, rot), V4LoadU(&pos.x)); // ### unsafe
V4StoreU(cV, &c.x);
// extended basis vectors
const Vec4V boundsExtentsV = V4LoadU(&bounds.mExtents.x); // PT: this load is safe since bounds are padded
const Vec4V c0V = V4Scale(V4LoadU(&rot.column0.x), V4GetX(boundsExtentsV));
const Vec4V c1V = V4Scale(V4LoadU(&rot.column1.x), V4GetY(boundsExtentsV));
const Vec4V c2V = V4Scale(V4LoadU(&rot.column2.x), V4GetZ(boundsExtentsV));
// find combination of base vectors that produces max. distance for each component = sum of abs()
Vec4V extentsV = V4Add(V4Abs(c0V), V4Abs(c1V));
extentsV = V4Add(extentsV, V4Abs(c2V));
V4StoreU(extentsV, &ext.x);
}
static PX_FORCE_INLINE PxU32 isNonIdentity(const PxVec3& scale)
{
#define IEEE_1_0 0x3f800000 //!< integer representation of 1.0
const PxU32* binary = reinterpret_cast<const PxU32*>(&scale.x);
return (binary[0] - IEEE_1_0)|(binary[1] - IEEE_1_0)|(binary[2] - IEEE_1_0);
}
// PT: please don't inline this one - 300+ lines of rarely used code
static void computeScaledMatrix(PxMat33Padded& rot, const PxMeshScale& scale)
{
rot = rot * Cm::toMat33(scale);
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxTransform& transform, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
PxMat33Padded rot(transform.q);
if(isNonIdentity(scale.scale))
computeScaledMatrix(rot, scale);
transformNoEmptyTestV(c, ext, rot, transform.p, bounds);
}
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxVec3& pos, const PxMat33Padded& rot, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
if(scale.isIdentity())
transformNoEmptyTest(c, ext, rot, pos, bounds);
else
transformNoEmptyTest(c, ext, rot * Cm::toMat33(scale), pos, bounds);
}
static void computeMeshBounds(const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
transformNoEmptyTest(origin, extent, pose, meshScale, *localSpaceBounds);
}
static void computePlaneBounds(PxBounds3& bounds, const PxTransform& pose, float contactOffset, float inflation)
{
// PT: A plane is infinite, so usually the bounding box covers the whole world.
// Now, in particular cases when the plane is axis-aligned, we can take
// advantage of this to compute a smaller bounding box anyway.
// PT: we use PX_MAX_BOUNDS_EXTENTS to be compatible with PxBounds3::setMaximal,
// and to make sure that the value doesn't collide with the BP's sentinels.
const PxF32 bigValue = PX_MAX_BOUNDS_EXTENTS;
// const PxF32 bigValue = 1000000.0f;
PxVec3 minPt = PxVec3(-bigValue, -bigValue, -bigValue);
PxVec3 maxPt = PxVec3(bigValue, bigValue, bigValue);
const PxVec3 planeNormal = pose.q.getBasisVector0();
const PxPlane plane(pose.p, planeNormal);
const float nx = PxAbs(planeNormal.x);
const float ny = PxAbs(planeNormal.y);
const float nz = PxAbs(planeNormal.z);
const float epsilon = 1e-6f;
const float oneMinusEpsilon = 1.0f - epsilon;
if(nx>oneMinusEpsilon && ny<epsilon && nz<epsilon)
{
if(planeNormal.x>0.0f) maxPt.x = -plane.d + contactOffset;
else minPt.x = plane.d - contactOffset;
}
else if(nx<epsilon && ny>oneMinusEpsilon && nz<epsilon)
{
if(planeNormal.y>0.0f) maxPt.y = -plane.d + contactOffset;
else minPt.y = plane.d - contactOffset;
}
else if(nx<epsilon && ny<epsilon && nz>oneMinusEpsilon)
{
if(planeNormal.z>0.0f) maxPt.z = -plane.d + contactOffset;
else minPt.z = plane.d - contactOffset;
}
// PT: it is important to compute the min/max form directly without going through the
// center/extents intermediate form. With PX_MAX_BOUNDS_EXTENTS, those back-and-forth
// computations destroy accuracy.
// PT: inflation actually destroys the bounds really. We keep it to please UTs but this is broken (DE10595).
// (e.g. for SQ 1% of PX_MAX_BOUNDS_EXTENTS is still a huge number, effectively making the AABB infinite and defeating the point of the above computation)
if(inflation!=1.0f)
{
const PxVec3 c = (maxPt + minPt)*0.5f;
const PxVec3 e = (maxPt - minPt)*0.5f*inflation;
minPt = c - e;
maxPt = c + e;
}
bounds.minimum = minPt;
bounds.maximum = maxPt;
}
static PX_FORCE_INLINE void inflateBounds(PxBounds3& bounds, const PxVec3p& origin, const PxVec3p& extents, float contactOffset, float inflation)
{
Vec4V extentsV = V4LoadU(&extents.x);
extentsV = V4Add(extentsV, V4Load(contactOffset));
extentsV = V4Scale(extentsV, FLoad(inflation));
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
static PX_FORCE_INLINE Vec4V basisExtentV(const PxMat33Padded& basis, const PxVec3& extent, float offset, float inflation)
{
// extended basis vectors
const Vec4V c0V = V4Scale(V4LoadU(&basis.column0.x), FLoad(extent.x));
const Vec4V c1V = V4Scale(V4LoadU(&basis.column1.x), FLoad(extent.y));
const Vec4V c2V = V4Scale(V4LoadU(&basis.column2.x), FLoad(extent.z));
// find combination of base vectors that produces max. distance for each component = sum of abs()
Vec4V extentsV = V4Add(V4Abs(c0V), V4Abs(c1V));
extentsV = V4Add(extentsV, V4Abs(c2V));
extentsV = V4Add(extentsV, V4Load(offset));
extentsV = V4Scale(extentsV, FLoad(inflation));
return extentsV;
}
static PX_FORCE_INLINE void computeMeshBounds(PxBounds3& bounds, float contactOffset, float inflation, const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& scale)
{
PxVec3p origin, extents;
computeMeshBounds(pose, localSpaceBounds, scale, origin, extents);
::inflateBounds(bounds, origin, extents, contactOffset, inflation);
}
void Gu::computeTightBounds(PxBounds3& bounds, PxU32 nb, const PxVec3* PX_RESTRICT v, const PxTransform& pose, const PxMeshScale& scale, float contactOffset, float inflation)
{
if(!nb)
{
bounds.setEmpty();
return;
}
PxMat33Padded rot(pose.q);
if(isNonIdentity(scale.scale))
computeScaledMatrix(rot, scale);
// PT: we can safely V4LoadU the first N-1 vertices. We must V3LoadU the last vertex, to make sure we don't read
// invalid memory. Since we have to special-case that last vertex anyway, we reuse that code to also initialize
// the minV/maxV values (bypassing the need for a 'setEmpty()' initialization).
PxU32 nbSafe = nb-1;
// PT: read last (unsafe) vertex using V3LoadU, initialize minV/maxV
const Vec4V lastVertexV = multiply3x3V(Vec4V_From_Vec3V(V3LoadU(&v[nbSafe].x)), rot);
Vec4V minV = lastVertexV;
Vec4V maxV = lastVertexV;
// PT: read N-1 first (safe) vertices using V4LoadU
while(nbSafe--)
{
const Vec4V vertexV = multiply3x3V(V4LoadU(&v->x), rot);
v++;
minV = V4Min(minV, vertexV);
maxV = V4Max(maxV, vertexV);
}
const Vec4V offsetV = V4Load(contactOffset);
minV = V4Sub(minV, offsetV);
maxV = V4Add(maxV, offsetV);
const Vec4V posV = Vec4V_From_Vec3V(V3LoadU(&pose.p.x));
maxV = V4Add(maxV, posV);
minV = V4Add(minV, posV);
// Inflation
{
const Vec4V centerV = V4Scale(V4Add(maxV, minV), FLoad(0.5f));
const Vec4V extentsV = V4Scale(V4Sub(maxV, minV), FLoad(0.5f*inflation));
maxV = V4Add(centerV, extentsV);
minV = V4Sub(centerV, extentsV);
}
StoreBounds(bounds, minV, maxV);
}
void Gu::computeBounds(PxBounds3& bounds, const PxGeometry& geometry, const PxTransform& pose, float contactOffset, float inflation)
{
// Box, Convex, Mesh and HeightField will compute local bounds and pose to world space.
// Sphere, Capsule & Plane will compute world space bounds directly.
switch(geometry.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(geometry);
const PxVec3 extents((shape.radius+contactOffset)*inflation);
bounds.minimum = pose.p - extents;
bounds.maximum = pose.p + extents;
}
break;
case PxGeometryType::ePLANE:
{
computePlaneBounds(bounds, pose, contactOffset, inflation);
}
break;
case PxGeometryType::eCAPSULE:
{
computeCapsuleBounds(bounds, static_cast<const PxCapsuleGeometry&>(geometry), pose, contactOffset, inflation);
}
break;
case PxGeometryType::eBOX:
{
const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(geometry);
const PxVec3p origin(pose.p);
const PxMat33Padded basis(pose.q);
const Vec4V extentsV = basisExtentV(basis, shape.halfExtents, contactOffset, inflation);
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
break;
case PxGeometryType::eCONVEXCORE:
{
Gu::ConvexShape s;
Gu::makeConvexShape(geometry, pose, s);
bounds = s.computeBounds();
bounds.fattenFast(contactOffset);
bounds.scaleFast(inflation);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(geometry);
const Gu::ConvexHullData& hullData = static_cast<const Gu::ConvexMesh*>(shape.convexMesh)->getHull();
const bool useTightBounds = shape.meshFlags & PxConvexMeshGeometryFlag::eTIGHT_BOUNDS;
if(useTightBounds)
computeTightBounds(bounds, hullData.mNbHullVertices, hullData.getHullVertices(), pose, shape.scale, contactOffset, inflation);
else
computeMeshBounds(bounds, contactOffset, inflation, pose, &hullData.getPaddedBounds(), shape.scale);
}
break;
case PxGeometryType::eTRIANGLEMESH:
{
const PxTriangleMeshGeometry& shape = static_cast<const PxTriangleMeshGeometry&>(geometry);
const TriangleMesh* triangleMesh = static_cast<const TriangleMesh*>(shape.triangleMesh);
const bool useTightBounds = shape.meshFlags & PxMeshGeometryFlag::eTIGHT_BOUNDS;
if(useTightBounds)
computeTightBounds(bounds, triangleMesh->getNbVerticesFast(), triangleMesh->getVerticesFast(), pose, shape.scale, contactOffset, inflation);
else
computeMeshBounds(bounds, contactOffset, inflation, pose, &triangleMesh->getPaddedBounds(), shape.scale);
}
break;
case PxGeometryType::eHEIGHTFIELD:
{
const PxHeightFieldGeometry& shape = static_cast<const PxHeightFieldGeometry&>(geometry);
computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::HeightField*>(shape.heightField)->getData().getPaddedBounds(), PxMeshScale(PxVec3(shape.rowScale, shape.heightScale, shape.columnScale)));
}
break;
case PxGeometryType::eTETRAHEDRONMESH:
{
const PxTetrahedronMeshGeometry& shape = static_cast<const PxTetrahedronMeshGeometry&>(geometry);
computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::TetrahedronMesh*>(shape.tetrahedronMesh)->getPaddedBounds(), PxMeshScale());
}
break;
case PxGeometryType::ePARTICLESYSTEM:
{
// implement!
PX_ASSERT(0);
}
break;
case PxGeometryType::eCUSTOM:
{
const PxCustomGeometry& shape = static_cast<const PxCustomGeometry&>(geometry);
PxVec3p centre(0), extents(0);
if (shape.callbacks)
{
const PxBounds3 b = shape.callbacks->getLocalBounds(shape);
centre = b.getCenter(); extents = b.getExtents();
}
const PxVec3p origin(pose.transform(centre));
const PxMat33Padded basis(pose.q);
const Vec4V extentsV = basisExtentV(basis, extents, contactOffset, inflation);
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(bounds, minV, maxV);
}
break;
default:
{
PX_ASSERT(0);
PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Gu::computeBounds: Unknown shape type.");
}
}
}
static PX_FORCE_INLINE void computeBoxExtentsAroundCapsule(PxVec3& extents, const PxCapsuleGeometry& capsuleGeom, float inflation)
{
extents.x = (capsuleGeom.radius + capsuleGeom.halfHeight) * inflation;
extents.y = capsuleGeom.radius * inflation;
extents.z = capsuleGeom.radius * inflation;
}
static const PxReal SQ_PRUNER_INFLATION = 1.01f; // pruner test shape inflation (not narrow phase shape)
static void computeMeshBounds(const PxVec3& pos, const PxMat33Padded& rot, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
PxPrefetchLine(localSpaceBounds); // PT: this one helps reducing L2 misses in transformNoEmptyTest
transformNoEmptyTest(origin, extent, pos, rot, meshScale, *localSpaceBounds);
}
// PT: warning: this writes 4 bytes after the end of 'bounds'. Calling code must ensure it is safe to do so.
static PX_FORCE_INLINE void computeMinMaxBounds(PxBounds3* PX_RESTRICT bounds, const PxVec3p& c, const PxVec3p& e, float prunerInflation, float offset)
{
const Vec4V extentsV = V4Scale(V4Add(V4LoadU(&e.x), V4Load(offset)), FLoad(prunerInflation));
const Vec4V centerV = V4LoadU(&c.x);
const Vec4V minV = V4Sub(centerV, extentsV);
const Vec4V maxV = V4Add(centerV, extentsV);
V4StoreU(minV, &bounds->minimum.x);
V4StoreU(maxV, &bounds->maximum.x);
}
ShapeData::ShapeData(const PxGeometry& g, const PxTransform& t, PxReal inflation)
{
// PT: this cast to matrix is already done in GeometryUnion::computeBounds (e.g. for boxes). So we do it first,
// then we'll pass the matrix directly to computeBoundsShapeData, to avoid the double conversion.
const bool isOBB = PxAbs(t.q.w) < 0.999999f;
if(isOBB)
{
// PT: writes 4 bytes after 'rot' but it's safe since we then write 'center' just afterwards
buildFrom(mGuBox, t.q);
}
else
{
mGuBox.rot = PxMat33(PxIdentity);
}
// PT: can't use V4Load here since there's no guarantee on 't.p'
// PT: must store 'center' after 'rot' now
mGuBox.center = t.p;
// Compute AABB, used by the BucketPruner as cullBox
switch(g.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(g);
computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, PxVec3(0.0f), SQ_PRUNER_INFLATION, shape.radius+inflation);
//
reinterpret_cast<Sphere&>(mGuSphere) = Sphere(t.p, shape.radius);
}
break;
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& shape = static_cast<const PxCapsuleGeometry&>(g);
const PxVec3p extents = mGuBox.rot.column0.abs() * shape.halfHeight;
computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, extents, SQ_PRUNER_INFLATION, shape.radius+inflation);
//
Capsule& dstWorldCapsule = reinterpret_cast<Capsule&>(mGuCapsule); // store a narrow phase version copy
getCapsule(dstWorldCapsule, shape, t);
mGuBox.extents.x = shape.halfHeight;
// compute PxBoxGeometry pruner geom around input capsule geom; transform remains unchanged
computeBoxExtentsAroundCapsule(mPrunerBoxGeomExtents, shape, SQ_PRUNER_INFLATION);
}
break;
case PxGeometryType::eBOX:
{
const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(g);
// PT: cast is safe because 'rot' followed by other members
Vec4V extentsV = basisExtentV(static_cast<const PxMat33Padded&>(mGuBox.rot), shape.halfExtents, inflation, SQ_PRUNER_INFLATION);
// PT: c/e-to-m/M conversion
const Vec4V centerV = V4LoadU(&mGuBox.center.x);
const Vec4V minV = V4Sub(centerV, extentsV);
const Vec4V maxV = V4Add(centerV, extentsV);
V4StoreU(minV, &mPrunerInflatedAABB.minimum.x);
V4StoreU(maxV, &mPrunerInflatedAABB.maximum.x); // PT: WARNING: writes past end of class
//
mGuBox.extents = shape.halfExtents; // PT: TODO: use SIMD
mPrunerBoxGeomExtents = shape.halfExtents*SQ_PRUNER_INFLATION;
}
break;
case PxGeometryType::eCONVEXCORE:
{
PxBounds3 bounds; Gu::computeBounds(bounds, g, t, inflation, SQ_PRUNER_INFLATION);
mPrunerInflatedAABB.minimum = bounds.minimum;
mPrunerInflatedAABB.maximum = bounds.maximum;
mGuBox.extents = mPrunerBoxGeomExtents = bounds.getExtents();
}
break;
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(g);
const ConvexMesh* cm = static_cast<const ConvexMesh*>(shape.convexMesh);
const ConvexHullData* hullData = &cm->getHull();
// PT: cast is safe since 'rot' is followed by other members of the box
PxVec3p center, extents;
computeMeshBounds(mGuBox.center, static_cast<const PxMat33Padded&>(mGuBox.rot), &hullData->getPaddedBounds(), shape.scale, center, extents);
computeMinMaxBounds(&mPrunerInflatedAABB, center, extents, SQ_PRUNER_INFLATION, inflation);
//
Box prunerBox;
computeOBBAroundConvex(prunerBox, shape, cm, t);
mGuBox.rot = prunerBox.rot; // PT: TODO: optimize this copy
// AP: pruners are now responsible for growing the OBB by 1% for overlap/sweep/GJK accuracy
mPrunerBoxGeomExtents = prunerBox.extents*SQ_PRUNER_INFLATION;
mGuBox.center = prunerBox.center;
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("PhysX internal error: Invalid shape in ShapeData contructor.");
}
// PT: WARNING: these writes must stay after the above code
mIsOBB = PxU32(isOBB);
mType = PxU16(g.getType());
}

View File

@@ -0,0 +1,127 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
using namespace physx;
void Gu::Box::create(const Gu::Capsule& capsule)
{
// Box center = center of the two LSS's endpoints
center = capsule.computeCenter();
// Box orientation
const PxVec3 dir = capsule.p1 - capsule.p0;
const float d = dir.magnitude();
if(d!=0.0f)
{
rot.column0 = dir / d;
PxComputeBasisVectors(rot.column0, rot.column1, rot.column2);
}
else
rot = PxMat33(PxIdentity);
// Box extents
extents.x = capsule.radius + (d * 0.5f);
extents.y = capsule.radius;
extents.z = capsule.radius;
}
/**
Returns edges.
\return 24 indices (12 edges) indexing the list returned by ComputePoints()
*/
const PxU8* Gu::getBoxEdges()
{
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
static PxU8 Indices[] = {
0, 1, 1, 2, 2, 3, 3, 0,
7, 6, 6, 5, 5, 4, 4, 7,
1, 5, 6, 2,
3, 7, 4, 0
};
return Indices;
}
void Gu::computeOBBPoints(PxVec3* PX_RESTRICT pts, const PxVec3& center, const PxVec3& extents, const PxVec3& base0, const PxVec3& base1, const PxVec3& base2)
{
PX_ASSERT(pts);
// "Rotated extents"
const PxVec3 axis0 = base0 * extents.x;
const PxVec3 axis1 = base1 * extents.y;
const PxVec3 axis2 = base2 * extents.z;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Original code: 24 vector ops
/* pts[0] = box.center - Axis0 - Axis1 - Axis2;
pts[1] = box.center + Axis0 - Axis1 - Axis2;
pts[2] = box.center + Axis0 + Axis1 - Axis2;
pts[3] = box.center - Axis0 + Axis1 - Axis2;
pts[4] = box.center - Axis0 - Axis1 + Axis2;
pts[5] = box.center + Axis0 - Axis1 + Axis2;
pts[6] = box.center + Axis0 + Axis1 + Axis2;
pts[7] = box.center - Axis0 + Axis1 + Axis2;*/
// Rewritten: 12 vector ops
pts[0] = pts[3] = pts[4] = pts[7] = center - axis0;
pts[1] = pts[2] = pts[5] = pts[6] = center + axis0;
PxVec3 tmp = axis1 + axis2;
pts[0] -= tmp;
pts[1] -= tmp;
pts[6] += tmp;
pts[7] += tmp;
tmp = axis1 - axis2;
pts[2] += tmp;
pts[3] += tmp;
pts[4] -= tmp;
pts[5] -= tmp;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,283 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BUCKET_PRUNER_H
#define GU_BUCKET_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "foundation/PxHash.h"
#define FREE_PRUNER_SIZE 16
//#define USE_REGULAR_HASH_MAP
#ifdef USE_REGULAR_HASH_MAP
#include "foundation/PxHashMap.h"
#endif
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxU32 BucketWord;
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
PX_ALIGN_PREFIX(16) struct BucketBox
{
PxVec3 mCenter;
PxU32 mData0; // Integer-encoded min value along sorting axis
PxVec3 mExtents;
PxU32 mData1; // Integer-encoded max value along sorting axis
#if PX_DEBUG
// PT: we need the original min value for debug checks. Using the center/extents version
// fails because recomputing the min from them introduces FPU accuracy errors in the values.
float mDebugMin;
#endif
PX_FORCE_INLINE PxVec3 getMin() const
{
return mCenter - mExtents;
}
PX_FORCE_INLINE PxVec3 getMax() const
{
return mCenter + mExtents;
}
PX_FORCE_INLINE void setEmpty()
{
mCenter = PxVec3(0.0f);
mExtents = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
#if PX_DEBUG
mDebugMin = PX_MAX_BOUNDS_EXTENTS;
#endif
}
}PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16) struct BucketPrunerNode
{
BucketPrunerNode();
void classifyBoxes( float limitX, float limitZ,
PxU32 nb,
BucketBox* PX_RESTRICT boxes,
const PrunerPayload* PX_RESTRICT objects,
const PxTransform* PX_RESTRICT transforms,
BucketBox* PX_RESTRICT sortedBoxes,
PrunerPayload* PX_RESTRICT sortedObjects,
PxTransform* PX_RESTRICT sortedTransforms,
bool isCrossBucket, PxU32 sortAxis);
PX_FORCE_INLINE void initCounters()
{
for(PxU32 i=0;i<5;i++)
mCounters[i] = 0;
for(PxU32 i=0;i<5;i++)
mOffsets[i] = 0;
}
BucketWord mCounters[5]; // Number of objects in each of the 5 children
BucketWord mOffsets[5]; // Start index of objects for each of the 5 children
BucketBox mBucketBox[5]; // AABBs around objects for each of the 5 children
PxU16 mOrder[8]; // PNS: 5 children => 3 bits/index => 3*5=15 bits total, for each of the 8 canonical directions
}PX_ALIGN_SUFFIX(16);
PX_FORCE_INLINE PxU32 PxComputeHash(const PrunerPayload& payload)
{
#if PX_P64_FAMILY
// const PxU32 h0 = PxHash((const void*)payload.data[0]);
// const PxU32 h1 = PxHash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return physx::PxComputeHash(PxU64(h0)|(PxU64(h1)<<32));
#else
return physx::PxComputeHash(PxU64(payload.data[0])|(PxU64(payload.data[1])<<32));
#endif
}
#ifdef USE_REGULAR_HASH_MAP
struct BucketPrunerPair : public PxUserAllocated
{
PX_FORCE_INLINE BucketPrunerPair() {}
PX_FORCE_INLINE BucketPrunerPair(PxU32 index, PxU32 stamp) : mCoreIndex(index), mTimeStamp(stamp) {}
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
typedef PxHashMap<PrunerPayload, BucketPrunerPair> BucketPrunerMap;
#else
struct BucketPrunerPair : public PxUserAllocated
{
PrunerPayload mData;
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
// Custom hash-map - currently faster than the regular hash-map (PxHashMap), in particular for 'find-and-erase' operations.
class BucketPrunerMap : public PxUserAllocated
{
public:
BucketPrunerMap();
~BucketPrunerMap();
void purge();
void shrinkMemory();
BucketPrunerPair* addPair (const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp);
bool removePair (const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp);
const BucketPrunerPair* findPair (const PrunerPayload& payload) const;
PX_FORCE_INLINE PxU32 getPairIndex(const BucketPrunerPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BucketPrunerPair));
}
PxU32 mHashSize;
PxU32 mMask;
PxU32 mNbActivePairs;
PxU32* mHashTable;
PxU32* mNext;
BucketPrunerPair* mActivePairs;
PxU32 mReservedMemory;
PX_FORCE_INLINE BucketPrunerPair* findPair(const PrunerPayload& payload, PxU32 hashValue) const;
void removePairInternal(const PrunerPayload& payload, PxU32 hashValue, PxU32 pairIndex);
void reallocPairs();
void reserveMemory(PxU32 memSize);
};
#endif
class BucketPrunerCore : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API BucketPrunerCore(bool externalMemory=true);
PX_PHYSX_COMMON_API ~BucketPrunerCore();
void release();
void setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects, PxTransform* transforms);
PX_PHYSX_COMMON_API bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp=0);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PxTransform& transform);
// PT: look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
PX_PHYSX_COMMON_API bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
PX_PHYSX_COMMON_API bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
PX_PHYSX_COMMON_API bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
void getGlobalBounds(PxBounds3& bounds) const;
void shiftOrigin(const PxVec3& shift);
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void build() { classifyBoxes(); }
#ifdef FREE_PRUNER_SIZE
PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbFree + mCoreNbObjects; }
#else
PX_FORCE_INLINE PxU32 getNbObjects() const { return mCoreNbObjects; }
#endif
// private:
PxU32 mCoreNbObjects; // Current number of objects in core arrays
PxU32 mCoreCapacity; // Capacity of core arrays
PxBounds3* mCoreBoxes; // Core array
PrunerPayload* mCoreObjects; // Core array
PxTransform* mCoreTransforms;
PxU32* mCoreRemap; // Remaps core index to sorted index, i.e. sortedIndex = mCoreRemap[coreIndex]
BucketBox* mSortedWorldBoxes; // Sorted array
PrunerPayload* mSortedObjects; // Sorted array
PxTransform* mSortedTransforms;
#ifdef FREE_PRUNER_SIZE
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
#endif
BucketPrunerMap mMap; // Maps (PrunerPayload) object to corresponding index in core array.
// Objects in the free array do not appear in this map.
PxU32 mSortedNb;
PxU32 mSortedCapacity;
PxU32 mSortAxis;
BucketBox mGlobalBox; // Global bounds around all objects in the structure (except the ones in the "free" array)
BucketPrunerNode mLevel1;
BucketPrunerNode mLevel2[5];
BucketPrunerNode mLevel3[5][5];
bool mDirty;
bool mOwnMemory;
private:
PX_PHYSX_COMMON_API void classifyBoxes();
void allocateSortedMemory(PxU32 nb);
void resizeCore();
PX_FORCE_INLINE void addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
};
#if PX_VC
#pragma warning(pop)
#endif
class BucketPruner : public Pruner
{
public:
PX_PHYSX_COMMON_API BucketPruner(PxU64 contextID);
virtual ~BucketPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
//~Pruner
private:
BucketPrunerCore mCore;
PruningPool mPool;
};
}
}
#endif

View File

@@ -0,0 +1,444 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistancePointBox.h"
#include "GuSweepBoxSphere.h"
#include "GuSweepCapsuleBox.h"
#include "GuSweepBoxBox.h"
#include "GuSweepBoxTriangle_SAT.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
#include "foundation/PxVecMath.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
static const bool gValidateBoxRadiusComputation = false;
///////////////////////////////////////////
bool sweepCapsule_BoxGeom_Precise(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(capsulePose_);
PX_UNUSED(capsuleGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
if (lss.p0 == lss.p1) // The capsule is actually a sphere
{
//TODO: Check if this is really faster than using a "sphere-aware" version of sweepCapsuleBox
Box box; buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
if(!sweepBoxSphere(box, lss.radius, lss.p0, unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.normal = -sweepHit.normal;
sweepHit.flags = PxHitFlag::eNORMAL;
if(hitFlags & PxHitFlag::ePOSITION && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
const PxVec3 newSphereCenter = lss.p0 + unitDir * sweepHit.distance;
PxVec3 closest;
const PxReal d = distancePointBoxSquared(newSphereCenter, box.center, box.extents, box.rot, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
closest = box.rotate(closest);
sweepHit.position = closest + box.center;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
else
{
if(!sweepCapsuleBox(lss, pose, boxGeom.halfExtents, unitDir, distance, sweepHit.position, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
Capsule movedCaps = lss;
movedCaps.p0 += unitDir * sweepHit.distance;
movedCaps.p1 += unitDir * sweepHit.distance;
Box box;
buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
PxVec3 closest;
const PxReal d = distanceSegmentBoxSquared(movedCaps, box, NULL, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
closest = pose.q.rotate(closest);
sweepHit.position = closest + pose.p;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool sweepBox_SphereGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
// PT: move to relative space
const Box relBox(box.center - pose.p, box.extents, box.rot);
const PxReal sphereRadius = sphereGeom.radius + inflation;
if(!sweepBoxSphere(relBox, sphereRadius, PxVec3(0), -unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
return false;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
const PxVec3 motion = sweepHit.distance * unitDir;
const PxVec3 newSphereCenter = - motion;
PxVec3 closest;
const PxReal d = distancePointBoxSquared(newSphereCenter, relBox.center, relBox.extents, relBox.rot, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
sweepHit.position = relBox.rotate(closest) + box.center + motion; // PT: undo move to local space here
sweepHit.flags |= PxHitFlag::ePOSITION;
}
return true;
}
bool sweepBox_CapsuleGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_UNUSED(inflation);
PX_UNUSED(boxGeom_);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
// PT: move to relative space
const PxVec3 delta = box.center - pose.p;
Box relBox(delta, box.extents, box.rot);
Capsule capsule;
const PxVec3 halfHeightVector = getCapsuleHalfHeightVector(pose, capsuleGeom);
capsule.p0 = halfHeightVector;
capsule.p1 = -halfHeightVector;
capsule.radius = capsuleGeom.radius;
// PT: TODO: remove this. We convert to PxTansform here but inside sweepCapsuleBox we convert back to a matrix.
const PxTransform boxWorldPose(delta, boxPose_.q);
PxVec3 n;
if(!sweepCapsuleBox(capsule, boxWorldPose, relBox.extents, -unitDir, distance, sweepHit.position, sweepHit.distance, n, hitFlags))
return false;
sweepHit.normal = -n;
sweepHit.flags = PxHitFlag::eNORMAL;
if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
{
// The sweep test doesn't compute the impact point automatically, so we have to do it here.
relBox.center += (unitDir * sweepHit.distance);
PxVec3 closest;
const PxReal d = distanceSegmentBoxSquared(capsule, relBox, NULL, &closest);
PX_UNUSED(d);
// Compute point on the box, after sweep
sweepHit.position = relBox.transform(closest) + pose.p; // PT: undo move to local space here
sweepHit.flags |= PxHitFlag::ePOSITION;
}
return true;
}
bool sweepBox_BoxGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
// PT: move to local space
const Box relBox(box.center - pose.p, box.extents, box.rot);
Box staticBox; buildFrom(staticBox, PxVec3(0), boxGeom.halfExtents, pose.q);
if(!sweepBoxBox(relBox, staticBox, unitDir, distance, hitFlags, sweepHit))
return false;
if(sweepHit.distance!=0.0f)
sweepHit.position += pose.p; // PT: undo move to local space
return true;
}
// PT: test: new version for CCT, based on code for general sweeps. Just to check it works or not with rotations
// TODO: refactor this and the similar code in sweptBox for box-vs-mesh. Not so easy though.
static bool sweepBoxVsTriangles(PxU32 nbTris, const PxTriangle* triangles, const Box& box, const PxVec3& unitDir, PxReal distance, PxGeomSweepHit& sweepHit,
PxHitFlags hitFlags, bool isDoubleSided, const PxU32* cachedIndex)
{
if(!nbTris)
return false;
const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
const bool doBackfaceCulling = !isDoubleSided && !meshBothSides;
// Move to AABB space
PxMat34 worldToBox;
computeWorldToBoxMatrix(worldToBox, box);
const PxVec3 localDir = worldToBox.rotate(unitDir);
const PxVec3 localMotion = localDir * distance;
bool status = false;
sweepHit.distance = distance; //was PX_MAX_F32, but that may trigger an assert in the caller!
const PxVec3 oneOverMotion(
localDir.x!=0.0f ? 1.0f/localMotion.x : 0.0f,
localDir.y!=0.0f ? 1.0f/localMotion.y : 0.0f,
localDir.z!=0.0f ? 1.0f/localMotion.z : 0.0f);
// PT: experimental code, don't clean up before I test it more and validate it
// Project box
/*float boxRadius0 =
PxAbs(dir.x) * box.extents.x
+ PxAbs(dir.y) * box.extents.y
+ PxAbs(dir.z) * box.extents.z;*/
float boxRadius =
PxAbs(localDir.x) * box.extents.x
+ PxAbs(localDir.y) * box.extents.y
+ PxAbs(localDir.z) * box.extents.z;
if(gValidateBoxRadiusComputation) // PT: run this to check the box radius is correctly computed
{
PxVec3 boxVertices2[8];
box.computeBoxPoints(boxVertices2);
float dpmin = FLT_MAX;
float dpmax = -FLT_MAX;
for(int i=0;i<8;i++)
{
const float dp = boxVertices2[i].dot(unitDir);
if(dp<dpmin) dpmin = dp;
if(dp>dpmax) dpmax = dp;
}
const float goodRadius = (dpmax-dpmin)/2.0f;
PX_UNUSED(goodRadius);
}
const float dpc0 = box.center.dot(unitDir);
float localMinDist = 1.0f;
#if PX_DEBUG
PxU32 totalTestsExpected = nbTris;
PxU32 totalTestsReal = 0;
PX_UNUSED(totalTestsExpected);
PX_UNUSED(totalTestsReal);
#endif
const PxU32 idx = cachedIndex ? *cachedIndex : 0;
PxVec3 bestTriNormal(0.0f);
for(PxU32 ii=0;ii<nbTris;ii++)
{
const PxU32 triangleIndex = getTriangleIndex(ii, idx);
const PxTriangle& tri = triangles[triangleIndex];
if(!cullTriangle(tri.verts, unitDir, boxRadius, localMinDist*distance, dpc0))
continue;
#if PX_DEBUG
totalTestsReal++;
#endif
// Move to box space
const PxTriangle currentTriangle(
worldToBox.transform(tri.verts[0]),
worldToBox.transform(tri.verts[1]),
worldToBox.transform(tri.verts[2]));
PxF32 t = PX_MAX_F32; // could be better!
if(triBoxSweepTestBoxSpace(currentTriangle, box.extents, localMotion, oneOverMotion, localMinDist, t, doBackfaceCulling))
{
if(t < localMinDist)
{
// PT: test if shapes initially overlap
if(t==0.0f)
return setInitialOverlapResults(sweepHit, unitDir, triangleIndex);
localMinDist = t;
sweepHit.distance = t * distance;
sweepHit.faceIndex = triangleIndex;
status = true;
// PT: TODO: optimize this.... already computed in triBoxSweepTestBoxSpace...
currentTriangle.denormalizedNormal(bestTriNormal);
if(hitFlags & PxHitFlag::eANY_HIT)
break;
}
}
}
if(status)
{
sweepHit.flags = PxHitFlag::eFACE_INDEX;
// PT: TODO: refactor with computeBoxLocalImpact (TA34704)
if(hitFlags & (PxHitFlag::eNORMAL|PxHitFlag::ePOSITION))
{
const PxTriangle& tri = triangles[sweepHit.faceIndex];
// Move to box space
const PxTriangle currentTriangle(
worldToBox.transform(tri.verts[0]),
worldToBox.transform(tri.verts[1]),
worldToBox.transform(tri.verts[2]));
computeBoxTriImpactData(sweepHit.position, sweepHit.normal, box.extents, localDir, currentTriangle, sweepHit.distance);
if(hitFlags & PxHitFlag::eNORMAL)
{
PxVec3 localNormal = sweepHit.normal; // PT: both local space & local variable
localNormal.normalize();
if(shouldFlipNormal(localNormal, meshBothSides, isDoubleSided, bestTriNormal, localDir))
localNormal = -localNormal;
sweepHit.normal = box.rotate(localNormal);
sweepHit.flags |= PxHitFlag::eNORMAL;
}
if(hitFlags & PxHitFlag::ePOSITION)
{
sweepHit.position = box.rotate(sweepHit.position) + box.center;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
}
return status;
}
bool sweepBox_HeightFieldGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(threadContext);
PX_UNUSED(inflation);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
const PxHeightFieldGeometry& heightFieldGeom = static_cast<const PxHeightFieldGeometry&>(geom);
// Compute swept box
Box sweptBox;
computeSweptBox(sweptBox, box.extents, box.center, box.rot, unitDir, distance);
//### Temp hack until we can directly collide the OBB against the HF
const PxTransform sweptBoxTR = sweptBox.getTransform();
const PxBounds3 bounds = PxBounds3::poseExtent(sweptBoxTR, sweptBox.extents);
sweepHit.distance = PX_MAX_F32;
struct LocalReport : OverlapReport
{
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
for(PxU32 i=0; i<nb; i++)
{
const PxU32 triangleIndex = indices[i];
PxTriangle currentTriangle; // in world space
mHFUtil->getTriangle(*mPose, currentTriangle, NULL, NULL, triangleIndex, true, true);
PxGeomSweepHit sweepHit_;
const bool b = sweepBoxVsTriangles(1, &currentTriangle, mBox, mDir, mDist, sweepHit_, mHitFlags, mIsDoubleSided, NULL);
if(b && sweepHit_.distance<mHit->distance)
{
*mHit = sweepHit_;
mHit->faceIndex = triangleIndex;
mStatus = true;
}
}
return true;
}
const HeightFieldUtil* mHFUtil;
const PxTransform* mPose;
PxGeomSweepHit* mHit;
bool mStatus;
Box mBox;
PxVec3 mDir;
float mDist;
PxHitFlags mHitFlags;
bool mIsDoubleSided;
} myReport;
HeightFieldUtil hfUtil(heightFieldGeom);
myReport.mBox = box;
myReport.mDir = unitDir;
myReport.mDist = distance;
myReport.mHitFlags = hitFlags;
myReport.mHFUtil = &hfUtil;
myReport.mStatus = false;
myReport.mPose = &pose;
myReport.mHit = &sweepHit;
const PxU32 meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
myReport.mIsDoubleSided = (heightFieldGeom.heightFieldFlags & PxMeshGeometryFlag::eDOUBLE_SIDED) || meshBothSides;
hfUtil.overlapAABBTriangles(pose, bounds, myReport);
return myReport.mStatus;
}
bool Gu::sweepBoxTriangles_Precise(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxBoxGeometry))
{
PX_UNUSED(inflation);
Box box;
buildFrom(box, pose.p, geom.halfExtents, pose.q);
return sweepBoxVsTriangles(nbTris, triangles, box, unitDir, distance, hit, hitFlags, doubleSided, cachedIndex);
}

View File

@@ -0,0 +1,71 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CALLBACK_ADAPTER_H
#define GU_CALLBACK_ADAPTER_H
#include "GuPruner.h"
#include "GuPruningPool.h"
namespace physx
{
namespace Gu
{
struct RaycastCallbackAdapter
{
PX_FORCE_INLINE RaycastCallbackAdapter(PrunerRaycastCallback& pcb, const PruningPool& pool) : mCallback(pcb), mPool(pool) {}
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 primIndex)
{
return mCallback.invoke(distance, primIndex, mPool.getObjects(), mPool.getTransforms());
}
PrunerRaycastCallback& mCallback;
const PruningPool& mPool;
PX_NOCOPY(RaycastCallbackAdapter)
};
struct OverlapCallbackAdapter
{
PX_FORCE_INLINE OverlapCallbackAdapter(PrunerOverlapCallback& pcb, const PruningPool& pool) : mCallback(pcb), mPool(pool) {}
PX_FORCE_INLINE bool invoke(PxU32 primIndex)
{
return mCallback.invoke(primIndex, mPool.getObjects(), mPool.getTransforms());
}
PrunerOverlapCallback& mCallback;
const PruningPool& mPool;
PX_NOCOPY(OverlapCallbackAdapter)
};
}
}
#endif

View File

@@ -0,0 +1,62 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuInternal.h"
#include "GuBox.h"
#include "GuCapsule.h"
using namespace physx;
/**
* Computes an OBB surrounding the capsule.
* \param box [out] the OBB
*/
void Gu::computeBoxAroundCapsule(const Gu::Capsule& capsule, Gu::Box& box)
{
// Box center = center of the two capsule's endpoints
box.center = capsule.computeCenter();
// Box extents
const PxF32 d = (capsule.p0 - capsule.p1).magnitude();
box.extents.x = capsule.radius + (d * 0.5f);
box.extents.y = capsule.radius;
box.extents.z = capsule.radius;
// Box orientation
if(d==0.0f)
{
box.rot = PxMat33(PxIdentity);
}
else
{
PxVec3 dir, right, up;
PxComputeBasisVectors(capsule.p0, capsule.p1, dir, right, up);
box.setAxes(dir, right, up);
}
}

View File

@@ -0,0 +1,653 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBasicTemplates.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
#include "GuBounds.h"
#include "common/PxRenderOutput.h"
using namespace physx;
// This enums should match
PX_COMPILE_TIME_ASSERT(PxU32(Gu::ConvexCore::Type::ePOINT) == PxU32(PxConvexCore::ePOINT));
PX_COMPILE_TIME_ASSERT(PxU32(Gu::ConvexCore::Type::eSEGMENT) == PxU32(PxConvexCore::eSEGMENT));
PX_COMPILE_TIME_ASSERT(PxU32(Gu::ConvexCore::Type::eBOX) == PxU32(PxConvexCore::eBOX));
PX_COMPILE_TIME_ASSERT(PxU32(Gu::ConvexCore::Type::eELLIPSOID) == PxU32(PxConvexCore::eELLIPSOID));
PX_COMPILE_TIME_ASSERT(PxU32(Gu::ConvexCore::Type::eCYLINDER) == PxU32(PxConvexCore::eCYLINDER));
PX_COMPILE_TIME_ASSERT(Gu::ConvexCore::MAX_CORE_SIZE >= PxConvexCoreGeometry::MAX_CORE_SIZE);
static const PxU32 GEOMETRY_COLOR = PxU32(PxDebugColor::eARGB_MAGENTA);
static const PxU32 GEOMETRY_CORE_COLOR = PxU32(0x88880088); // dark magenta
namespace aux
{
PxReal coneRadiusAtHeight(PxReal height, PxReal radius, PxReal margin, PxReal h)
{
float angle = atan2f(radius, height);
float aSin = sinf(angle);
float aCos = cosf(angle);
if (h > -height * 0.5f + margin * aSin && h < height * 0.5f + margin * aSin)
return radius * (height * 0.5f - h) / height + margin / aCos;
if (h <= -height * 0.5f + margin * aSin)
{
float a = -h - height * 0.5f;
return radius + sqrtf(margin * margin - a * a);
}
if (h >= height * 0.5f + margin * aSin)
{
float a = h - height * 0.5f;
return sqrtf(margin * margin - a * a);
}
PX_ASSERT(0);
return 0;
}
}
namespace debug
{
static void drawArc(const PxVec3& center, const PxVec3& radius, const PxVec3& axis, PxReal angle, PxReal error, PxRenderOutput& out)
{
int sides = int(ceilf(angle / (2 * acosf(1.0f - error))));
float step = angle / sides;
out << PxRenderOutput::LINESTRIP;
for (int i = 0; i <= sides; ++i)
out << center + PxQuat(step * i, axis).rotate(radius);
}
static void drawCircle(const PxVec3& center, const PxVec3& radius, const PxVec3& axis, PxReal error, PxRenderOutput& out)
{
drawArc(center, radius, axis, PxTwoPi, error, out);
}
static void drawQuarterCircle(const PxVec3& center, const PxVec3& radius, const PxVec3& axis, PxReal error, PxRenderOutput& out)
{
drawArc(center, radius, axis, PxPiDivTwo, error, out);
}
static void drawLine(const PxVec3& s, const PxVec3& e, PxRenderOutput& out)
{
out << PxRenderOutput::LINES << s << e;
}
static void drawSphere(PxReal radius, PxReal error, PxRenderOutput& out)
{
drawCircle(PxVec3(0), PxVec3(0, radius, 0), PxVec3(1, 0, 0), error, out);
drawCircle(PxVec3(0), PxVec3(0, 0, radius), PxVec3(0, 1, 0), error, out);
drawCircle(PxVec3(0), PxVec3(radius, 0, 0), PxVec3(0, 0, 1), error, out);
}
static void drawCapsule(PxReal length, PxReal radius, PxReal error, PxRenderOutput& out)
{
drawLine(PxVec3(length * 0.5f, radius, 0), PxVec3(-length * 0.5f, radius, 0), out);
drawLine(PxVec3(length * 0.5f, -radius, 0), PxVec3(-length * 0.5f, -radius, 0), out);
drawLine(PxVec3(length * 0.5f, 0, radius), PxVec3(-length * 0.5f, 0, radius), out);
drawLine(PxVec3(length * 0.5f, 0, -radius), PxVec3(-length * 0.5f, 0, -radius), out);
drawCircle(PxVec3(length * 0.5f, 0, 0), PxVec3(0, radius, 0), PxVec3(1, 0, 0), error, out);
drawCircle(PxVec3(-length * 0.5f, 0, 0), PxVec3(0, radius, 0), PxVec3(1, 0, 0), error, out);
drawArc(PxVec3(length * 0.5f, 0, 0), PxVec3(0, radius, 0), PxVec3(0, 0, -1), PxPi, error, out);
drawArc(PxVec3(length * 0.5f, 0, 0), PxVec3(0, 0, radius), PxVec3(0, 1, 0), PxPi, error, out);
drawArc(PxVec3(-length * 0.5f, 0, 0), PxVec3(0, radius, 0), PxVec3(0, 0, 1), PxPi, error, out);
drawArc(PxVec3(-length * 0.5f, 0, 0), PxVec3(0, 0, radius), PxVec3(0, -1, 0), PxPi, error, out);
}
static void drawBox(const PxVec3& extents, PxReal margin, PxReal error, PxRenderOutput& out)
{
for (PxU32 i = 0; i < 3; ++i)
{
PxU32 axis0 = i, axis1 = (i + 1) % 3, axis2 = (i + 2) % 3;
PxVec3 ax0(0), ax1(0), ax2(0);
ax0[axis0] = 1; ax1[axis1] = 1; ax2[axis2] = 1;
PxReal s[4][2] = { { 1, 1 }, { -1, 1 }, { -1,-1 }, { 1, -1 } };
for (PxU32 j = 0; j < 4; ++j)
{
PxVec3 c(0);
c[axis1] = extents[axis1] * 0.5f * s[j][0];
c[axis2] = extents[axis2] * 0.5f * s[j][1];
if (margin > FLT_EPSILON)
{
drawLine(c + ax0 * extents[axis0] * 0.5f + ax1 * margin * s[j][0], c - ax0 * extents[axis0] * 0.5f + ax1 * margin * s[j][0], out);
drawLine(c + ax0 * extents[axis0] * 0.5f + ax2 * margin * s[j][1], c - ax0 * extents[axis0] * 0.5f + ax2 * margin * s[j][1], out);
drawQuarterCircle(c + ax0 * extents[axis0] * 0.5f, ax1 * margin * s[j][0], ax0 * (j % 2 ? -1.0f : 1.0f), error, out);
drawQuarterCircle(c - ax0 * extents[axis0] * 0.5f, ax1 * margin * s[j][0], ax0 * (j % 2 ? -1.0f : 1.0f), error, out);
}
else
drawLine(c + ax0 * extents[axis0] * 0.5f, c - ax0 * extents[axis0] * 0.5f, out);
}
}
}
static void drawEllipse(const PxVec3& center, const PxVec3& axis0, PxReal radius0, const PxVec3& axis1, PxReal radius1, PxReal margin, PxReal error, PxRenderOutput& out)
{
if (radius0 * radius1 > 0)
{
out << PxRenderOutput::LINESTRIP;
for (PxReal t = 0, dT = 0; t - dT < PxTwoPi; t += dT)
{
PxReal si, co; PxSinCos(t, si, co);
PxVec3 p = axis0 * radius0 * co + axis1 * radius1 * si;
PxVec3 tang = -axis0 * radius0 * si + axis1 * radius1 * co;
PxVec3 norm = axis1.cross(axis0).cross(tang).getNormalized();
p += norm * margin;
out << center + p;
PxReal d0, d1, d20, d21, C;
d0 = -radius0 * si; d1 = radius1 * co; d20 = -radius0 * co; d21 = -radius1 * si;
C = PxAbs(d0 * d21 - d1 * d20) / PxPow(d0 * d0 + d1 * d1, 1.5f);
dT = 100.0f * error / (1.0f + C) / PxMax(radius0, radius1);
}
}
else
{
const PxVec3 axis = axis1.cross(axis0);
if (radius0 > 0)
{
drawLine(center + axis0 * radius0 + axis1 * margin, center - axis0 * radius0 + axis1 * margin, out);
drawLine(center + axis0 * radius0 - axis1 * margin, center - axis0 * radius0 - axis1 * margin, out);
drawArc(center + axis0 * radius0, axis1 * margin, axis, PxPi, error, out);
drawArc(center - axis0 * radius0, axis1 * margin, -axis, PxPi, error, out);
}
else if (radius1 > 0)
{
drawLine(center + axis1 * radius1 + axis0 * margin, center - axis1 * radius1 + axis0 * margin, out);
drawLine(center + axis1 * radius1 - axis0 * margin, center - axis1 * radius1 - axis0 * margin, out);
drawArc(center + axis1 * radius1, axis0 * margin, -axis, PxPi, error, out);
drawArc(center - axis1 * radius1, axis0 * margin, axis, PxPi, error, out);
}
else
{
drawArc(center - axis1 * radius1, axis0 * margin, axis, PxTwoPi, error, out);
}
}
}
static void drawEllipsoid(const PxVec3& radii, PxReal margin, PxReal error, PxRenderOutput& out)
{
const PxVec3 zero(0), X(1, 0, 0), Y(0, 1, 0), Z(0, 0, 1);
drawEllipse(zero, X, radii.x, Y, radii.y, margin, error, out);
drawEllipse(zero, X, radii.x, Z, radii.z, margin, error, out);
drawEllipse(zero, Z, radii.z, Y, radii.y, margin, error, out);
}
static void drawCylinder(PxReal height, PxReal radius, PxReal margin, PxReal error, PxRenderOutput& out)
{
const PxReal ERR = error;
PxU32 axis0 = 0, axis1 = 1, axis2 = 2;
PxVec3 zr(PxZero), rd(PxZero), ax(PxZero), ax1(PxZero), ax2(PxZero), r0(PxZero), r1(PxZero);
ax[axis0] = ax1[axis1] = ax2[axis2] = 1.0f;
r0[axis1] = r1[axis2] = radius;
rd[axis1] = radius;
rd[axis0] = -(height * 0.5f + margin);
debug::drawCircle(zr, rd, ax, ERR, out);
rd[axis0] = (height * 0.5f + margin);
debug::drawCircle(zr, rd, ax, ERR, out);
rd[axis1] = radius + margin;
rd[axis0] = -(height * 0.5f);
debug::drawCircle(zr, rd, ax, ERR, out);
rd[axis0] = (height * 0.5f);
debug::drawCircle(zr, rd, ax, ERR, out);
debug::drawLine(-ax * height * 0.5f + ax1 * (radius + margin), ax * height * 0.5f + ax1 * (radius + margin), out);
debug::drawLine(-ax * height * 0.5f - ax1 * (radius + margin), ax * height * 0.5f - ax1 * (radius + margin), out);
debug::drawLine(-ax * height * 0.5f + ax2 * (radius + margin), ax * height * 0.5f + ax2 * (radius + margin), out);
debug::drawLine(-ax * height * 0.5f - ax2 * (radius + margin), ax * height * 0.5f - ax2 * (radius + margin), out);
debug::drawQuarterCircle(-ax * height * 0.5f + ax1 * radius, -ax * margin, -ax2, ERR, out);
debug::drawQuarterCircle(-ax * height * 0.5f - ax1 * radius, -ax * margin, ax2, ERR, out);
debug::drawQuarterCircle(-ax * height * 0.5f + ax2 * radius, -ax * margin, ax1, ERR, out);
debug::drawQuarterCircle(-ax * height * 0.5f - ax2 * radius, -ax * margin, -ax1, ERR, out);
debug::drawQuarterCircle(ax * height * 0.5f + ax1 * radius, ax * margin, ax2, ERR, out);
debug::drawQuarterCircle(ax * height * 0.5f - ax1 * radius, ax * margin, -ax2, ERR, out);
debug::drawQuarterCircle(ax * height * 0.5f + ax2 * radius, ax * margin, -ax1, ERR, out);
debug::drawQuarterCircle(ax * height * 0.5f - ax2 * radius, ax * margin, ax1, ERR, out);
}
static void drawCone(PxReal height, PxReal radius, PxReal margin, PxReal error, PxRenderOutput& out)
{
const PxReal ERR = error;
PxU32 axis0 = 0, axis1 = 1, axis2 = 2;
PxVec3 zr(PxZero), rd(PxZero), ax(PxZero), ax1(PxZero), ax2(PxZero), r0(PxZero), r1(PxZero);
ax[axis0] = ax1[axis1] = ax2[axis2] = 1.0f;
r0[axis1] = r1[axis2] = radius;
float ang = atan2f(radius, height);
float aSin = sinf(ang);
rd[axis1] = radius;
rd[axis0] = -(height * 0.5f + margin);
drawCircle(zr, rd, ax, ERR, out);
if (radius >= height && margin > 0)
{
rd[axis1] = radius + margin;
rd[axis0] = -height * 0.5f;
drawCircle(zr, rd, ax, ERR, out);
}
rd[axis0] = -(height * 0.5f) + margin * aSin;
rd[axis1] = aux::coneRadiusAtHeight(height, radius, margin, rd[axis0]);
drawCircle(zr, rd, ax, ERR, out);
rd[axis0] = height * 0.5f + margin * aSin;
rd[axis1] = aux::coneRadiusAtHeight(height, radius, margin, rd[axis0]);
drawCircle(zr, rd, ax, ERR, out);
float h0 = -height * 0.5f + margin * aSin, h1 = height * 0.5f + margin * aSin;
float s0 = aux::coneRadiusAtHeight(height, radius, margin, h0), s1 = aux::coneRadiusAtHeight(height, radius, margin, h1);
debug::drawLine(ax * h0 + ax1 * s0, ax * h1 + ax1 * s1, out);
debug::drawLine(ax * h0 - ax1 * s0, ax * h1 - ax1 * s1, out);
debug::drawLine(ax * h0 + ax2 * s0, ax * h1 + ax2 * s1, out);
debug::drawLine(ax * h0 - ax2 * s0, ax * h1 - ax2 * s1, out);
debug::drawArc(-ax * height * 0.5f + ax1 * radius, -ax * margin, -ax2, PxPiDivTwo + ang, ERR, out);
debug::drawArc(-ax * height * 0.5f - ax1 * radius, -ax * margin, ax2, PxPiDivTwo + ang, ERR, out);
debug::drawArc(-ax * height * 0.5f + ax2 * radius, -ax * margin, ax1, PxPiDivTwo + ang, ERR, out);
debug::drawArc(-ax * height * 0.5f - ax2 * radius, -ax * margin, -ax1, PxPiDivTwo + ang, ERR, out);
debug::drawArc(ax * height * 0.5f, ax * margin, ax2, PxPiDivTwo - ang, ERR, out);
debug::drawArc(ax * height * 0.5f, ax * margin, -ax2, PxPiDivTwo - ang, ERR, out);
debug::drawArc(ax * height * 0.5f, ax * margin, -ax1, PxPiDivTwo - ang, ERR, out);
debug::drawArc(ax * height * 0.5f, ax * margin, ax1, PxPiDivTwo - ang, ERR, out);
}
}
PX_PHYSX_COMMON_API bool PxConvexCoreGeometry::isValid() const
{
const PxReal margin = getMargin();
if (margin < 0)
return false;
switch (getCoreType())
{
case PxConvexCore::ePOINT:
return margin > 0; // has volume
case PxConvexCore::eSEGMENT:
{
const PxConvexCore::Segment& c = getCore<PxConvexCore::Segment>();
return c.length >= 0 &&
(margin + c.length) * margin > 0; // has volume
}
case PxConvexCore::eBOX:
{
const PxConvexCore::Box& c = getCore<PxConvexCore::Box>();
return c.extents.x >= 0 && c.extents.y >= 0 && c.extents.z >= 0 &&
(margin + c.extents.x) * (margin + c.extents.y) * (margin + c.extents.z) > 0; // has volume
}
case PxConvexCore::eELLIPSOID:
{
const PxConvexCore::Ellipsoid& c = getCore<PxConvexCore::Ellipsoid>();
return c.radii.x >= 0 && c.radii.y >= 0 && c.radii.z >= 0 &&
(margin + c.radii.x) * (margin + c.radii.y) * (margin + c.radii.z) > 0; // has volume
}
case PxConvexCore::eCYLINDER:
{
const PxConvexCore::Cylinder& c = getCore<PxConvexCore::Cylinder>();
return c.height >= 0 && c.radius >= 0 &&
(margin + c.height) * (margin + c.radius) > 0; // has volume
}
case PxConvexCore::eCONE:
{
const PxConvexCore::Cone& c = getCore<PxConvexCore::Cone>();
return c.height >= 0 && c.radius >= 0 &&
(margin + c.height) * (margin + c.radius) > 0; // has volume
}
default:
break;
}
return false;
}
PX_PHYSX_COMMON_API bool Gu::isGPUCompatible(const PxConvexCoreGeometry& convex)
{
// there's no types a.t.m. that don't support GPU,
// but if there will be, we'll return 'false' here.
switch (convex.getCoreType())
{
case PxConvexCore::ePOINT:
case PxConvexCore::eSEGMENT:
case PxConvexCore::eBOX:
case PxConvexCore::eELLIPSOID:
case PxConvexCore::eCYLINDER:
case PxConvexCore::eCONE:
return true;
default:
break;
}
return false;
}
namespace
{
struct MassInfo
{
PxReal density1Mass;
PxMat33 inertiaTensor;
PxVec3 centerOfMass;
};
MassInfo cylinderMassInfo(PxReal height, PxReal radius, const PxVec3& centerOfMass = PxVec3(0))
{
const PxReal h = height, r = radius;
const PxReal m = PxPi * r * r * h;
const PxReal ix = m * r * r / 2;
const PxReal iyz = m * (3 * r * r + h * h) / 12;
MassInfo mi;
mi.density1Mass = m;
mi.inertiaTensor = PxMat33::createDiagonal(PxVec3(ix, iyz, iyz));
mi.centerOfMass = centerOfMass;
return mi;
}
}
PX_PHYSX_COMMON_API void Gu::computeMassInfo(const PxConvexCoreGeometry& convex, PxReal& density1Mass, PxMat33& inertiaTensor, PxVec3& centerOfMass)
{
PxReal margin = convex.getMargin();
switch (convex.getCoreType())
{
case PxConvexCore::ePOINT:
{
const PxReal r = margin;
density1Mass = PxPi * r * r * r * 4.0f / 3.0f;
inertiaTensor = PxMat33::createDiagonal(PxVec3(r * r)) * (density1Mass * 2.0f / 5.0f);
centerOfMass = PxVec3(0);
break;
}
case PxConvexCore::eCYLINDER:
{
const PxConvexCore::Cylinder& core = convex.getCore<PxConvexCore::Cylinder>();
const PxReal H = core.height + margin * 2;
const PxReal R = core.radius + margin;
MassInfo mi = cylinderMassInfo(H, R);
density1Mass = mi.density1Mass;
inertiaTensor = mi.inertiaTensor;
centerOfMass = mi.centerOfMass;
break;
}
case PxConvexCore::eCONE:
{
const PxU32 SLICE_COUNT = 32;
const PxConvexCore::Cone& core = convex.getCore<PxConvexCore::Cone>();
const PxReal H = core.height + margin * 2;
const PxReal h = H / SLICE_COUNT;
MassInfo mis[SLICE_COUNT];
for (PxU32 i = 0; i < SLICE_COUNT; ++i)
{
const PxReal t = -H * 0.5f + i * h + h * 0.5f;
const PxReal r = aux::coneRadiusAtHeight(core.height, core.radius, margin, t);
mis[i] = cylinderMassInfo(h, r, PxVec3(t, 0, 0));
}
MassInfo mi{ 0, PxMat33(PxZero), PxVec3(0) };
for (PxU32 i = 0; i < SLICE_COUNT; i++)
{
mi.density1Mass += mis[i].density1Mass;
mi.centerOfMass += mis[i].centerOfMass * mis[i].density1Mass;
}
if (mi.density1Mass > 0.f)
mi.centerOfMass /= mi.density1Mass;
for (PxU32 i = 0; i < SLICE_COUNT; i++)
{
const PxVec3 t = mi.centerOfMass - mis[i].centerOfMass;
const PxMat33 s(PxVec3(0, t.z, -t.y), PxVec3(-t.z, 0, t.x), PxVec3(t.y, -t.x, 0));
mi.inertiaTensor += s.getTranspose() * s * mis[i].density1Mass + mis[i].inertiaTensor;
}
{
const PxVec3 t = mi.centerOfMass;
const PxMat33 s(PxVec3(0, t.z, -t.y), PxVec3(-t.z, 0, t.x), PxVec3(t.y, -t.x, 0));
mi.inertiaTensor += s.getTranspose() * s * mi.density1Mass;
}
density1Mass = mi.density1Mass;
inertiaTensor = mi.inertiaTensor;
centerOfMass = mi.centerOfMass;
break;
}
default:
{
const PxBounds3 bounds = Gu::computeBounds(convex, PxTransform(PxIdentity));
const PxVec3 halfExtents = bounds.getDimensions() * 0.5f;
density1Mass = halfExtents.x * halfExtents.y * halfExtents.z * 8.0f;
PxVec3 d2 = halfExtents.multiply(halfExtents);
inertiaTensor = PxMat33::createDiagonal(PxVec3(d2.y + d2.z, d2.x + d2.z, d2.x + d2.y)) * (density1Mass * 1.0f / 3.0f);
centerOfMass = bounds.getCenter();
break;
}
}
}
PX_PHYSX_COMMON_API void Gu::visualize(const PxConvexCoreGeometry& convex, const PxTransform& pose, bool drawCore, const PxBounds3& cullbox, PxRenderOutput& out)
{
PX_UNUSED(cullbox);
PxReal margin = convex.getMargin();
switch (convex.getCoreType())
{
case PxConvexCore::ePOINT:
{
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawSphere(margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawSphere(0, error, out);
}
break;
}
case PxConvexCore::eSEGMENT:
{
const PxConvexCore::Segment& core = convex.getCore<PxConvexCore::Segment>();
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawCapsule(core.length, margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawCapsule(core.length, 0, error, out);
}
break;
}
case PxConvexCore::eBOX:
{
const PxConvexCore::Box& core = convex.getCore<PxConvexCore::Box>();
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawBox(core.extents, margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawBox(core.extents, 0, error, out);
}
break;
}
case PxConvexCore::eELLIPSOID:
{
const PxConvexCore::Ellipsoid& core = convex.getCore<PxConvexCore::Ellipsoid>();
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawEllipsoid(core.radii, margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawEllipsoid(core.radii, 0, error, out);
}
break;
}
case PxConvexCore::eCYLINDER:
{
const PxConvexCore::Cylinder& core = convex.getCore<PxConvexCore::Cylinder>();
const PxReal height = core.height;
const PxReal radius = core.radius;
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawCylinder(height, radius, margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawCylinder(height, radius, 0, error, out);
}
break;
}
case PxConvexCore::eCONE:
{
const PxConvexCore::Cone& core = convex.getCore<PxConvexCore::Cone>();
const PxReal height = core.height;
const PxReal radius = core.radius;
const PxReal error = 0.001f;
out << pose;
out << GEOMETRY_COLOR;
debug::drawCone(height, radius, margin, error, out);
if (drawCore)
{
out << GEOMETRY_CORE_COLOR;
debug::drawCone(height, radius, 0, error, out);
}
break;
}
default:
{
out << pose;
out << GEOMETRY_COLOR;
out << PxDebugBox(Gu::computeBounds(convex, PxTransform(PxIdentity)));
break;
}
}
}
PX_PHYSX_COMMON_API bool Gu::makeConvexShape(const PxGeometry& geom, const PxTransform& pose, ConvexShape& convex)
{
convex.coreType = Gu::ConvexCore::Type::Enum(-1);
convex.pose = pose;
switch (geom.getType())
{
case PxGeometryType::eCONVEXCORE:
{
const PxConvexCoreGeometry& g = static_cast<const PxConvexCoreGeometry&>(geom);
convex.coreType = Gu::ConvexCore::Type::Enum(g.getCoreType());
PxMemCopy(convex.coreData, g.getCoreData(), PxConvexCoreGeometry::MAX_CORE_SIZE);
convex.margin = g.getMargin();
return true;
}
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& g = static_cast<const PxSphereGeometry&>(geom);
convex.coreType = Gu::ConvexCore::Type::ePOINT;
convex.margin = g.radius;
return true;
}
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& g = static_cast<const PxCapsuleGeometry&>(geom);
convex.coreType = Gu::ConvexCore::Type::eSEGMENT;
Gu::ConvexCore::SegmentCore& core = *reinterpret_cast<Gu::ConvexCore::SegmentCore*>(convex.coreData);
core.length = g.halfHeight * 2.0f;
convex.margin = g.radius;
return true;
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& g = static_cast<const PxBoxGeometry&>(geom);
convex.coreType = Gu::ConvexCore::Type::eBOX;
Gu::ConvexCore::BoxCore& core = *reinterpret_cast<Gu::ConvexCore::BoxCore*>(convex.coreData);
core.extents = g.halfExtents * 2.0f;
convex.margin = 0;
return true;
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& g = static_cast<const PxConvexMeshGeometry&>(geom);
convex.coreType = Gu::ConvexCore::Type::ePOINTS;
Gu::ConvexCore::PointsCore& core = *reinterpret_cast<Gu::ConvexCore::PointsCore*>(convex.coreData);
core.points = g.convexMesh->getVertices();
core.numPoints = PxU8(g.convexMesh->getNbVertices());
core.stride = sizeof(PxVec3);
core.S = g.scale.scale;
core.R = g.scale.rotation;
convex.margin = 0;
return true;
}
default:
break;
}
return false;
}

View File

@@ -0,0 +1,75 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "GuConvexSupport.h"
using namespace physx;
using namespace Gu;
PX_CUDA_CALLABLE
// generates contacts between a plane and a convex
PxU32 physx::Gu::generateContacts(const PxPlane& plane0, const ConvexShape& convex1, const PxReal contactDist,
PxVec3& normal, PxVec3 points[MAX_CONVEX_CONTACTS], PxReal dists[MAX_CONVEX_CONTACTS])
{
normal = -plane0.n;
const PxVec3 point1 = convex1.support(normal);
const PxReal dist = plane0.distance(point1);
PxU32 numContacts = 0;
if (dist < contactDist)
{
PxVec3 faceNormal, facePoints[Gu::ConvexCore::MAX_FACE_POINTS];
const PxU32 numPoints = convex1.contactFace(normal, point1, faceNormal, facePoints);
if (numPoints == 0)
{
const PxVec3 point = point1 + normal * dist * 0.5f;
points[numContacts] = point;
dists[numContacts] = dist;
++numContacts;
}
for (PxU32 i = 0; i < numPoints; ++i)
{
const PxVec3 p1 = facePoints[i];
const PxReal d = plane0.distance(p1);
points[numContacts] = p1 + normal * d * 0.5f;
dists[numContacts] = d;
++numContacts;
}
}
return numContacts;
}

View File

@@ -0,0 +1,870 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBitMap.h"
#include "GuExtendedBucketPruner.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuQuery.h"
#include "GuCallbackAdapter.h"
#include "GuSqInternal.h"
using namespace physx;
using namespace Gu;
#define EXT_NB_OBJECTS_PER_NODE 4
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
ExtendedBucketPruner::ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool) :
mCompanion (createCompanionPruner(contextID, type, pool)),
mPruningPool (pool),
mMainTree (NULL),
mMergedTrees (NULL),
mCurrentTreeIndex (0),
mTreesDirty (false)
{
// preallocated size for bounds, trees
mCurrentTreeCapacity = 32;
mBounds.init(mCurrentTreeCapacity);
mMergedTrees = PX_ALLOCATE(MergedTree, mCurrentTreeCapacity, "AABB trees");
mExtendedBucketPrunerMap.reserve(mCurrentTreeCapacity);
// create empty main tree
mMainTree = PX_NEW(AABBTree);
// create empty merge trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
}
//////////////////////////////////////////////////////////////////////////
ExtendedBucketPruner::~ExtendedBucketPruner()
{
// release main tree
PX_DELETE(mMainTree);
// release merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
AABBTree* aabbTree = mMergedTrees[i].mTree;
PX_DELETE(aabbTree);
}
mBounds.release();
PX_FREE(mMergedTrees);
PX_DELETE(mCompanion);
}
//////////////////////////////////////////////////////////////////////////
// release all objects in bucket pruner
void ExtendedBucketPruner::release()
{
if(mCompanion)
mCompanion->release();
mMainTreeUpdateMap.release();
mMergeTreeUpdateMap.release();
// release all objecs from the map
mExtendedBucketPrunerMap.clear();
// release all merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree->release();
}
// reset current tree index
mCurrentTreeIndex = 0;
}
//////////////////////////////////////////////////////////////////////////
// Add a tree from a pruning structure
// 1. get new tree index
// 2. initialize merged tree, bounds
// 3. create update map for the merged tree
// 4. build new tree of trees from given trees bounds
// 5. add new objects into extended bucket pruner map
// 6. shift indices in the merged tree
void ExtendedBucketPruner::addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp)
{
// check if we have to resize
if(mCurrentTreeIndex == mCurrentTreeCapacity)
{
resize(mCurrentTreeCapacity*2);
}
// get current merge tree index
const PxU32 mergeTreeIndex = mCurrentTreeIndex++;
// get payloads/userdata pointers - the pointers start at mIndicesOffset, thats where all
// objects were added before merge was called
const PrunerPayload* data = &mPruningPool->getObjects()[mergeData.mIndicesOffset];
// setup merged tree with the merge data and timestamp
mMergedTrees[mergeTreeIndex].mTimeStamp = timeStamp;
AABBTree& mergedTree = *mMergedTrees[mergeTreeIndex].mTree;
mergedTree.initTree(mergeData);
// set bounds
mBounds.getBounds()[mergeTreeIndex] = mergeData.getRootNode().mBV;
// update temporally update map for the current merge tree, map is used to setup the base extended bucket pruner map
mMergeTreeUpdateMap.initMap(mergeData.mNbIndices, mergedTree);
// create new base tree of trees
buildMainAABBTree();
// Add each object into extended bucket pruner hash map
for (PxU32 i = 0; i < mergeData.mNbIndices; i++)
{
ExtendedBucketPrunerData mapData;
mapData.mMergeIndex = mergeTreeIndex;
mapData.mTimeStamp = timeStamp;
PX_ASSERT(mMergeTreeUpdateMap[i] < mergedTree.getNbNodes());
// get node information from the merge tree update map
mapData.mSubTreeNode = mMergeTreeUpdateMap[i];
mExtendedBucketPrunerMap.insert(data[i], mapData);
}
// merged tree indices needs to be shifted now, we cannot shift it in init - the update map
// could not be constructed otherwise, as the indices wont start from 0. The indices
// needs to be shifted by offset from the pruning pool, where the new objects were added into the pruning pool.
mergedTree.shiftIndices(mergeData.mIndicesOffset);
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
}
//////////////////////////////////////////////////////////////////////////
// Builds the new main AABB tree with given current active merged trees and its bounds
void ExtendedBucketPruner::buildMainAABBTree()
{
// create the AABB tree from given merged trees bounds
NodeAllocator nodeAllocator;
bool status = mMainTree->build(AABBTreeBuildParams(EXT_NB_OBJECTS_PER_NODE, mCurrentTreeIndex, &mBounds), nodeAllocator);
PX_UNUSED(status);
PX_ASSERT(status);
// Init main tree update map for the new main tree
mMainTreeUpdateMap.initMap(mCurrentTreeIndex, *mMainTree);
}
//////////////////////////////////////////////////////////////////////////
// resize internal memory, buffers
void ExtendedBucketPruner::resize(PxU32 size)
{
PX_ASSERT(size > mCurrentTreeCapacity);
mBounds.resize(size, mCurrentTreeCapacity);
// allocate new merged trees
MergedTree* newMergeTrees = PX_ALLOCATE(MergedTree, size, "AABB trees");
// copy previous merged trees
PxMemCopy(newMergeTrees, mMergedTrees, sizeof(MergedTree)*mCurrentTreeCapacity);
PX_FREE(mMergedTrees);
mMergedTrees = newMergeTrees;
// allocate new trees for merged trees
for (PxU32 i = mCurrentTreeCapacity; i < size; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
mCurrentTreeCapacity = size;
}
//////////////////////////////////////////////////////////////////////////
// Update object
bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex)
{
const ExtendedBucketPrunerMap::Entry* extendedPrunerEntry = mExtendedBucketPrunerMap.find(object);
// if object is not in tree of trees, it is in bucket pruner core
if(!extendedPrunerEntry)
{
if(mCompanion)
mCompanion->updateObject(object, handle, worldAABB, transform, poolIndex);
}
else
{
const ExtendedBucketPrunerData& data = extendedPrunerEntry->second;
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
// update tree where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark for refit node in merged tree
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark for refit node in main aabb tree
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
mTreesDirty = true;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// refit merged nodes
// 1. refit nodes in merged trees
// 2. check if after refit root node is valid - might happen edge case
// where all objects were released - the root node is then invalid
// in this edge case we need to compact the merged trees array
// and create new main AABB tree
// 3. If all merged trees bounds are valid - refit main tree
// 4. If bounds are invalid create new main AABB tree
void ExtendedBucketPruner::refitMarkedNodes(const PxBounds3* boxes)
{
// if no tree needs update early exit
if(!mTreesDirty)
return;
// refit trees and update bounds for main tree
PxU32 nbValidTrees = 0;
for (PxU32 i = mCurrentTreeIndex; i--; )
{
AABBTree& tree = *mMergedTrees[i].mTree;
tree.refitMarkedNodes(boxes);
const PxBounds3& bounds = tree.getNodes()[0].mBV;
// check if bounds are valid, if all objects of the tree were released, the bounds
// will be invalid, in that case we cannot use this tree anymore.
if(bounds.isValid())
{
nbValidTrees++;
}
mBounds.getBounds()[i] = bounds;
}
if(nbValidTrees == mCurrentTreeIndex)
{
// no tree has been removed refit main tree
mMainTree->refitMarkedNodes(mBounds.getBounds());
}
else
{
// edge case path, tree does not have a valid root node bounds - all objects from the tree were released
// we might even fire perf warning
// compact the tree array - no holes in the array, remember the swap position
PxU32* swapMap = PX_ALLOCATE(PxU32, (mCurrentTreeIndex + 1), "Swap Map");
PxU32 writeIndex = 0;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
AABBTree& tree = *mMergedTrees[i].mTree;
if(tree.getNodes()[0].mBV.isValid())
{
// we have to store the tree into an empty location
if(i != writeIndex)
{
PX_ASSERT(writeIndex < i);
AABBTree* ptr = mMergedTrees[writeIndex].mTree;
mMergedTrees[writeIndex] = mMergedTrees[i];
mMergedTrees[i].mTree = ptr;
mBounds.getBounds()[writeIndex] = mBounds.getBounds()[i];
}
// remember the swap location
swapMap[i] = writeIndex;
writeIndex++;
}
else
{
// tree is not valid, release it
tree.release();
mMergedTrees[i].mTimeStamp = 0;
}
// remember the swap
swapMap[mCurrentTreeIndex] = i;
}
PX_ASSERT(writeIndex == nbValidTrees);
// new merged trees size
mCurrentTreeIndex = nbValidTrees;
if(mCurrentTreeIndex)
{
// trees have changed, we need to rebuild the main tree
buildMainAABBTree();
// fixup the object entries, the merge index has changed
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(swapMap[data.mMergeIndex] < nbValidTrees);
data.mMergeIndex = swapMap[data.mMergeIndex];
}
}
else
{
// if there is no tree release the main tree
mMainTree->release();
}
PX_FREE(swapMap);
}
#if PX_DEBUG
checkValidity();
#endif
mTreesDirty = false;
}
//////////////////////////////////////////////////////////////////////////
// remove object
bool ExtendedBucketPruner::removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
ExtendedBucketPrunerMap::Entry dataEntry;
// if object is not in tree of trees, it is in bucket pruner core
if (!mExtendedBucketPrunerMap.erase(object, dataEntry))
{
// we need to call invalidateObjects, it might happen that the swapped object
// does belong to the extended bucket pruner, in that case the objects index
// needs to be swapped.
// do not call additional bucket pruner swap, that does happen during remove
swapIndex(objectIndex, swapObject, swapObjectIndex, false);
return mCompanion ? mCompanion->removeObject(object, handle, objectIndex, swapObjectIndex) : true;
}
else
{
const ExtendedBucketPrunerData& data = dataEntry.second;
// mark tree nodes where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark the merged tree for refit
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark the main tree for refit
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
// call invalidate object to swap the object indices in the merged trees
invalidateObject(data, objectIndex, swapObject, swapObjectIndex);
mTreesDirty = true;
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
return true;
}
//////////////////////////////////////////////////////////////////////////
// invalidate object
// remove the objectIndex from the merged tree
void ExtendedBucketPruner::invalidateObject(const ExtendedBucketPrunerData& data, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
// get the merged tree
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
PX_ASSERT(tree.getNodes()[data.mSubTreeNode].isLeaf());
// get merged tree node
BVHNode& node0 = tree.getNodes()[data.mSubTreeNode];
const PxU32 nbPrims = node0.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node0.getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// Look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (objectIndex == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims - 1;
node0.setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
// Swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if (last != i)
PxSwap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
swapIndex(objectIndex, swapObject, swapObjectIndex);
}
// Swap object index
// if swapObject is in a merged tree its index needs to be swapped with objectIndex
void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded)
{
PX_UNUSED(corePrunerIncluded);
if (objectIndex == swapObjectIndex)
return;
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(swapObject);
// if swapped object index is in extended pruner, we have to fix the primitives index
if (extendedPrunerSwapEntry)
{
const ExtendedBucketPrunerData& swapData = extendedPrunerSwapEntry->second;
AABBTree& swapTree = *mMergedTrees[swapData.mMergeIndex].mTree;
// With multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
// case there is nothing to do.
PX_ASSERT(swapData.mSubTreeNode < swapTree.getNbNodes());
PX_ASSERT(swapTree.getNodes()[swapData.mSubTreeNode].isLeaf());
BVHNode* node1 = swapTree.getNodes() + swapData.mSubTreeNode;
const PxU32 nbPrims = node1->getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node1->getPrimitives(swapTree.getIndices());
PX_ASSERT(primitives);
// look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (swapObjectIndex == primitives[i])
{
foundIt = true;
primitives[i] = objectIndex; // point node to the pool object moved to
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
else
{
if(corePrunerIncluded)
if(mCompanion)
mCompanion->swapIndex(objectIndex, swapObjectIndex);
}
}
//////////////////////////////////////////////////////////////////////////
// Optimized removal of timestamped objects from the extended bucket pruner
PxU32 ExtendedBucketPruner::removeMarkedObjects(PxU32 timeStamp)
{
// remove objects from the core bucket pruner
PxU32 retVal = mCompanion ? mCompanion->removeMarkedObjects(timeStamp) : 0;
// nothing to be removed
if(!mCurrentTreeIndex)
return retVal;
// if last merged tree is the timeStamp to remove, we can clear all
// this is safe as the merged trees array is time ordered, never shifted
if(mMergedTrees[mCurrentTreeIndex - 1].mTimeStamp == timeStamp)
{
retVal += mExtendedBucketPrunerMap.size();
cleanTrees();
return retVal;
}
// get the highest index in the merged trees array, where timeStamp match
// we release than all trees till the index
PxU32 highestTreeIndex = 0xFFFFFFFF;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
if(mMergedTrees[i].mTimeStamp == timeStamp)
highestTreeIndex = i;
else
break;
}
// if no timestamp found early exit
if(highestTreeIndex == 0xFFFFFFFF)
return retVal;
PX_ASSERT(highestTreeIndex < mCurrentTreeIndex);
// get offset, where valid trees start
const PxU32 mergeTreeOffset = highestTreeIndex + 1;
// shrink the array to merged trees with a valid timeStamp
mCurrentTreeIndex = mCurrentTreeIndex - mergeTreeOffset;
// go over trees and swap released trees with valid trees from the back (valid trees are at the back)
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// store bounds, timestamp
mBounds.getBounds()[i] = mMergedTrees[mergeTreeOffset + i].mTree->getNodes()[0].mBV;
mMergedTrees[i].mTimeStamp = mMergedTrees[mergeTreeOffset + i].mTimeStamp;
// release the tree with timestamp
AABBTree* ptr = mMergedTrees[i].mTree;
ptr->release();
// store the valid tree
mMergedTrees[i].mTree = mMergedTrees[mergeTreeOffset + i].mTree;
// store the release tree at the offset
mMergedTrees[mergeTreeOffset + i].mTree = ptr;
mMergedTrees[mergeTreeOffset + i].mTimeStamp = 0;
}
// release the rest of the trees with not valid timestamp
for (PxU32 i = mCurrentTreeIndex; i <= highestTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
// build new main AABB tree with only trees with valid valid timeStamp
buildMainAABBTree();
// remove all unnecessary trees and map entries
bool removeEntry = false;
PxU32 numRemovedEntries = 0;
ExtendedBucketPrunerMap::EraseIterator eraseIterator = mExtendedBucketPrunerMap.getEraseIterator();
ExtendedBucketPrunerMap::Entry* entry = eraseIterator.eraseCurrentGetNext(removeEntry);
while (entry)
{
ExtendedBucketPrunerData& data = entry->second;
// data to be removed
if (data.mTimeStamp == timeStamp)
{
removeEntry = true;
numRemovedEntries++;
}
else
{
// update the merge index and main tree node index
PX_ASSERT(highestTreeIndex < data.mMergeIndex);
data.mMergeIndex -= mergeTreeOffset;
removeEntry = false;
}
entry = eraseIterator.eraseCurrentGetNext(removeEntry);
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
// return the number of removed objects
return retVal + numRemovedEntries;
}
//////////////////////////////////////////////////////////////////////////
// clean all trees, all objects have been released
void ExtendedBucketPruner::cleanTrees()
{
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
mExtendedBucketPrunerMap.clear();
mCurrentTreeIndex = 0;
mMainTree->release();
}
//////////////////////////////////////////////////////////////////////////
// shift origin
void ExtendedBucketPruner::shiftOrigin(const PxVec3& shift)
{
mMainTree->shiftOrigin(shift);
for(PxU32 i=0; i<mCurrentTreeIndex; i++)
mMergedTrees[i].mTree->shiftOrigin(shift);
if(mCompanion)
mCompanion->shiftOrigin(shift);
}
//////////////////////////////////////////////////////////////////////////
// Queries implementation
//////////////////////////////////////////////////////////////////////////
// Raycast/sweeps callback for main AABB tree
template<const bool tInflate>
struct MainTreeRaycastPrunerCallback
{
MainTreeRaycastPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, PrunerRaycastCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
: mOrigin(origin), mUnitDir(unitDir), mExtent(extent), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
{
}
bool invoke(PxReal& distance, PxU32 primIndex)
{
const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
// raycast the merged tree
RaycastCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
return AABBTreeRaycast<tInflate, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, pcb);
}
PX_NOCOPY(MainTreeRaycastPrunerCallback)
private:
const PxVec3& mOrigin;
const PxVec3& mUnitDir;
const PxVec3& mExtent;
PrunerRaycastCallback& mPrunerCallback;
const PruningPool* mPruningPool;
const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// raycast against the extended bucket pruner
bool ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->raycast(origin, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxVec3 extent(0.0f);
// main tree callback
MainTreeRaycastPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, mPruningPool, mMergedTrees);
// traverse the main tree
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<false>>()(mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// overlap main tree callback
template<typename Test>
struct MainTreeOverlapPrunerCallback
{
MainTreeOverlapPrunerCallback(const Test& test, PrunerOverlapCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
: mTest(test), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
{
}
bool invoke(PxU32 primIndex)
{
const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
// overlap the merged tree
OverlapCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
return AABBTreeOverlap<true, Test, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mTest, pcb);
}
PX_NOCOPY(MainTreeOverlapPrunerCallback)
private:
const Test& mTest;
PrunerOverlapCallback& mPrunerCallback;
const PruningPool* mPruningPool;
const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// overlap implementation
bool ExtendedBucketPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->overlap(queryVolume, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
switch (queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if (queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<AABBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<AABBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
MainTreeOverlapPrunerCallback<CapsuleAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<CapsuleAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<SphereAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<SphereAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// sweep implementation
bool ExtendedBucketPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->sweep(queryVolume, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
const PxVec3 extents = aabb.getExtents();
const PxVec3 center = aabb.getCenter();
MainTreeRaycastPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<true>>()(mBounds, *mMainTree, center, unitDir, inOutDistance, extents, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
void ExtendedBucketPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mCompanion)
mCompanion->getGlobalBounds(bounds);
else
bounds.setEmpty();
if(mExtendedBucketPrunerMap.size() && mMainTree && mMainTree->getNodes())
bounds.include(mMainTree->getNodes()->mBV);
}
//////////////////////////////////////////////////////////////////////////
void ExtendedBucketPruner::visualize(PxRenderOutput& out, PxU32 color) const
{
visualizeTree(out, color, mMainTree);
for(PxU32 i=0; i<mCurrentTreeIndex; i++)
visualizeTree(out, color, mMergedTrees[i].mTree);
if(mCompanion)
mCompanion->visualize(out, color);
}
//////////////////////////////////////////////////////////////////////////
#if PX_DEBUG
// extended bucket pruner validity check
bool ExtendedBucketPruner::checkValidity()
{
PxBitMap testBitmap;
testBitmap.resizeAndClear(mCurrentTreeIndex);
for (PxU32 i = 0; i < mMainTree->getNbNodes(); i++)
{
const BVHNode& node = mMainTree->getNodes()[i];
if(node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mMainTree->getIndices());
for (PxU32 j = 0; j < nbPrims; j++)
{
const PxU32 index = primitives[j];
// check if index is correct
PX_ASSERT(index < mCurrentTreeIndex);
// mark the index in the test bitmap, must be once set only, all merged trees must be in the main tree
PX_ASSERT(testBitmap.test(index) == PxIntFalse);
testBitmap.set(index);
}
}
}
PxBitMap mergeTreeTestBitmap;
mergeTreeTestBitmap.resizeAndClear(mPruningPool->getNbActiveObjects());
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// check if bounds are the same as the merged tree root bounds
PX_ASSERT(mBounds.getBounds()[i].maximum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.x);
PX_ASSERT(mBounds.getBounds()[i].maximum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.y);
PX_ASSERT(mBounds.getBounds()[i].maximum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.z);
PX_ASSERT(mBounds.getBounds()[i].minimum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.x);
PX_ASSERT(mBounds.getBounds()[i].minimum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.y);
PX_ASSERT(mBounds.getBounds()[i].minimum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.z);
// check each tree
const AABBTree& mergedTree = *mMergedTrees[i].mTree;
for (PxU32 j = 0; j < mergedTree.getNbNodes(); j++)
{
const BVHNode& node = mergedTree.getNodes()[j];
if (node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mergedTree.getIndices());
for (PxU32 k = 0; k < nbPrims; k++)
{
const PxU32 index = primitives[k];
// check if index is correct
PX_ASSERT(index < mPruningPool->getNbActiveObjects());
// mark the index in the test bitmap, must be once set only, all merged trees must be in the main tree
PX_ASSERT(mergeTreeTestBitmap.test(index) == PxIntFalse);
mergeTreeTestBitmap.set(index);
#if PX_ENABLE_ASSERTS
const PrunerPayload& payload = mPruningPool->getObjects()[index];
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(payload);
PX_ASSERT(extendedPrunerSwapEntry);
const ExtendedBucketPrunerData& data = extendedPrunerSwapEntry->second;
PX_ASSERT(data.mMergeIndex == i);
PX_ASSERT(data.mSubTreeNode == j);
#endif
}
}
}
}
for (PxU32 i = mCurrentTreeIndex; i < mCurrentTreeCapacity; i++)
{
PX_ASSERT(mMergedTrees[i].mTree->getIndices() == NULL);
PX_ASSERT(mMergedTrees[i].mTree->getNodes() == NULL);
}
#if PX_ENABLE_ASSERTS
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
const ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
PX_ASSERT(data.mSubTreeNode < mMergedTrees[data.mMergeIndex].mTree->getNbNodes());
}
#endif
return true;
}
#endif

View File

@@ -0,0 +1,188 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_EXTENDED_BUCKET_PRUNER_H
#define GU_EXTENDED_BUCKET_PRUNER_H
#include "GuPrunerTypedef.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
#include "GuAABBTreeBounds.h"
#include "GuSecondaryPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class AABBTreeMergeData;
// Extended bucket pruner data, if an object belongs to the tree of trees, we need to
// remember node for the sub tree, the tree it belongs to and the main tree node
struct ExtendedBucketPrunerData
{
PxU32 mTimeStamp; // timestamp
TreeNodeIndex mSubTreeNode; // sub tree node index
PxU32 mMergeIndex; // index in bounds and merged trees array
};
// Merged tree structure, holds tree and its timeStamp, released when no objects is in the tree
// or timeStamped objects are released
struct MergedTree
{
AABBTree* mTree; // AABB tree
size_t mTimeStamp; //
};
// hashing function for PrunerPayload key
// PT: TODO: move this to PrunerPayload?
struct ExtendedBucketPrunerHash
{
PX_FORCE_INLINE uint32_t operator()(const PrunerPayload& payload) const
{
#if PX_P64_FAMILY
// const PxU32 h0 = PxHash((const void*)payload.data[0]);
// const PxU32 h1 = PxHash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return physx::PxComputeHash(PxU64(h0) | (PxU64(h1) << 32));
#else
return physx::PxComputeHash(PxU64(payload.data[0]) | (PxU64(payload.data[1]) << 32));
#endif
}
PX_FORCE_INLINE bool equal(const PrunerPayload& k0, const PrunerPayload& k1) const
{
return (k0.data[0] == k1.data[0]) && (k0.data[1] == k1.data[1]);
}
};
// A.B. replace, this is useless, need to be able to traverse the map and release while traversing, also eraseAt failed
typedef PxHashMap<PrunerPayload, ExtendedBucketPrunerData, ExtendedBucketPrunerHash> ExtendedBucketPrunerMap;
// Extended bucket pruner holds single objects in a bucket pruner and AABBtrees in a tree of trees.
// Base usage of ExtendedBucketPruner is for dynamic AABBPruner new objects, that did not make it
// into new tree. Single objects go directly into a bucket pruner, while merged AABBtrees
// go into a tree of trees.
// PT: TODO: this is not a Pruner (doesn't use the Pruner API) so its name should be e.g. "ExtendedBucketPrunerCore".
// And it's also not always using a bucket pruner... so the whole "ExtendedBucketPruner" name everywhere is wrong.
class ExtendedBucketPruner
{
public:
ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
~ExtendedBucketPruner();
// release
void release();
// add single object into a bucket pruner directly
PX_FORCE_INLINE bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, const PoolIndex poolIndex)
{
return mCompanion ? mCompanion->addObject(object, handle, worldAABB, transform, timeStamp, poolIndex) : true;
}
// add AABB tree from pruning structure - adds new primitive into main AABB tree
void addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp);
// update object
bool updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex);
// remove object, removed object is replaced in pruning pool by swapped object, indices needs to be updated
bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
// swap object index, the object index can be in core pruner or tree of trees
void swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded = true);
// refit marked nodes in tree of trees
void refitMarkedNodes(const PxBounds3* boxes);
// notify timestampChange - swap trees in incremental pruner
PX_FORCE_INLINE void timeStampChange()
{
if(mCompanion)
mCompanion->timeStampChange();
}
// look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
// queries against the pruner
bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
// origin shift
void shiftOrigin(const PxVec3& shift);
// debug visualize
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void build()
{
if(mCompanion)
mCompanion->build();
}
PX_FORCE_INLINE PxU32 getNbObjects() const
{
const PxU32 nb = mCompanion ? mCompanion->getNbObjects() : 0;
return nb + mExtendedBucketPrunerMap.size();
}
void getGlobalBounds(PxBounds3&) const;
private:
// separate call for indices invalidation, object can be either in AABBPruner or Bucket pruner, but the swapped object can be
// in the tree of trees
void invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
void resize(PxU32 size);
void buildMainAABBTree();
void cleanTrees();
#if PX_DEBUG
// Extended bucket pruner validity check
bool checkValidity();
#endif
CompanionPruner* mCompanion; // Companion pruner for single objects
const PruningPool* mPruningPool; // Pruning pool from AABB pruner
ExtendedBucketPrunerMap mExtendedBucketPrunerMap; // Map holding objects from tree merge - objects in tree of trees
AABBTree* mMainTree; // Main tree holding merged trees
AABBTreeUpdateMap mMainTreeUpdateMap; // Main tree updated map - merged trees index to nodes
AABBTreeUpdateMap mMergeTreeUpdateMap; // Merged tree update map used while tree is merged
AABBTreeBounds mBounds; // Merged trees bounds used for main tree building
MergedTree* mMergedTrees; // Merged trees
PxU32 mCurrentTreeIndex; // Current trees index
PxU32 mCurrentTreeCapacity; // Current tress capacity
bool mTreesDirty; // Dirty marker
};
}
}
#endif

View File

@@ -0,0 +1,51 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuFactory.h"
#include "GuAABBPruner.h"
#include "GuBucketPruner.h"
#include "GuIncrementalAABBPruner.h"
using namespace physx;
using namespace Gu;
Pruner* physx::Gu::createBucketPruner(PxU64 contextID)
{
return PX_NEW(BucketPruner)(contextID);
}
Pruner* physx::Gu::createAABBPruner(PxU64 contextID, bool dynamic, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode)
{
return PX_NEW(AABBPruner)(dynamic, contextID, cpType, buildStrategy, nbObjectsPerNode);
}
Pruner* physx::Gu::createIncrementalPruner(PxU64 contextID)
{
return PX_NEW(IncrementalAABBPruner)(32, contextID);
}

View File

@@ -0,0 +1,96 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_GEOMETRY_CHECKS_H
#define GU_GEOMETRY_CHECKS_H
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxConvexCoreGeometry.h"
namespace physx
{
// We sometimes overload capsule code for spheres, so every sphere should have
// valid capsule data (height = 0). This is preferable to a typedef so that we
// can maintain traits separately for a sphere, but some care is required to deal
// with the fact that when a reference to a capsule is extracted, it may have its
// type field set to eSPHERE
template <typename T>
struct PxcGeometryTraits
{
enum {TypeID = PxGeometryType::eINVALID };
};
template <typename T> struct PxcGeometryTraits<const T> { enum { TypeID = PxcGeometryTraits<T>::TypeID }; };
template <> struct PxcGeometryTraits<PxBoxGeometry> { enum { TypeID = PxGeometryType::eBOX }; };
template <> struct PxcGeometryTraits<PxSphereGeometry> { enum { TypeID = PxGeometryType::eSPHERE }; };
template <> struct PxcGeometryTraits<PxCapsuleGeometry> { enum { TypeID = PxGeometryType::eCAPSULE }; };
template <> struct PxcGeometryTraits<PxPlaneGeometry> { enum { TypeID = PxGeometryType::ePLANE }; };
template <> struct PxcGeometryTraits<PxParticleSystemGeometry> { enum { TypeID = PxGeometryType::ePARTICLESYSTEM}; };
template <> struct PxcGeometryTraits<PxConvexCoreGeometry> { enum { TypeID = PxGeometryType::eCONVEXCORE }; };
template <> struct PxcGeometryTraits<PxConvexMeshGeometry> { enum { TypeID = PxGeometryType::eCONVEXMESH }; };
template <> struct PxcGeometryTraits<PxTriangleMeshGeometry> { enum { TypeID = PxGeometryType::eTRIANGLEMESH }; };
template <> struct PxcGeometryTraits<PxTetrahedronMeshGeometry> { enum { TypeID = PxGeometryType::eTETRAHEDRONMESH }; };
template <> struct PxcGeometryTraits<PxHeightFieldGeometry> { enum { TypeID = PxGeometryType::eHEIGHTFIELD }; };
template <> struct PxcGeometryTraits<PxCustomGeometry> { enum { TypeID = PxGeometryType::eCUSTOM }; };
template<class T> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType(const PxGeometry& geometry)
{
PX_ASSERT(PxU32(geometry.getType()) == PxU32(PxcGeometryTraits<T>::TypeID));
PX_UNUSED(geometry);
}
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType() == PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<const PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType()== PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
}
#if !PX_CUDA_COMPILER
// the shape structure relies on punning capsules and spheres
PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(physx::PxCapsuleGeometry, radius) == PX_OFFSET_OF(physx::PxSphereGeometry, radius));
#endif
#endif

View File

@@ -0,0 +1,555 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometryQuery.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "foundation/PxAtomic.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
#include "GuConvexEdgeFlags.h"
#include "GuVecBox.h"
#include "GuVecConvexHull.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactConvexCommon.h"
#include "GuConvexSupport.h"
#include "GuConvexGeometry.h"
using namespace physx;
using namespace Gu;
extern GeomSweepFuncs gGeomSweepFuncs;
extern GeomOverlapTable gGeomOverlapMethodTable[];
extern RaycastFunc gRaycastMap[PxGeometryType::eGEOMETRY_COUNT];
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::isValid(const PxGeometry& g)
{
switch(PxU32(g.getType()))
{
case PxGeometryType::eSPHERE: return static_cast<const PxSphereGeometry&>(g).isValid();
case PxGeometryType::ePLANE: return static_cast<const PxPlaneGeometry&>(g).isValid();
case PxGeometryType::eCAPSULE: return static_cast<const PxCapsuleGeometry&>(g).isValid();
case PxGeometryType::eBOX: return static_cast<const PxBoxGeometry&>(g).isValid();
case PxGeometryType::eCONVEXCORE: return static_cast<const PxConvexCoreGeometry&>(g).isValid();
case PxGeometryType::eCONVEXMESH: return static_cast<const PxConvexMeshGeometry&>(g).isValid();
case PxGeometryType::eTRIANGLEMESH: return static_cast<const PxTriangleMeshGeometry&>(g).isValid();
case PxGeometryType::eHEIGHTFIELD: return static_cast<const PxHeightFieldGeometry&>(g).isValid();
case PxGeometryType::eTETRAHEDRONMESH: return static_cast<const PxTetrahedronMeshGeometry&>(g).isValid();
case PxGeometryType::ePARTICLESYSTEM: return static_cast<const PxParticleSystemGeometry&>(g).isValid();
case PxGeometryType::eCUSTOM: return static_cast<const PxCustomGeometry&>(g).isValid();
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::sweep(const PxVec3& unitDir, const PxReal distance,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags,
const PxReal inflation, PxGeometryQueryFlags queryFlags, PxSweepThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::sweep(): pose0 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::sweep(): pose1 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(unitDir.isFinite(), "PxGeometryQuery::sweep(): unitDir is not valid.", false);
PX_CHECK_AND_RETURN_VAL(PxIsFinite(distance), "PxGeometryQuery::sweep(): distance is not valid.", false);
PX_CHECK_AND_RETURN_VAL((distance >= 0.0f && !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP)) || distance > 0.0f,
"PxGeometryQuery::sweep(): sweep distance must be >=0 or >0 with eASSUME_NO_INITIAL_OVERLAP.", 0);
#if PX_CHECKED
if(!PxGeometryQuery::isValid(geom0))
return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 0 is not valid");
if(!PxGeometryQuery::isValid(geom1))
return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 1 is not valid");
#endif
const GeomSweepFuncs& sf = gGeomSweepFuncs;
switch(geom0.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxCapsuleGeometry capsuleGeom(sphereGeom.radius, 0.0f);
const Capsule worldCapsule(pose0.p, pose0.p, sphereGeom.radius);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];
return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
Capsule worldCapsule;
getCapsule(worldCapsule, capsuleGeom, pose0);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];
return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
Box box;
buildFrom(box, pose0.p, boxGeom.halfExtents, pose0.q);
const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;
const SweepBoxFunc func = precise ? sf.preciseBoxMap[geom1.getType()] : sf.boxMap[geom1.getType()];
return func(geom1, pose1, boxGeom, pose0, box, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);
const SweepConvexFunc func = sf.convexMap[geom1.getType()];
return func(geom1, pose1, convexGeom, pose0, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
}
default:
PX_CHECK_MSG(false, "PxGeometryQuery::sweep(): first geometry object parameter must be sphere, capsule, box or convex geometry.");
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::overlap( const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags, PxOverlapThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
return Gu::overlap(geom0, pose0, geom1, pose1, gGeomOverlapMethodTable, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
PxU32 PxGeometryQuery::raycast( const PxVec3& rayOrigin, const PxVec3& rayDir,
const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* PX_RESTRICT rayHits, PxU32 stride,
PxGeometryQueryFlags queryFlags, PxRaycastThreadContext* threadContext)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(rayDir.isFinite(), "PxGeometryQuery::raycast(): rayDir is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(rayOrigin.isFinite(), "PxGeometryQuery::raycast(): rayOrigin is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::raycast(): pose is not valid.", 0);
PX_CHECK_AND_RETURN_VAL(maxDist >= 0.0f, "PxGeometryQuery::raycast(): maxDist is negative.", false);
PX_CHECK_AND_RETURN_VAL(PxIsFinite(maxDist), "PxGeometryQuery::raycast(): maxDist is not valid.", false);
PX_CHECK_AND_RETURN_VAL(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f, "PxGeometryQuery::raycast(): ray direction must be unit vector.", false);
const RaycastFunc func = gRaycastMap[geom.getType()];
return func(geom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, rayHits, stride, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
bool pointConvexDistance(PxVec3& normal_, PxVec3& closestPoint_, PxReal& sqDistance, const PxVec3& pt, const ConvexMesh* convexMesh, const PxMeshScale& meshScale, const PxTransform32& convexPose);
PxReal PxGeometryQuery::pointDistance(const PxVec3& point, const PxGeometry& geom, const PxTransform& pose, PxVec3* closestPoint, PxU32* closestIndex, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::pointDistance(): pose is not valid.", -1.0f);
switch(geom.getType())
{
case PxGeometryType::eSPHERE:
{
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const PxReal r = sphereGeom.radius;
PxVec3 delta = point - pose.p;
const PxReal d = delta.magnitude();
if(d<=r)
return 0.0f;
if(closestPoint)
{
delta /= d;
*closestPoint = pose.p + delta * r;
}
return (d - r)*(d - r);
}
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule capsule;
getCapsule(capsule, capsGeom, pose);
const PxReal r = capsGeom.radius;
PxReal param;
const PxReal sqDistance = distancePointSegmentSquared(capsule, point, &param);
if(sqDistance<=r*r)
return 0.0f;
const PxReal d = physx::intrinsics::sqrt(sqDistance);
if(closestPoint)
{
const PxVec3 cp = capsule.getPointAt(param);
PxVec3 delta = point - cp;
delta.normalize();
*closestPoint = cp + delta * r;
}
return (d - r)*(d - r);
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
Box obb;
buildFrom(obb, pose.p, boxGeom.halfExtents, pose.q);
PxVec3 boxParam;
const PxReal sqDistance = distancePointBoxSquared(point, obb, &boxParam);
if(closestPoint && sqDistance!=0.0f)
{
*closestPoint = obb.transform(boxParam);
}
return sqDistance;
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
const PxTransform32 poseA(pose);
PxVec3 normal, cp;
PxReal sqDistance;
const bool intersect = pointConvexDistance(normal, cp, sqDistance, point, static_cast<ConvexMesh*>(convexGeom.convexMesh), convexGeom.scale, poseA);
if(!intersect && closestPoint)
*closestPoint = cp;
return sqDistance;
}
case PxGeometryType::eCONVEXCORE:
{
const PxConvexCoreGeometry& convexGeom = static_cast<const PxConvexCoreGeometry&>(geom);
// Create a point support
Gu::ConvexShape pointShape;
pointShape.coreType = Gu::ConvexCore::Type::ePOINT;
pointShape.pose = PxTransform(PxIdentity);
pointShape.margin = 0.0f;
// Create convex core shape
Gu::ConvexShape convexShape;
Gu::makeConvexShape(convexGeom, pose, convexShape);
PX_ASSERT(convexShape.isValid());
// Compute distance using GJK
PxVec3 pointA, pointB, axis;
const PxReal distance = Gu::RefGjkEpa::computeGjkDistance(pointShape, convexShape, PxTransform(PxIdentity), pose, FLT_MAX, pointA, pointB, axis);
if(closestPoint)
*closestPoint = pointB;
return distance * distance;
}
case PxGeometryType::eTRIANGLEMESH:
{
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);
PxU32 index;
float dist;
PxVec3 cp;
Midphase::pointMeshDistance(static_cast<TriangleMesh*>(meshGeom.triangleMesh), meshGeom, pose, point, FLT_MAX, index, dist, cp);
if(closestPoint)
*closestPoint = cp;
if(closestIndex)
*closestIndex = index;
return dist*dist;
}
default:
PX_CHECK_MSG(false, "PxGeometryQuery::pointDistance(): geometry object parameter must be sphere, capsule, box, convex or mesh geometry.");
break;
}
return -1.0f;
}
///////////////////////////////////////////////////////////////////////////////
void PxGeometryQuery::computeGeomBounds(PxBounds3& bounds, const PxGeometry& geom, const PxTransform& pose, float offset, float inflation, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN(pose.isValid(), "PxGeometryQuery::computeGeomBounds(): pose is not valid.");
Gu::computeBounds(bounds, geom, pose, offset, inflation);
PX_ASSERT(bounds.isValid());
}
///////////////////////////////////////////////////////////////////////////////
extern GeomMTDFunc gGeomMTDMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
bool PxGeometryQuery::computePenetration( PxVec3& mtd, PxF32& depth,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1, PxGeometryQueryFlags queryFlags)
{
PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::computePenetration(): pose0 is not valid.", false);
PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::computePenetration(): pose1 is not valid.", false);
const PxTransform32 pose0A(pose0);
const PxTransform32 pose1A(pose1);
if(geom0.getType() > geom1.getType())
{
GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom1.getType()][geom0.getType()];
PX_ASSERT(mtdFunc);
if(!mtdFunc(mtd, depth, geom1, pose1A, geom0, pose0A))
return false;
mtd = -mtd;
return true;
}
else
{
GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom0.getType()][geom1.getType()];
PX_ASSERT(mtdFunc);
return mtdFunc(mtd, depth, geom0, pose0A, geom1, pose1A);
}
}
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::generateTriangleContacts(const PxGeometry& geom, const PxTransform& pose, const PxVec3 triangleVertices[3], PxU32 triangleIndex, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxContactBuffer& contactBuffer)
{
using namespace aos;
const PxU32 triangleIndices[3]{ 0, 1, 2 };
PxInlineArray<PxU32, LOCAL_PCM_CONTACTS_SIZE> deferredContacts;
Gu::MultiplePersistentContactManifold multiManifold;
multiManifold.initialize();
PxContactBuffer contactBuffer0; contactBuffer0.reset();
const PxTransformV geomTransform = loadTransformU(pose);
const PxTransformV triangleTransform = loadTransformU(PxTransform(PxIdentity));
float radius0 = 0;
float radius1 = meshContactMargin;
PxU32 oldCount = contactBuffer.count;
switch (geom.getType())
{
case PxGeometryType::eCAPSULE:
{
const PxCapsuleGeometry& capsule = static_cast<const PxCapsuleGeometry&>(geom);
radius0 = capsule.radius;
const FloatV capsuleRadius = FLoad(capsule.radius);
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(capsuleRadius, FLoad(0.001f));
const PxTransformV capsuleTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
multiManifold.setRelativeTransform(capsuleTransform);
const Gu::CapsuleV capsuleV(V3LoadU(pose.p), V3LoadU(pose.q.rotate(PxVec3(capsule.halfHeight, 0, 0))), capsuleRadius);
Gu::PCMCapsuleVsMeshContactGeneration contactGeneration(capsuleV, contactDist, replaceBreakingThreshold, capsuleTransform, meshTransform, multiManifold, contactBuffer0, &deferredContacts);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.processContacts(GU_CAPSULE_MANIFOLD_CACHE_SIZE, false);
break;
}
case PxGeometryType::eBOX:
{
const PxBoxGeometry& box = static_cast<const PxBoxGeometry&>(geom);
const PxBounds3 hullAABB(-box.halfExtents, box.halfExtents);
const Vec3V boxExtents = V3LoadU(box.halfExtents);
const FloatV minMargin = Gu::CalculatePCMBoxMargin(boxExtents, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
Cm::FastVertex2ShapeScaling idtScaling;
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
const BoxV boxV(V3Zero(), boxExtents);
const PxTransformV boxTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
PolygonalData polyData;
PCMPolygonalBox polyBox(box.halfExtents);
polyBox.getPolygonalData(&polyData);
const Mat33V identity = M33Identity();
SupportLocalImpl<BoxV> boxMap(boxV, boxTransform, identity, identity, true);
Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, boxTransform, meshTransform, multiManifold, contactBuffer0, polyData, &boxMap, &deferredContacts, idtScaling, true, true, NULL);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.generateLastContacts();
contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
break;
}
case PxGeometryType::eCONVEXMESH:
{
const PxConvexMeshGeometry& convex = static_cast<const PxConvexMeshGeometry&>(geom);
const ConvexHullData* hullData = _getHullData(convex);
Cm::FastVertex2ShapeScaling convexScaling;
PxBounds3 hullAABB;
PolygonalData polyData;
const bool idtConvexScale = getPCMConvexData(convex, convexScaling, hullAABB, polyData);
const QuatV vQuat = QuatVLoadU(&convex.scale.rotation.x);
const Vec3V vScale = V3LoadU_SafeReadW(convex.scale.scale);
const FloatV minMargin = CalculatePCMConvexMargin(hullData, vScale, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
const ConvexHullV convexHull(hullData, V3Zero(), vScale, vQuat, idtConvexScale);
const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
const PxTransformV convexTransform = geomTransform;
const PxTransformV meshTransform = triangleTransform;
SupportLocalImpl<Gu::ConvexHullV> convexMap(convexHull, convexTransform, convexHull.vertex2Shape, convexHull.shape2Vertex, false);
Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, convexTransform, meshTransform, multiManifold, contactBuffer0, polyData, &convexMap, &deferredContacts, convexScaling, idtConvexScale, true, NULL);
contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
contactGeneration.generateLastContacts();
contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
break;
}
case PxGeometryType::eCONVEXCORE:
{
const PxConvexCoreGeometry& convex = static_cast<const PxConvexCoreGeometry&>(geom);
Gu::ConvexShape convexShape;
Gu::makeConvexShape(convex, pose, convexShape);
PX_ASSERT(convexShape.isValid());
// Create the triangle shape as a points-based convex shape
Gu::ConvexShape triShape;
triShape.coreType = Gu::ConvexCore::Type::ePOINTS;
triShape.pose = PxTransform(PxIdentity);
triShape.margin = meshContactMargin;
// Initialize the points core data
Gu::ConvexCore::PointsCore triCore;
triCore.points = triangleVertices;
triCore.numPoints = 3;
triCore.stride = sizeof(PxVec3);
triCore.S = PxVec3(1.0f);
triCore.R = PxQuat(PxIdentity);
// Copy the core data into the shape's core data buffer
PX_ASSERT(sizeof(triCore) <= Gu::ConvexCore::MAX_CORE_SIZE);
PxMemCopy(triShape.coreData, &triCore, sizeof(triCore));
PxVec3 normal, points[Gu::MAX_CONVEX_CONTACTS];
PxReal dists[Gu::MAX_CONVEX_CONTACTS];
if (PxU32 count = Gu::generateContacts(convexShape, triShape, contactDistance + meshContactMargin, normal, points, dists))
{
for (PxU32 i = 0; i < count; ++i)
{
PxContactPoint contact;
contact.point = points[i];
contact.normal = normal;
contact.separation = dists[i];
contact.internalFaceIndex1 = triangleIndex;
contactBuffer.contact(contact);
}
}
break;
}
default:
PX_ASSERT(0); // Unsupported geometry type
break;
}
for (PxU32 manifoldIndex = 0; manifoldIndex < multiManifold.mNumManifolds; ++manifoldIndex)
{
Gu::SinglePersistentContactManifold& manifold = *multiManifold.getManifold(manifoldIndex);
PxVec3 normal; V3StoreU(manifold.getWorldNormal(triangleTransform), normal);
for (PxU32 contactIndex = 0; contactIndex < manifold.getNumContacts(); ++contactIndex)
{
Gu::MeshPersistentContact& meshContact = manifold.getContactPoint(contactIndex);
PxContactPoint contact;
PxVec3 p0; V3StoreU(geomTransform.transform(meshContact.mLocalPointA), p0); p0 -= normal * radius0;
PxVec3 p1; V3StoreU(meshContact.mLocalPointB, p1); p1 += normal * radius1;
contact.point = (p0 + p1) * 0.5f;
contact.normal = normal;
contact.separation = normal.dot(p0 - p1);
contact.internalFaceIndex1 = triangleIndex;
contactBuffer.contact(contact);
}
}
return oldCount < contactBuffer.count;
}
///////////////////////////////////////////////////////////////////////////////
PxU32 PxCustomGeometry::getUniqueID()
{
static PxU32 uniqueID(0);
PxAtomicIncrement(reinterpret_cast<volatile PxI32*>(&uniqueID));
return uniqueID;
}

View File

@@ -0,0 +1,277 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGjkQuery.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Gu;
#include "GuGJK.h"
#include "GuGJKPenetration.h"
#include "GuGJKRaycast.h"
#include "GuEPA.h"
#include "geomutils/PxContactBuffer.h"
using namespace aos;
static PX_SUPPORT_INLINE PxVec3 Vec3V_To_PxVec3(const Vec3V& a)
{
PxVec3 v;
V3StoreU(a, v);
return v;
}
static PX_SUPPORT_INLINE PxReal FloatV_To_PxReal(const FloatV& a)
{
PxF32 f;
FStore(a, &f);
return f;
}
struct CustomConvexV : ConvexV
{
const PxGjkQuery::Support* s;
PxReal supportScale;
CustomConvexV(const PxGjkQuery::Support& _s) : ConvexV(Gu::ConvexType::eCUSTOM), s(&_s), supportScale(1.0f)
{
setMinMargin(FLoad(0.001f));
setSweepMargin(FLoad(0.001f));
}
PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
{
return supportLocal(V3LoadU(PxVec3(1, 0, 0)));
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir) const
{
return V3Scale(V3LoadU(s->supportLocal(Vec3V_To_PxVec3(dir))), FLoad(supportScale));
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
{
index = 0;
return supportLocal(dir);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
{
const Vec3V _dir = aTobT.rotate(dir);
const Vec3V p = supportLocal(_dir);
return aTob.transform(p);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
{
index = 0;
return supportRelative(dir, aTob, aTobT);
}
};
bool PxGjkQuery::proximityInfo(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, PxReal contactDistance, PxReal toleranceLength, PxVec3& pointA, PxVec3& pointB, PxVec3& separatingAxis, PxReal& separation)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
const PxReal degenerateScale = 0.001f;
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialSearchDir = aToB.p;
FloatV contactDist = FLoad((a.getMargin() + b.getMargin()) + contactDistance);
Vec3V aPoints[4];
Vec3V bPoints[4];
PxU8 size = 0;
GjkOutput output;
GjkStatus status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
if (status == GJK_DEGENERATE)
{
supportA.supportScale = supportB.supportScale = 1.0f - degenerateScale;
status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
supportA.supportScale = supportB.supportScale = 1.0f;
}
if (status == GJK_CONTACT || status == GJK_DEGENERATE)
{
separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
separation = (pointA - pointB).dot(separatingAxis);
return true;
}
if (status == EPA_CONTACT)
{
status = epaPenetration(convexA, convexB, aPoints, bPoints, size, true, FLoad(toleranceLength), output);
if (status == EPA_CONTACT || status == EPA_DEGENERATE)
{
separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
separation = (pointA - pointB).dot(separatingAxis);
return true;
}
}
return false;
}
struct PointConvexV : ConvexV
{
Vec3V zero;
PointConvexV() : ConvexV(Gu::ConvexType::eCUSTOM)
{
zero = V3Zero();
setMinMargin(FLoad(0.001f));
setSweepMargin(FLoad(0.001f));
}
PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
{
return zero;
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& /*dir*/) const
{
return zero;
}
PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
{
index = 0;
return supportLocal(dir);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
{
const Vec3V _dir = aTobT.rotate(dir);
const Vec3V p = supportLocal(_dir);
return aTob.transform(p);
}
PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
{
index = 0;
return supportRelative(dir, aTob, aTobT);
}
};
bool PxGjkQuery::raycast(const Support& shape, const PxTransform& pose, const PxVec3& rayStart, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
const PxTransformV transf0 = loadTransformU(pose);
const PxTransformV transf1 = PxTransformV(V3LoadU(rayStart));
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(shape);
PointConvexV supportB;
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<PointConvexV> convexB(supportB);
Vec3V initialDir = aToB.p;
FloatV initialLambda = FLoad(0);
Vec3V s = V3Zero();
Vec3V r = V3LoadU(unitDir * maxDist);
FloatV lambda;
Vec3V normal, closestA;
if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, shape.getMargin()))
{
t = FloatV_To_PxReal(lambda) * maxDist;
n = -Vec3V_To_PxVec3(normal).getNormalized();
p = Vec3V_To_PxVec3(closestA) + n * shape.getMargin() + rayStart;
return true;
}
return false;
}
bool PxGjkQuery::overlap(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialSearchDir = aToB.p;
FloatV contactDist = FLoad(a.getMargin() + b.getMargin());
Vec3V closestA, closestB, normal;
FloatV distance;
GjkStatus status = gjk(convexA, convexB, initialSearchDir, contactDist, closestA, closestB, normal, distance);
return status == GJK_CLOSE || status == GJK_CONTACT;
}
bool PxGjkQuery::sweep(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialDir = aToB.p;
FloatV initialLambda = FLoad(0);
Vec3V s = V3Zero();
Vec3V r = V3LoadU(poseB.rotateInv(unitDir * maxDist));
FloatV lambda;
Vec3V normal, closestA;
if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, a.getMargin() + b.getMargin()))
{
t = FloatV_To_PxReal(lambda) * maxDist;
n = poseB.rotate(-(Vec3V_To_PxVec3(normal)).getNormalized());
p = poseB.transform(Vec3V_To_PxVec3(closestA)) + n * a.getMargin();
return true;
}
return false;
}

View File

@@ -0,0 +1,398 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
// PT: TODO: this class isn't actually used at the moment
#define COMPILE_INCREMENTAL_AABB_PRUNER
#ifdef COMPILE_INCREMENTAL_AABB_PRUNER
#include "common/PxProfileZone.h"
#include "CmVisualization.h"
#include "foundation/PxBitUtils.h"
#include "GuIncrementalAABBPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
#define PARANOIA_CHECKS 0
IncrementalAABBPruner::IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID) :
mAABBTree (NULL),
mPool (contextID, TRANSFORM_CACHE_GLOBAL),
mContextID (contextID)
{
mMapping.resizeUninitialized(sceneLimit);
mPool.preallocate(sceneLimit);
mChangedLeaves.reserve(sceneLimit);
}
IncrementalAABBPruner::~IncrementalAABBPruner()
{
release();
}
bool IncrementalAABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool )
{
PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mContextID);
if(!count)
return true;
const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);
if(mAABBTree)
{
for(PxU32 i=0;i<valid;i++)
{
const PrunerHandle& handle = results[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = mAABBTree->insert(poolIndex, mPool.getCurrentWorldBoxes(), mChangedLeaves);
updateMapping(poolIndex, node);
}
#if PARANOIA_CHECKS
test();
#endif
}
return valid==count;
}
void IncrementalAABBPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// resize mapping if needed
if(mMapping.size() <= poolIndex)
{
mMapping.resize(mMapping.size() * 2);
}
// if a node was split we need to update the node indices and also the sibling indices
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
mMapping[node->getPrimitives(NULL)[j]] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
mMapping[changedNode->getPrimitives(NULL)[j]] = changedNode;
}
}
}
else
{
mMapping[poolIndex] = node;
}
}
void IncrementalAABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
if(!count)
return;
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
if(!mAABBTree)
return;
const PxBounds3* poolBounds = mPool.getCurrentWorldBoxes();
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = mAABBTree->update(mMapping[poolIndex], poolIndex, poolBounds, mChangedLeaves);
// we removed node during update, need to update the mapping
updateMapping(poolIndex, node);
}
#if PARANOIA_CHECKS
test();
#endif
}
void IncrementalAABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mContextID);
if(!count)
return;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject
if(mAABBTree)
{
IncrementalAABBTreeNode* node = mAABBTree->remove(mMapping[poolIndex], poolIndex, mPool.getCurrentWorldBoxes());
// if node moved to its parent
if (node && node->isLeaf())
{
for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mMapping[index] = node;
}
}
mMapping[poolIndex] = mMapping[poolRelocatedLastIndex];
// fix indices if we made a swap
if(poolRelocatedLastIndex != poolIndex)
mAABBTree->fixupTreeIndices(mMapping[poolIndex], poolRelocatedLastIndex, poolIndex);
if(!mAABBTree->getNodes())
{
release();
}
}
}
#if PARANOIA_CHECKS
test();
#endif
}
bool IncrementalAABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
bool IncrementalAABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
return again;
}
bool IncrementalAABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
return again;
}
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
void IncrementalAABBPruner::purge()
{
release();
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
void IncrementalAABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);
if (!mAABBTree)
{
fullRebuildAABBTree();
return;
}
}
void IncrementalAABBPruner::fullRebuildAABBTree()
{
// Don't bother building an AABB-tree if there isn't a single static object
const PxU32 nbObjects = mPool.getNbActiveObjects();
if (!nbObjects)
return;
const PxU32 indicesSize = PxNextPowerOfTwo(nbObjects);
if(indicesSize > mMapping.size())
{
mMapping.resizeUninitialized(indicesSize);
}
// copy the temp optimized tree into the new incremental tree
mAABBTree = PX_NEW(IncrementalAABBTree)();
mAABBTree->build(AABBTreeBuildParams(INCR_NB_OBJECTS_PER_NODE, nbObjects, &mPool.getCurrentAABBTreeBounds()), mMapping);
#if PARANOIA_CHECKS
test();
#endif
}
void IncrementalAABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
}
void IncrementalAABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
visualizeTree(out, primaryColor, mAABBTree);
// Render added objects not yet in the tree
//out << PxTransform(PxIdentity);
//out << PxU32(PxDebugColor::eARGB_WHITE);
}
void IncrementalAABBPruner::release() // this can be called from purge()
{
PX_DELETE(mAABBTree);
}
void IncrementalAABBPruner::test()
{
if(mAABBTree)
{
mAABBTree->hierarchyCheck(mPool.getNbActiveObjects(), mPool.getCurrentWorldBoxes());
for(PxU32 i = 0; i < mPool.getNbActiveObjects(); i++)
{
mAABBTree->checkTreeLeaf(mMapping[i], i);
}
}
}
void IncrementalAABBPruner::merge(const void* )
{
//const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
//if(mAABBTree)
//{
// // index in pruning pool, where new objects were added
// const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// // create tree from given nodes and indices
// AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
// pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
// if (!mIncrementalRebuild)
// {
// // merge tree directly
// mAABBTree->mergeTree(aabbTreeMergeParams);
// }
// else
// {
// mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
// }
//}
}
void IncrementalAABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
if(mAABBTree && mAABBTree->getNodes())
{
StoreBounds(bounds, mAABBTree->getNodes()->mBVMin, mAABBTree->getNodes()->mBVMax);
}
else
bounds.setEmpty();
}
#endif

View File

@@ -0,0 +1,80 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_PRUNER_H
#define GU_INCREMENTAL_AABB_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
#include "GuPruningPool.h"
#include "GuIncrementalAABBTree.h"
#include "GuSqInternal.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class IncrementalAABBPruner : public Pruner
{
public:
PX_PHYSX_COMMON_API IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID);
virtual ~IncrementalAABBPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
//~Pruner
// direct access for test code
PX_FORCE_INLINE const IncrementalAABBTree* getAABBTree() const { return mAABBTree; }
private:
void release();
void fullRebuildAABBTree();
void test();
void updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
IncrementalAABBTree* mAABBTree;
PruningPool mPool; // Pool of AABBs
PxArray<IncrementalAABBTreeNode*> mMapping;
PxU64 mContextID;
NodeList mChangedLeaves;
};
}
}
#endif

View File

@@ -0,0 +1,413 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmVisualization.h"
#include "GuIncrementalAABBPrunerCore.h"
#include "GuSqInternal.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
#define PARANOIA_CHECKS 0
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
IncrementalAABBPrunerCore::IncrementalAABBPrunerCore(const PruningPool* pool) :
mCurrentTree (1),
mLastTree (0),
mPool (pool)
{
mAABBTree[0].mapping.reserve(256);
mAABBTree[1].mapping.reserve(256);
mChangedLeaves.reserve(32);
}
IncrementalAABBPrunerCore::~IncrementalAABBPrunerCore()
{
release();
}
void IncrementalAABBPrunerCore::release() // this can be called from purge()
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
PX_DELETE(mAABBTree[i].tree);
mAABBTree[i].mapping.clear();
mAABBTree[i].timeStamp = 0;
}
mCurrentTree = 1;
mLastTree = 0;
}
bool IncrementalAABBPrunerCore::addObject(const PoolIndex poolIndex, PxU32 timeStamp)
{
CoreTree& tree = mAABBTree[mCurrentTree];
if(!tree.tree || !tree.tree->getNodes())
{
if(!tree.tree)
tree.tree = PX_NEW(IncrementalAABBTree)();
tree.timeStamp = timeStamp;
}
PX_ASSERT(tree.timeStamp == timeStamp);
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = tree.tree->insert(poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
test();
#endif
return true;
}
void IncrementalAABBPrunerCore::updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// if some node leaves changed, we need to update mapping
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mapping[index] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
const PoolIndex index = changedNode->getPrimitives(NULL)[j];
mapping[index] = changedNode;
}
}
}
else
{
PX_ASSERT(node->isLeaf());
mapping[poolIndex] = node;
}
}
bool IncrementalAABBPrunerCore::removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp)
{
// erase the entry and get the data
IncrementalPrunerMap::Entry entry;
bool foundEntry = true;
const PxU32 treeIndex = mAABBTree[mLastTree].mapping.erase(poolIndex, entry) ? mLastTree : mCurrentTree;
// if it was not found in the last tree look at the current tree
if(treeIndex == mCurrentTree)
foundEntry = mAABBTree[mCurrentTree].mapping.erase(poolIndex, entry);
// exit somethings is wrong here, entry was not found here
// PT: removed assert to avoid crashing all UTs
// PX_ASSERT(foundEntry);
if(!foundEntry)
return false;
// tree must exist
PX_ASSERT(mAABBTree[treeIndex].tree);
CoreTree& tree = mAABBTree[treeIndex];
timeStamp = tree.timeStamp;
// remove the poolIndex from the tree, update the tree bounds immediatelly
IncrementalAABBTreeNode* node = tree.tree->remove(entry.second, poolIndex, mPool->getCurrentWorldBoxes());
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
tree.mapping[index] = node;
}
}
// nothing to swap, last object, early exit
if(poolIndex == poolRelocatedLastIndex)
{
#if PARANOIA_CHECKS
test();
#endif
return true;
}
// fix the indices, we need to swap the index with last index
// erase the relocated index from the tree it is
IncrementalPrunerMap::Entry relocatedEntry;
const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
foundEntry = true;
if(treeRelocatedIndex == mLastTree)
foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
if(foundEntry)
{
CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
// set the new mapping
relocatedTree.mapping[poolIndex] = relocatedEntry.second;
// update the tree indices - swap
relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
#if PARANOIA_CHECKS
test();
#endif
return true;
}
void IncrementalAABBPrunerCore::swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex)
{
// fix the indices, we need to swap the index with last index
// erase the relocated index from the tre it is
IncrementalPrunerMap::Entry relocatedEntry;
const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
bool foundEntry = true;
if(treeRelocatedIndex == mLastTree)
foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
// relocated index is not here
if(!foundEntry)
return;
CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
// set the new mapping
relocatedTree.mapping[poolIndex] = relocatedEntry.second;
// update the tree indices - swap
relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
bool IncrementalAABBPrunerCore::updateObject(const PoolIndex poolIndex)
{
const IncrementalPrunerMap::Entry* entry = mAABBTree[mLastTree].mapping.find(poolIndex);
const PxU32 treeIndex = entry ? mLastTree : mCurrentTree;
if(!entry)
entry = mAABBTree[mCurrentTree].mapping.find(poolIndex);
// we have not found it
PX_ASSERT(entry);
if(!entry)
return false;
CoreTree& tree = mAABBTree[treeIndex];
mChangedLeaves.clear();
IncrementalAABBTreeNode* node = tree.tree->updateFast(entry->second, poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
if(!mChangedLeaves.empty() || node != entry->second)
updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
test(false);
#endif
return true;
}
PxU32 IncrementalAABBPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
// early exit is no tree exists
if(!mAABBTree[mLastTree].tree || !mAABBTree[mLastTree].tree->getNodes())
{
PX_ASSERT(mAABBTree[mLastTree].mapping.size() == 0);
PX_ASSERT(!mAABBTree[mCurrentTree].tree || mAABBTree[mCurrentTree].timeStamp != timeStamp);
return 0;
}
PX_UNUSED(timeStamp);
PX_ASSERT(timeStamp == mAABBTree[mLastTree].timeStamp);
// release the last tree
CoreTree& tree = mAABBTree[mLastTree];
PxU32 nbObjects = tree.mapping.size();
tree.mapping.clear();
tree.timeStamp = 0;
tree.tree->release();
return nbObjects;
}
bool IncrementalAABBPrunerCore::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
bool again = true;
OverlapCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
}
return again;
}
bool IncrementalAABBPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
RaycastCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
}
return again;
}
bool IncrementalAABBPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
RaycastCallbackAdapter pcb(pcbArgName, *mPool);
for(PxU32 i = 0; i < NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes() && again)
{
again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
}
return again;
}
void IncrementalAABBPrunerCore::getGlobalBounds(PxBounds3& bounds) const
{
bounds.setEmpty();
// PT: TODO: optimize this
for(PxU32 i=0; i<NUM_TREES; i++)
{
const CoreTree& tree = mAABBTree[i];
if(tree.tree && tree.tree->getNodes())
{
PxBounds3 tmp;
StoreBounds(tmp, tree.tree->getNodes()->mBVMin, tree.tree->getNodes()->mBVMax);
bounds.include(tmp);
}
}
}
void IncrementalAABBPrunerCore::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
mAABBTree[i].tree->shiftOrigin(shift);
}
}
}
void IncrementalAABBPrunerCore::visualize(PxRenderOutput& out, PxU32 color) const
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
visualizeTree(out, color, mAABBTree[i].tree);
// Render added objects not yet in the tree
//out << PxTransform(PxIdentity);
//out << PxU32(PxDebugColor::eARGB_WHITE);
}
}
void IncrementalAABBPrunerCore::test(bool hierarchyCheck)
{
PxU32 maxDepth[NUM_TREES] = { 0, 0 };
for(PxU32 i=0; i<NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
if(hierarchyCheck)
mAABBTree[i].tree->hierarchyCheck(mPool->getCurrentWorldBoxes());
for(IncrementalPrunerMap::Iterator iter = mAABBTree[i].mapping.getIterator(); !iter.done(); ++iter)
{
mAABBTree[i].tree->checkTreeLeaf(iter->second, iter->first);
const PxU32 depth = mAABBTree[i].tree->getTreeLeafDepth(iter->second);
if(depth > maxDepth[i])
maxDepth[i] = depth;
}
}
}
}

View File

@@ -0,0 +1,109 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_PRUNER_CORE_H
#define GU_INCREMENTAL_AABB_PRUNER_CORE_H
#include "GuPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuPruningPool.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxHashMap<PoolIndex, IncrementalAABBTreeNode*> IncrementalPrunerMap;
struct CoreTree
{
PX_FORCE_INLINE CoreTree() : timeStamp(0), tree(NULL) {}
PxU32 timeStamp;
IncrementalAABBTree* tree;
IncrementalPrunerMap mapping;
};
class IncrementalAABBPrunerCore : public PxUserAllocated
{
public:
IncrementalAABBPrunerCore(const PruningPool* pool);
~IncrementalAABBPrunerCore();
void release();
bool addObject(const PoolIndex poolIndex, PxU32 timeStamp);
bool removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp);
// if we swap object from bucket pruner index with an index in the regular AABB pruner
void swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex);
bool updateObject(const PoolIndex poolIndex);
PxU32 removeMarkedObjects(PxU32 timeStamp);
bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
void getGlobalBounds(PxBounds3&) const;
void shiftOrigin(const PxVec3& shift);
void visualize(PxRenderOutput& out, PxU32 color) const;
PX_FORCE_INLINE void timeStampChange()
{
// swap current and last tree
mLastTree = (mLastTree + 1) % 2;
mCurrentTree = (mCurrentTree + 1) % 2;
}
void build() {}
PX_FORCE_INLINE PxU32 getNbObjects() const { return mAABBTree[0].mapping.size() + mAABBTree[1].mapping.size(); }
private:
void updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
void test(bool hierarchyCheck = true);
private:
static const PxU32 NUM_TREES = 2;
PxU32 mCurrentTree;
PxU32 mLastTree;
CoreTree mAABBTree[NUM_TREES];
const PruningPool* mPool; // Pruning pool from AABB pruner
NodeList mChangedLeaves;
};
}}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,195 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_TREE_H
#define GU_INCREMENTAL_AABB_TREE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxPool.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuAABBTree.h"
#include "GuPrunerTypedef.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct BVHNode;
class BVH;
#define INCR_NB_OBJECTS_PER_NODE 4
// tree indices, can change in runtime
struct AABBTreeIndices
{
PX_FORCE_INLINE AABBTreeIndices(PoolIndex index) : nbIndices(1)
{
indices[0] = index;
for(PxU32 i=1; i<INCR_NB_OBJECTS_PER_NODE; i++)
indices[i] = 0;
}
PxU32 nbIndices;
PoolIndex indices[INCR_NB_OBJECTS_PER_NODE];
};
// tree node, has parent information
class IncrementalAABBTreeNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE IncrementalAABBTreeNode() : mParent(NULL)
{
mChilds[0] = NULL;
mChilds[1] = NULL;
}
PX_FORCE_INLINE IncrementalAABBTreeNode(AABBTreeIndices* indices) : mParent(NULL)
{
mIndices = indices;
mChilds[1] = NULL;
}
PX_FORCE_INLINE ~IncrementalAABBTreeNode() {}
PX_FORCE_INLINE PxU32 isLeaf() const { return PxU32(mChilds[1]==0); }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32*) { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mIndices->nbIndices; }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return PX_INVALID_U32; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getPos(const IncrementalAABBTreeNode*) const { return mChilds[0]; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNeg(const IncrementalAABBTreeNode*) const { return mChilds[1]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getPos(IncrementalAABBTreeNode*) { return mChilds[0]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getNeg(IncrementalAABBTreeNode*) { return mChilds[1]; }
// PT: TODO: these functions are duplicates from the regular AABB tree node
PX_FORCE_INLINE void getAABBCenterExtentsV(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V((V4Scale(V4Sub(mBVMax, mBVMin), halfV)));
*center = Vec3V_From_Vec4V((V4Scale(V4Add(mBVMax, mBVMin), halfV)));
}
PX_FORCE_INLINE void getAABBCenterExtentsV2(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
*extents = Vec3V_From_Vec4V((V4Sub(mBVMax, mBVMin)));
*center = Vec3V_From_Vec4V((V4Add(mBVMax, mBVMin)));
}
Vec4V mBVMin; // Global bounding-volume min enclosing all the node-related primitives
Vec4V mBVMax; // Global bounding-volume max enclosing all the node-related primitives
IncrementalAABBTreeNode* mParent; // node parent
union
{
IncrementalAABBTreeNode* mChilds[2]; // childs of node if not a leaf
AABBTreeIndices* mIndices; // if leaf, indices information
};
};
struct IncrementalAABBTreeNodePair
{
IncrementalAABBTreeNode mNode0;
IncrementalAABBTreeNode mNode1;
};
typedef PxArray<IncrementalAABBTreeNode*> NodeList;
// incremental AABB tree, all changes are immediatelly reflected to the tree
class IncrementalAABBTree : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API IncrementalAABBTree();
PX_PHYSX_COMMON_API ~IncrementalAABBTree();
// Build the tree for the first time
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, PxArray<IncrementalAABBTreeNode*>& mapping);
// insert a new index into the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// update the object in the tree - full update insert/remove
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// update the object in the tree, faster method, that may unbalance the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// remove object from the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds);
// fixup the tree indices, if we swapped the objects in the pruning pool
PX_PHYSX_COMMON_API void fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex);
// origin shift
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// get the tree root node
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNodes() const { return mRoot; }
// define this function so we can share the scene query code with regular AABBTree
PX_FORCE_INLINE const PxU32* getIndices() const { return NULL; }
// paranoia checks
PX_PHYSX_COMMON_API void hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds);
PX_PHYSX_COMMON_API void hierarchyCheck(const PxBounds3* bounds);
PX_PHYSX_COMMON_API void checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h);
PX_PHYSX_COMMON_API PxU32 getTreeLeafDepth(IncrementalAABBTreeNode* leaf);
PX_PHYSX_COMMON_API void release();
PX_PHYSX_COMMON_API void copy(const BVH& bvh, PxArray<IncrementalAABBTreeNode*>& mapping);
private:
// clone the tree from the generic AABB tree that was built
void clone(PxArray<IncrementalAABBTreeNode*>& mapping, const PxU32* indices, IncrementalAABBTreeNode** treeNodes);
void copyNode(IncrementalAABBTreeNode& destNode, const BVHNode& sourceNode, const BVHNode* nodeBase,
IncrementalAABBTreeNode* parent, const PxU32* primitivesBase, PxArray<IncrementalAABBTreeNode*>& mapping);
// split leaf node, the newly added object does not fit in
IncrementalAABBTreeNode* splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds);
void rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largesRotateNode, const PxBounds3* bounds, bool rotateAgain);
void releaseNode(IncrementalAABBTreeNode* node);
PxPool<AABBTreeIndices> mIndicesPool;
PxPool<IncrementalAABBTreeNodePair> mNodesPool;
IncrementalAABBTreeNode* mRoot;
NodeAllocator mNodeAllocator;
};
}
}
#endif

View File

@@ -0,0 +1,124 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxIntrinsics.h"
#include "GuInternal.h"
#include "GuBox.h"
#include "GuVecPlane.h"
#include "foundation/PxVecMath.h"
using namespace physx;
using namespace aos;
/**
Computes the aabb points.
\param pts [out] 8 box points
*/
void Gu::computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts)
{
PX_ASSERT(pts);
// Get box corners
const PxVec3& minimum = bounds.minimum;
const PxVec3& maximum = bounds.maximum;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Generate 8 corners of the bbox
pts[0] = PxVec3(minimum.x, minimum.y, minimum.z);
pts[1] = PxVec3(maximum.x, minimum.y, minimum.z);
pts[2] = PxVec3(maximum.x, maximum.y, minimum.z);
pts[3] = PxVec3(minimum.x, maximum.y, minimum.z);
pts[4] = PxVec3(minimum.x, minimum.y, maximum.z);
pts[5] = PxVec3(maximum.x, minimum.y, maximum.z);
pts[6] = PxVec3(maximum.x, maximum.y, maximum.z);
pts[7] = PxVec3(minimum.x, maximum.y, maximum.z);
}
PxPlane Gu::getPlane(const PxTransform& pose)
{
const PxVec3 n = pose.q.getBasisVector0();
return PxPlane(n, -pose.p.dot(n));
}
void Gu::computeSweptBox(Gu::Box& dest, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, PxReal distance)
{
PxVec3 R1, R2;
PxComputeBasisVectors(unitDir, R1, R2);
PxReal dd[3];
dd[0] = PxAbs(rot.column0.dot(unitDir));
dd[1] = PxAbs(rot.column1.dot(unitDir));
dd[2] = PxAbs(rot.column2.dot(unitDir));
PxReal dmax = dd[0];
PxU32 ax0=1;
PxU32 ax1=2;
if(dd[1]>dmax)
{
dmax=dd[1];
ax0=0;
ax1=2;
}
if(dd[2]>dmax)
{
dmax=dd[2];
ax0=0;
ax1=1;
}
if(dd[ax1]<dd[ax0])
PxSwap(ax0, ax1);
R1 = rot[ax0];
R1 -= (R1.dot(unitDir))*unitDir; // Project to plane whose normal is dir
R1.normalize();
R2 = unitDir.cross(R1);
dest.setAxes(unitDir, R1, R2);
PxReal offset[3];
offset[0] = distance;
offset[1] = distance*(unitDir.dot(R1));
offset[2] = distance*(unitDir.dot(R2));
for(PxU32 r=0; r<3; r++)
{
const PxVec3& R = dest.rot[r];
dest.extents[r] = offset[r]*0.5f + PxAbs(rot.column0.dot(R))*extents.x + PxAbs(rot.column1.dot(R))*extents.y + PxAbs(rot.column2.dot(R))*extents.z;
}
dest.center = center + unitDir*distance*0.5f;
}

View File

@@ -0,0 +1,314 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERNAL_H
#define GU_INTERNAL_H
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "GuCapsule.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxMat33.h"
#define GU_EPSILON_SAME_DISTANCE 1e-3f
namespace physx
{
class PxBounds3;
namespace Gu
{
class Box;
// PT: TODO: now that the Gu files are not exposed to users anymore, we should move back capsule-related functions
// to GuCapsule.h, etc
PX_PHYSX_COMMON_API const PxU8* getBoxEdges();
PX_PHYSX_COMMON_API void computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts);
void computeBoxAroundCapsule(const Capsule& capsule, Box& box);
PxPlane getPlane(const PxTransform& pose);
PX_FORCE_INLINE PxVec3 getCapsuleHalfHeightVector(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom)
{
return transform.q.getBasisVector0() * capsuleGeom.halfHeight;
}
PX_FORCE_INLINE void getCapsuleSegment(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom, Gu::Segment& segment)
{
const PxVec3 tmp = getCapsuleHalfHeightVector(transform, capsuleGeom);
segment.p0 = transform.p + tmp;
segment.p1 = transform.p - tmp;
}
PX_FORCE_INLINE void getCapsule(Gu::Capsule& capsule, const PxCapsuleGeometry& capsuleGeom, const PxTransform& pose)
{
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
}
void computeSweptBox(Gu::Box& box, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, PxReal distance);
/**
* PT: computes "alignment value" used to select the "best" triangle in case of identical impact distances (for sweeps).
* This simply computes how much a triangle is aligned with a given sweep direction.
* Captured in a function to make sure it is always computed correctly, i.e. working for double-sided triangles.
*
* \param triNormal [in] triangle's normal
* \param unitDir [in] sweep direction (normalized)
* \return alignment value in [-1.0f, 0.0f]. -1.0f for fully aligned, 0.0f for fully orthogonal.
*/
PX_FORCE_INLINE PxReal computeAlignmentValue(const PxVec3& triNormal, const PxVec3& unitDir)
{
PX_ASSERT(triNormal.isNormalized());
// PT: initial dot product gives the angle between the two, with "best" triangles getting a +1 or -1 score
// depending on their winding. We take the absolute value to ignore the impact of winding. We negate the result
// to make the function compatible with the initial code, which assumed single-sided triangles and expected -1
// for best triangles.
return -PxAbs(triNormal.dot(unitDir));
}
/**
* PT: sweeps: determines if a newly touched triangle is "better" than best one so far.
* In this context "better" means either clearly smaller impact distance, or a similar impact
* distance but a normal more aligned with the sweep direction.
*
* \param triImpactDistance [in] new triangle's impact distance
* \param triAlignmentValue [in] new triangle's alignment value (as computed by computeAlignmentValue)
* \param bestImpactDistance [in] current best triangle's impact distance
* \param bestAlignmentValue [in] current best triangle's alignment value (as computed by computeAlignmentValue)
* \param maxDistance [in] maximum distance of the query, hit cannot be longer than this maxDistance
* \return true if new triangle is better
*/
PX_FORCE_INLINE bool keepTriangle( float triImpactDistance, float triAlignmentValue,
float bestImpactDistance, float bestAlignmentValue, float maxDistance)
{
// Reject triangle if further than the maxDistance
if(triImpactDistance > maxDistance)
return false;
// If initial overlap happens, keep the triangle
if(triImpactDistance == 0.0f)
return true;
// tris have "similar" impact distances if the difference is smaller than 2*distEpsilon
float distEpsilon = GU_EPSILON_SAME_DISTANCE; // pick a farther hit within distEpsilon that is more opposing than the previous closest hit
// PT: make it a relative epsilon to make sure it still works with large distances
distEpsilon *= PxMax(1.0f, PxMax(triImpactDistance, bestImpactDistance));
// If new distance is more than epsilon closer than old distance
if(triImpactDistance < bestImpactDistance - distEpsilon)
return true;
// If new distance is no more than epsilon farther than oldDistance and "face is more opposing than previous"
if(triImpactDistance < bestImpactDistance+distEpsilon && triAlignmentValue < bestAlignmentValue)
return true;
// If alignment value is the same, but the new triangle is closer than the best distance
if(triAlignmentValue == bestAlignmentValue && triImpactDistance < bestImpactDistance)
return true;
return false;
}
PX_FORCE_INLINE bool keepTriangleBasic(float triImpactDistance, float bestImpactDistance, float maxDistance)
{
// Reject triangle if further than the maxDistance
if(triImpactDistance > maxDistance)
return false;
// If initial overlap happens, keep the triangle
if(triImpactDistance == 0.0f)
return true;
// If new distance is more than epsilon closer than old distance
if(triImpactDistance < bestImpactDistance)
return true;
return false;
}
PX_FORCE_INLINE PxVec3 cross100(const PxVec3& b)
{
return PxVec3(0.0f, -b.z, b.y);
}
PX_FORCE_INLINE PxVec3 cross010(const PxVec3& b)
{
return PxVec3(b.z, 0.0f, -b.x);
}
PX_FORCE_INLINE PxVec3 cross001(const PxVec3& b)
{
return PxVec3(-b.y, b.x, 0.0f);
}
//! Compute point as combination of barycentric coordinates
PX_FORCE_INLINE PxVec3 computeBarycentricPoint(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, PxReal u, PxReal v)
{
// This seems to confuse the compiler...
// return (1.0f - u - v)*p0 + u*p1 + v*p2;
const PxF32 w = 1.0f - u - v;
return PxVec3(w * p0.x + u * p1.x + v * p2.x, w * p0.y + u * p1.y + v * p2.y, w * p0.z + u * p1.z + v * p2.z);
}
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3, PxMat33& edgeMatrix)
{
const PxVec3 u1 = x1 - x0;
const PxVec3 u2 = x2 - x0;
const PxVec3 u3 = x3 - x0;
edgeMatrix = PxMat33(u1, u2, u3);
const PxReal det = edgeMatrix.getDeterminant();
const PxReal volume = det / 6.0f;
return volume;
}
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3)
{
PxMat33 edgeMatrix;
return computeTetrahedronVolume(x0, x1, x2, x3, edgeMatrix);
}
// IndexType should be PxU16 or PxU32.
template<typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec3* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3*i]];
PxVec3 v1 = vertices[indices[3 * i + 1]];
PxVec3 v2 = vertices[indices[3 * i + 2]];
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
// IndexType should be PxU16 or PxU32.
// W in PxVec4 of vertices are ignored.
template <typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec4* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3 * i]].getXYZ();
PxVec3 v1 = vertices[indices[3 * i + 1]].getXYZ();
PxVec3 v2 = vertices[indices[3 * i + 2]].getXYZ();
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
/*!
Extend an edge along its length by a factor
*/
PX_FORCE_INLINE void makeFatEdge(PxVec3& p0, PxVec3& p1, PxReal fatCoeff)
{
PxVec3 delta = p1 - p0;
const PxReal m = delta.magnitude();
if (m > 0.0f)
{
delta *= fatCoeff / m;
p0 -= delta;
p1 += delta;
}
}
#if 0
/*!
Extend an edge along its length by a factor
*/
PX_FORCE_INLINE void makeFatEdge(aos::Vec3V& p0, aos::Vec3V& p1, const aos::FloatVArg fatCoeff)
{
const aos::Vec3V delta = aos::V3Sub(p1, p0);
const aos::FloatV m = aos::V3Length(delta);
const aos::BoolV con = aos::FIsGrtr(m, aos::FZero());
const aos::Vec3V fatDelta = aos::V3Scale(aos::V3ScaleInv(delta, m), fatCoeff);
p0 = aos::V3Sel(con, aos::V3Sub(p0, fatDelta), p0);
p1 = aos::V3Sel(con, aos::V3Add(p1, fatDelta), p1);
}
#endif
PX_FORCE_INLINE PxU32 closestAxis(const PxVec3& v, PxU32& j, PxU32& k)
{
// find largest 2D plane projection
const PxF32 absPx = PxAbs(v.x);
const PxF32 absNy = PxAbs(v.y);
const PxF32 absNz = PxAbs(v.z);
PxU32 m = 0; // x biggest axis
j = 1;
k = 2;
if (absNy > absPx && absNy > absNz)
{
// y biggest
j = 2;
k = 0;
m = 1;
}
else if (absNz > absPx)
{
// z biggest
j = 0;
k = 1;
m = 2;
}
return m;
}
PX_FORCE_INLINE bool isAlmostZero(const PxVec3& v)
{
if (PxAbs(v.x) > 1e-6f || PxAbs(v.y) > 1e-6f || PxAbs(v.z) > 1e-6f)
return false;
return true;
}
} // namespace Gu
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,74 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MTD_H
#define GU_MTD_H
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "geometry/PxGeometry.h"
namespace physx
{
namespace Gu
{
// PT: we use a define to be able to quickly change the signature of all MTD functions.
// (this also ensures they all use consistent names for passed parameters).
// \param[out] mtd computed depenetration dir
// \param[out] depth computed depenetration depth
// \param[in] geom0 first geometry object
// \param[in] pose0 pose of first geometry object
// \param[in] geom1 second geometry object
// \param[in] pose1 pose of second geometry object
// \param[in] cache optional cached data for triggers
#define GU_MTD_FUNC_PARAMS PxVec3& mtd, PxF32& depth, \
const PxGeometry& geom0, const PxTransform32& pose0, \
const PxGeometry& geom1, const PxTransform32& pose1
// PT: function pointer for Geom-indexed MTD functions
// See GU_MTD_FUNC_PARAMS for function parameters details.
// \return true if an overlap was found, false otherwise
// \note depenetration vector D is equal to mtd * depth. It should be applied to the 1st object, to get out of the 2nd object.
typedef bool (*GeomMTDFunc) (GU_MTD_FUNC_PARAMS);
PX_FORCE_INLINE PxF32 manualNormalize(PxVec3& mtd, const PxVec3& normal, PxReal lenSq)
{
const PxF32 len = PxSqrt(lenSq);
// We do a *manual* normalization to check for singularity condition
if(lenSq < 1e-6f)
mtd = PxVec3(1.0f, 0.0f, 0.0f); // PT: zero normal => pick up random one
else
mtd = normal * 1.0f / len;
return len;
}
}
}
#endif

View File

@@ -0,0 +1,146 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuMaverickNode.h"
using namespace physx;
using namespace Gu;
const PxU32 MaverickNode::mIndices[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
bool MaverickNode::addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
if(mNbFree<FREE_PRUNER_SIZE)
{
const PxU32 index = mNbFree++;
mFreeObjects[index] = object;
mFreeHandles[index] = handle;
mFreeBounds[index] = worldAABB;
mFreeTransforms[index] = transform;
mFreeStamps[index] = timeStamp;
return true;
}
return false;
}
bool MaverickNode::updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
mFreeBounds[i] = worldAABB;
mFreeTransforms[i] = transform;
return true;
}
}
return false;
}
bool MaverickNode::updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeHandles[i]==handle)
{
mFreeBounds[i] = worldAABB;
mFreeTransforms[i] = transform;
return true;
}
}
return false;
}
void MaverickNode::remove(PxU32 index)
{
mNbFree--;
if(index!=mNbFree)
{
mFreeBounds[index] = mFreeBounds[mNbFree];
mFreeTransforms[index] = mFreeTransforms[mNbFree];
mFreeObjects[index] = mFreeObjects[mNbFree];
mFreeHandles[index] = mFreeHandles[mNbFree];
mFreeStamps[index] = mFreeStamps[mNbFree];
}
}
bool MaverickNode::removeObject(const PrunerPayload& object, PxU32& timeStamp)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
remove(i);
return true;
}
}
return false;
}
bool MaverickNode::removeObject(PrunerHandle handle, PxU32& timeStamp)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeHandles[i]==handle)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
remove(i);
return true;
}
}
return false;
}
PxU32 MaverickNode::removeMarkedObjects(PxU32 timeStamp)
{
PxU32 nbRemoved=0;
PxU32 i=0;
while(i<mNbFree)
{
if(mFreeStamps[i]==timeStamp)
{
nbRemoved++;
remove(i);
}
else i++;
}
return nbRemoved;
}
void MaverickNode::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0;i<mNbFree;i++)
{
mFreeBounds[i].minimum -= shift;
mFreeBounds[i].maximum -= shift;
mFreeTransforms[i].p -= shift;
}
}

View File

@@ -0,0 +1,82 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MAVERICK_NODE_H
#define GU_MAVERICK_NODE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxTransform.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerPayload.h"
#include "GuPrunerTypedef.h"
#define FREE_PRUNER_SIZE 16
#ifdef FREE_PRUNER_SIZE
namespace physx
{
namespace Gu
{
class MaverickNode
{
public:
MaverickNode() : mNbFree(0) {}
~MaverickNode() {}
PX_FORCE_INLINE void release() { mNbFree = 0; }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return mIndices; }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return 0; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbFree; }
bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
bool updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform);
bool updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool removeObject(PrunerHandle handle, PxU32& timeStamp);
PxU32 removeMarkedObjects(PxU32 timeStamp);
void shiftOrigin(const PxVec3& shift);
void remove(PxU32 index);
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PrunerHandle mFreeHandles[FREE_PRUNER_SIZE]; // mNbFree handles are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
static const PxU32 mIndices[FREE_PRUNER_SIZE];
};
}
}
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,199 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_FACTORY_H
#define GU_MESH_FACTORY_H
#include "foundation/PxIO.h"
#include "foundation/PxHashSet.h"
#include "foundation/PxUserAllocated.h"
#include "geometry/PxTriangleMesh.h"
#include "geometry/PxTetrahedronMesh.h"
#include "geometry/PxConvexMesh.h"
#include "geometry/PxHeightField.h"
#include "geometry/PxBVH.h"
#include "PxPhysXConfig.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
// PT: added for platforms that compile the onRefCountZero template immediately
#include "CmUtils.h"
#include "foundation/PxFoundation.h"
namespace physx
{
namespace Gu
{
class ConvexMesh;
class HeightField;
class TriangleMesh;
class TriangleMeshData;
class DeformableVolumeMesh;
class DeformableVolumeMeshData;
class TetrahedronMesh;
class TetrahedronMeshData;
class BVH;
struct ConvexHullInitData;
class BVHData;
class MeshFactoryListener
{
protected:
virtual ~MeshFactoryListener(){}
public:
virtual void onMeshFactoryBufferRelease(const PxBase* object, PxType type) = 0;
#if PX_SUPPORT_OMNI_PVD
virtual void onObjectAdd(const PxBase*) {}
virtual void onObjectRemove(const PxBase*) {}
#endif
};
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
class PX_PHYSX_COMMON_API MeshFactory : public PxUserAllocated
{
PX_NOCOPY(MeshFactory)
public:
MeshFactory();
protected:
virtual ~MeshFactory();
public:
void release();
// Triangle meshes
void addTriangleMesh(Gu::TriangleMesh* np, bool lock=true);
PxTriangleMesh* createTriangleMesh(PxInputStream& stream);
PxTriangleMesh* createTriangleMesh(void* triangleMeshData);
bool removeTriangleMesh(PxTriangleMesh&);
PxU32 getNbTriangleMeshes() const;
PxU32 getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Tetrahedron meshes
void addTetrahedronMesh(Gu::TetrahedronMesh* np, bool lock = true);
PxTetrahedronMesh* createTetrahedronMesh(PxInputStream& stream);
PxTetrahedronMesh* createTetrahedronMesh(void* tetrahedronMeshData);
bool removeTetrahedronMesh(PxTetrahedronMesh&);
PxU32 getNbTetrahedronMeshes() const;
PxU32 getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Deformable volume meshes
void addDeformableVolumeMesh(Gu::DeformableVolumeMesh* np, bool lock = true);
PxDeformableVolumeMesh* createDeformableVolumeMesh(PxInputStream& stream);
PxDeformableVolumeMesh* createDeformableVolumeMesh(void* tetrahedronMeshData);
bool removeDeformableVolumeMesh(PxDeformableVolumeMesh&);
PxU32 getNbDeformableVolumeMeshes() const;
PxU32 getDeformableVolumeMeshes(PxDeformableVolumeMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Convexes
void addConvexMesh(Gu::ConvexMesh* np, bool lock=true);
PxConvexMesh* createConvexMesh(PxInputStream&);
PxConvexMesh* createConvexMesh(void* convexMeshData);
bool removeConvexMesh(PxConvexMesh&);
PxU32 getNbConvexMeshes() const;
PxU32 getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// Heightfields
void addHeightField(Gu::HeightField* np, bool lock=true);
PxHeightField* createHeightField(void* heightFieldMeshData);
PxHeightField* createHeightField(PxInputStream&);
bool removeHeightField(PxHeightField&);
PxU32 getNbHeightFields() const;
PxU32 getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
// BVH
void addBVH(Gu::BVH* np, bool lock=true);
PxBVH* createBVH(PxInputStream&);
PxBVH* createBVH(void* bvhData);
bool removeBVH(PxBVH&);
PxU32 getNbBVHs() const;
PxU32 getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex) const;
void addFactoryListener(MeshFactoryListener& listener);
void removeFactoryListener(MeshFactoryListener& listener);
void notifyFactoryListener(const PxBase*, PxType typeID);
bool remove(PxBase&);
protected:
PxTriangleMesh* createTriangleMesh(Gu::TriangleMeshData& data);
PxTetrahedronMesh* createTetrahedronMesh(Gu::TetrahedronMeshData& data);
PxDeformableVolumeMesh* createDeformableVolumeMesh(Gu::DeformableVolumeMeshData& data);
PxConvexMesh* createConvexMesh(Gu::ConvexHullInitData& data);
PxBVH* createBVH(Gu::BVHData& data);
mutable PxMutex mTrackingMutex;
private:
PxCoalescedHashSet<Gu::TriangleMesh*> mTriangleMeshes;
PxCoalescedHashSet<Gu::TetrahedronMesh*> mTetrahedronMeshes;
PxCoalescedHashSet<Gu::DeformableVolumeMesh*> mDeformableVolumeMeshes;
PxCoalescedHashSet<Gu::ConvexMesh*> mConvexMeshes;
PxCoalescedHashSet<Gu::HeightField*> mHeightFields;
PxCoalescedHashSet<Gu::BVH*> mBVHs;
PxArray<MeshFactoryListener*> mFactoryListeners;
#if PX_SUPPORT_OMNI_PVD
protected:
void notifyListenersAdd(const PxBase*);
void notifyListenersRemove(const PxBase*);
#endif
};
#if PX_VC
#pragma warning(pop)
#endif
template<typename T>
PX_INLINE void onRefCountZero(T* object, Gu::MeshFactory* mf, bool cndt, const char* errorMsg)
{
if(mf)
{
if(cndt || mf->remove(*object))
{
const PxType type = object->getConcreteType();
Cm::deletePxBase(object);
mf->notifyFactoryListener(object, type);
return;
}
// PT: if we reach this point, we didn't find the mesh in the Physics object => don't delete!
// This prevents deleting the object twice.
PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, errorMsg);
}
else
Cm::deletePxBase(object);
}
}
}
#endif

View File

@@ -0,0 +1,904 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuOverlapTests.h"
#include "GuIntersectionBoxBox.h"
#include "GuIntersectionSphereBox.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistanceSegmentSegment.h"
#include "GuSphere.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuVecBox.h"
#include "GuConvexMesh.h"
#include "GuHillClimbing.h"
#include "GuGJK.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "CmMatrix34.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
#include "GuRefGjkEpa.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
// PT: TODO: why don't we use ShapeData for overlaps?
//returns the maximal vertex in shape space
// PT: this function should be removed. We already have 2 different project hull functions in PxcShapeConvex & GuGJKObjectSupport, this one looks like a weird mix of both!
static PxVec3 projectHull_( const ConvexHullData& hull,
float& minimum, float& maximum,
const PxVec3& localDir, // expected to be normalized
const PxMat33& vert2ShapeSkew)
{
PX_ASSERT(localDir.isNormalized());
//use property that x|My == Mx|y for symmetric M to avoid having to transform vertices.
const PxVec3 vertexSpaceDir = vert2ShapeSkew * localDir;
const PxVec3* Verts = hull.getHullVertices();
const PxVec3* bestVert = NULL;
if(!hull.mBigConvexRawData) // Brute-force, local space. Experiments show break-even point is around 32 verts.
{
PxU32 NbVerts = hull.mNbHullVertices;
float min_ = PX_MAX_F32;
float max_ = -PX_MAX_F32;
while(NbVerts--)
{
const float dp = (*Verts).dot(vertexSpaceDir);
min_ = physx::intrinsics::selectMin(min_, dp);
if(dp > max_) { max_ = dp; bestVert = Verts; }
Verts++;
}
minimum = min_;
maximum = max_;
PX_ASSERT(bestVert != NULL);
return vert2ShapeSkew * *bestVert;
}
else //*/if(1) // This version is better for objects with a lot of vertices
{
const PxU32 Offset = ComputeCubemapNearestOffset(vertexSpaceDir, hull.mBigConvexRawData->mSubdiv);
PxU32 MinID = hull.mBigConvexRawData->mSamples[Offset];
PxU32 MaxID = hull.mBigConvexRawData->getSamples2()[Offset];
localSearch(MinID, -vertexSpaceDir, Verts, hull.mBigConvexRawData);
localSearch(MaxID, vertexSpaceDir, Verts, hull.mBigConvexRawData);
minimum = (Verts[MinID].dot(vertexSpaceDir));
maximum = (Verts[MaxID].dot(vertexSpaceDir));
PX_ASSERT(maximum >= minimum);
return vert2ShapeSkew * Verts[MaxID];
}
}
static bool intersectSphereConvex(const PxTransform& sphereTransform, float radius, const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hullData = &mesh.getHull();
const FloatV sphereRadius = FLoad(radius);
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const PxMatTransformV aToB(convexGlobalPose.transformInv(sphereTransform));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, meshScale.isIdentity());
const CapsuleV capsule(aToB.p, sphereRadius);
Vec3V contactA, contactB, normal;
FloatV dist;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
return status == GJK_CONTACT;
}
static bool intersectCapsuleConvex( const PxCapsuleGeometry& capsGeom, const PxTransform& capsGlobalPose,
const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hull = &mesh.getHull();
const FloatV capsuleHalfHeight = FLoad(capsGeom.halfHeight);
const FloatV capsuleRadius = FLoad(capsGeom.radius);
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const PxMatTransformV aToB(convexGlobalPose.transformInv(capsGlobalPose));
const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
const CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
Vec3V contactA, contactB, normal;
FloatV dist;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
return status == GJK_CONTACT;
}
static bool intersectBoxConvex(const PxBoxGeometry& boxGeom, const PxTransform& boxGlobalPose,
const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
PxVec3*)
{
// AP: see archived non-GJK version in //sw/physx/dev/pterdiman/graveyard/contactConvexBox.cpp
using namespace aos;
const Vec3V zeroV = V3Zero();
const ConvexHullData* hull = &mesh.getHull();
const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
const Vec3V boxExtents = V3LoadU(boxGeom.halfExtents);
const PxMatTransformV aToB(convexGlobalPose.transformInv(boxGlobalPose));
const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
const BoxV box(zeroV, boxExtents);
Vec3V contactA, contactB, normal;
FloatV dist;
const RelativeConvex<BoxV> convexA(box, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
//PX_PRINTF("BOX status = %i, overlap = %i, PxVec3(%f, %f, %f)\n", status, overlap, boxGlobalPose.p.x, boxGlobalPose.p.y, boxGlobalPose.p.z);
return status == GJK_CONTACT;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxVec3* getCachedAxis(TriggerCache* cache)
{
if(cache && cache->state==TRIGGER_OVERLAP)
return &cache->dir;
else
return NULL;
}
static PX_FORCE_INLINE bool updateTriggerCache(bool overlap, TriggerCache* cache)
{
if(cache)
{
if(overlap)
cache->state = TRIGGER_OVERLAP;
else
cache->state = TRIGGER_DISJOINT;
}
return overlap;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sphere-vs-shape
static bool GeomOverlapCallback_SphereSphere(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eSPHERE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom0 = static_cast<const PxSphereGeometry&>(geom0);
const PxSphereGeometry& sphereGeom1 = static_cast<const PxSphereGeometry&>(geom1);
const PxVec3 delta = pose1.p - pose0.p;
const PxReal r = sphereGeom0.radius + sphereGeom1.radius;
return delta.magnitudeSquared() <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SpherePlane(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::ePLANE);
PX_UNUSED(cache);
PX_UNUSED(geom1);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
return getPlane(pose1).distance(pose0.p) <= sphereGeom.radius; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SphereCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector = getCapsuleHalfHeightVector(pose1, capsuleGeom);
const PxReal r = sphereGeom.radius + capsuleGeom.radius;
return distancePointSegmentSquared(capsuleHalfHeightVector, -capsuleHalfHeightVector, pose0.p - pose1.p) <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_SphereBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// PT: TODO: remove this useless conversion
Box obb;
buildFrom(obb, pose1.p, boxGeom.halfExtents, pose1.q);
return intersectSphereBox(Sphere(pose0.p, sphereGeom.radius), obb);
}
static bool GeomOverlapCallback_SphereConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0,0,1.f);
const bool overlap = intersectSphereConvex(pose0, sphereGeom.radius,
*cm,
convexGeom.scale, pose1,
&cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plane-vs-shape
static bool GeomOverlapCallback_PlaneCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: TODO: remove this useless conversion
Capsule capsule;
getCapsule(capsule, capsuleGeom, pose1);
const PxPlane plane = getPlane(pose0);
// We handle the capsule-plane collision with 2 sphere-plane collisions.
// Seems ok so far, since plane is infinite.
if(plane.distance(capsule.p0) <= capsule.radius) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
if(plane.distance(capsule.p1) <= capsule.radius) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
return false;
}
/*static bool intersectPlaneBox(const PxPlane& plane, const Box& box)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
for(PxU32 i=0;i<8;i++)
{
if(plane.distance(pts[i]) <= 0.0f) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
}
return false;
}*/
static bool GeomOverlapCallback_PlaneBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// I currently use the same code as for contact generation but maybe we could do something faster (in theory testing
// only 2 pts is enough).
const Matrix34FromTransform absPose(pose1);
const PxPlane worldPlane = getPlane(pose0);
for(int vx=-1; vx<=1; vx+=2)
for(int vy=-1; vy<=1; vy+=2)
for(int vz=-1; vz<=1; vz+=2)
{
const PxVec3 v = absPose.transform(PxVec3(PxReal(vx),PxReal(vy),PxReal(vz)).multiply(boxGeom.halfExtents));
if(worldPlane.distance(v) <= 0.0f) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
}
return false;
}
static bool GeomOverlapCallback_PlaneConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
//find plane normal in shape space of convex:
// PT:: tag: scalar transform*transform
const PxTransform plane2convex = pose1.getInverse().transform(pose0);
const PxPlane shapeSpacePlane = getPlane(plane2convex);
PxReal minimum, maximum;
projectHull_(cm->getHull(), minimum, maximum, shapeSpacePlane.n, toMat33(convexGeom.scale));
return (minimum <= -shapeSpacePlane.d);
}
static bool GeomOverlapCallback_PlaneConvexCore(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXCORE);
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(geom0);
const PxPlane plane = getPlane(pose0);
const PxConvexCoreGeometry& convexCore = static_cast<const PxConvexCoreGeometry&>(geom1);
Gu::ConvexShape shape; Gu::makeConvexShape(convexCore, pose1, shape);
PxVec3 closestPoint = shape.support(-plane.n);
PxReal closestDist = plane.distance(closestPoint);
return closestDist <= 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Capsule-vs-shape
static bool GeomOverlapCallback_CapsuleCapsule(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom0 = static_cast<const PxCapsuleGeometry&>(geom0);
const PxCapsuleGeometry& capsuleGeom1 = static_cast<const PxCapsuleGeometry&>(geom1);
// PT: move computation to local space for improved accuracy
const PxVec3 delta = pose1.p - pose0.p;
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector0 = getCapsuleHalfHeightVector(pose0, capsuleGeom0);
const PxVec3 capsuleHalfHeightVector1 = getCapsuleHalfHeightVector(pose1, capsuleGeom1);
const PxReal squareDist = distanceSegmentSegmentSquared(-capsuleHalfHeightVector0, capsuleHalfHeightVector0*2.0f,
delta-capsuleHalfHeightVector1, capsuleHalfHeightVector1*2.0f);
const PxReal r = capsuleGeom0.radius + capsuleGeom1.radius;
return squareDist <= r*r; // PT: objects are defined as closed, so we return 'true' in case of equality
}
static bool GeomOverlapCallback_CapsuleBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);
// PT: move computation to local space for improved accuracy
const PxVec3 delta = pose1.p - pose0.p;
// PT: TODO: remove this useless conversion
const PxVec3 capsuleHalfHeightVector = getCapsuleHalfHeightVector(pose0, capsuleGeom);
// PT: TODO: remove this useless conversion
const PxMat33Padded obbRot(pose1.q);
// PT: objects are defined as closed, so we return 'true' in case of equality
return distanceSegmentBoxSquared(capsuleHalfHeightVector, -capsuleHalfHeightVector, delta, boxGeom.halfExtents, obbRot) <= capsuleGeom.radius*capsuleGeom.radius;
}
static bool GeomOverlapCallback_CapsuleConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0,0,1.0f);
const bool overlap = intersectCapsuleConvex(capsuleGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Box-vs-shape
static bool GeomOverlapCallback_BoxBox(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxBoxGeometry& boxGeom0 = static_cast<const PxBoxGeometry&>(geom0);
const PxBoxGeometry& boxGeom1 = static_cast<const PxBoxGeometry&>(geom1);
// PT: TODO: remove this useless conversion
return intersectOBBOBB( boxGeom0.halfExtents, pose0.p, PxMat33Padded(pose0.q),
boxGeom1.halfExtents, pose1.p, PxMat33Padded(pose1.q), true);
}
static bool GeomOverlapCallback_BoxConvex(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);
ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxVec3 cachedSepAxis;
PxVec3* tmp = getCachedAxis(cache);
if(tmp)
cachedSepAxis = *tmp;
else
cachedSepAxis = PxVec3(0.0f, 0.0f, 1.0f);
const bool overlap = intersectBoxConvex(boxGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);
if(cache && overlap)
cache->dir = cachedSepAxis;
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convex-vs-shape
static bool GeomOverlapCallback_ConvexConvex(GU_OVERLAP_FUNC_PARAMS)
{
using namespace aos;
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
PX_UNUSED(threadContext);
const Vec3V zeroV = V3Zero();
const PxConvexMeshGeometry& convexGeom0 = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxConvexMeshGeometry& convexGeom1 = static_cast<const PxConvexMeshGeometry&>(geom1);
const ConvexMesh* cm0 = static_cast<ConvexMesh*>(convexGeom0.convexMesh);
const ConvexMesh* cm1 = static_cast<ConvexMesh*>(convexGeom1.convexMesh);
bool overlap;
{
const ConvexHullData* hullData0 = &cm0->getHull();
const ConvexHullData* hullData1 = &cm1->getHull();
const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom0.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat0 = QuatVLoadU(&convexGeom0.scale.rotation.x);
const Vec3V vScale1 = V3LoadU_SafeReadW(convexGeom1.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat1 = QuatVLoadU(&convexGeom1.scale.rotation.x);
const QuatV q0 = QuatVLoadU(&pose0.q.x);
const Vec3V p0 = V3LoadU(&pose0.p.x);
const QuatV q1 = QuatVLoadU(&pose1.q.x);
const Vec3V p1 = V3LoadU(&pose1.p.x);
const PxTransformV transf0(p0, q0);
const PxTransformV transf1(p1, q1);
const PxMatTransformV aToB(transf1.transformInv(transf0));
const ConvexHullV convexHull0(hullData0, zeroV, vScale0, vQuat0, convexGeom0.scale.isIdentity());
const ConvexHullV convexHull1(hullData1, zeroV, vScale1, vQuat1, convexGeom1.scale.isIdentity());
Vec3V contactA, contactB, normal;
FloatV dist;
const RelativeConvex<ConvexHullV> convexA(convexHull0, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull1);
GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
overlap = (status == GJK_CONTACT);
}
return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool GeomOverlapCallback_NotSupported(GU_OVERLAP_FUNC_PARAMS)
{
PX_ALWAYS_ASSERT_MESSAGE("NOT SUPPORTED");
PX_UNUSED(threadContext);
PX_UNUSED(cache);
PX_UNUSED(pose0);
PX_UNUSED(pose1);
PX_UNUSED(geom0);
PX_UNUSED(geom1);
return false;
}
bool GeomOverlapCallback_SphereMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexCoreMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_MeshMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_SphereHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexHeightfield (GU_OVERLAP_FUNC_PARAMS);
static bool GeomOverlapCallback_CustomGeometry(GU_OVERLAP_FUNC_PARAMS)
{
PX_UNUSED(cache);
if(geom0.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom0).callbacks->overlap(geom0, pose0, geom1, pose1, threadContext);
if(geom1.getType() == PxGeometryType::eCUSTOM)
return static_cast<const PxCustomGeometry&>(geom1).callbacks->overlap(geom1, pose1, geom0, pose0, threadContext);
return false;
}
// VR: only support primitives and convexes so far. meshes will follow
static bool GeomOverlapCallback_ConvexCoreGeometry(GU_OVERLAP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(cache);
Gu::ConvexShape shape0, shape1;
Gu::makeConvexShape(geom0, pose0, shape0);
Gu::makeConvexShape(geom1, pose1, shape1);
if (!shape0.isValid() || !shape1.isValid())
return false;
PxVec3 point0, point1, axis;
PxReal dist = Gu::RefGjkEpa::computeGjkDistance(shape0, shape1, shape0.pose, shape1.pose, shape0.margin + shape1.margin, point0, point1, axis);
return dist <= shape0.margin + shape1.margin + FLT_EPSILON;
}
bool GeomOverlapCallback_ConvexCoreHeightfield(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXCORE);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxConvexCoreGeometry& convexGeom = static_cast<const PxConvexCoreGeometry&>(geom0);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
struct ConvexCoreOverlapReport : Gu::OverlapReport
{
const Gu::ConvexShape& mConvex;
const PxTransform& mTransform;
const Gu::HeightFieldUtil mHfUtil;
const PxTransform& mHFPose;
PxIntBool mOverlap;
ConvexCoreOverlapReport(const PxHeightFieldGeometry& hfGeom_, const PxTransform& hfPose, const Gu::ConvexShape& convex, const PxTransform& transform)
: mConvex(convex), mTransform(transform), mHfUtil(hfGeom_), mHFPose(hfPose), mOverlap(PxIntFalse) {}
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxTrianglePadded currentTriangle;
mHfUtil.getTriangle(mHFPose, currentTriangle, NULL, NULL, triangleIndex, false, false);
Gu::ConvexShape tri;
tri.coreType = Gu::ConvexCore::Type::ePOINTS;
tri.pose = PxTransform(PxIdentity);
Gu::ConvexCore::PointsCore& core = *reinterpret_cast<Gu::ConvexCore::PointsCore*>(tri.coreData);
core.points = currentTriangle.verts;
core.numPoints = 3;
core.stride = sizeof(PxVec3);
core.S = PxVec3(1);
core.R = PxQuat(PxIdentity);
tri.margin = 0.0f;
PxVec3 point0, point1, axis;
PxReal dist = Gu::RefGjkEpa::computeGjkDistance(mConvex, tri, mConvex.pose, tri.pose, mConvex.margin + tri.margin, point0, point1, axis);
if(dist <= mConvex.margin + tri.margin + FLT_EPSILON)
{
mOverlap = PxIntTrue;
return false;
}
}
return true;
}
};
Gu::ConvexShape convexShape;
Gu::makeConvexShape(convexGeom, pose0, convexShape);
if (!convexShape.isValid())
return false;
ConvexCoreOverlapReport report(hfGeom, pose1, convexShape, pose0);
PxBounds3 bounds = Gu::computeBounds(convexGeom, pose0);
report.mHfUtil.overlapAABBTriangles(bounds, report, 4);
return report.mOverlap!=PxIntFalse;
}
GeomOverlapTable gGeomOverlapMethodTable[] =
{
//PxGeometryType::eSPHERE
{
GeomOverlapCallback_SphereSphere, //PxGeometryType::eSPHERE
GeomOverlapCallback_SpherePlane, //PxGeometryType::ePLANE
GeomOverlapCallback_SphereCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_SphereBox, //PxGeometryType::eBOX
GeomOverlapCallback_ConvexCoreGeometry, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_SphereConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_SphereMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_SphereHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePLANE
{
0, //PxGeometryType::eSPHERE
GeomOverlapCallback_NotSupported, //PxGeometryType::ePLANE
GeomOverlapCallback_PlaneCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_PlaneBox, //PxGeometryType::eBOX
GeomOverlapCallback_PlaneConvexCore, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_PlaneConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCAPSULE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
GeomOverlapCallback_CapsuleCapsule, //PxGeometryType::eCAPSULE
GeomOverlapCallback_CapsuleBox, //PxGeometryType::eBOX
GeomOverlapCallback_ConvexCoreGeometry, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_CapsuleConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_CapsuleMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_CapsuleHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eBOX
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
GeomOverlapCallback_BoxBox, //PxGeometryType::eBOX
GeomOverlapCallback_ConvexCoreGeometry, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_BoxConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_BoxMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_BoxHeightfield, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXCORE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
GeomOverlapCallback_ConvexCoreGeometry, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_ConvexCoreGeometry, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_ConvexCoreMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_ConvexCoreHeightfield,//PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
GeomOverlapCallback_ConvexConvex, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_ConvexMesh, //PxGeometryType::eTRIANGLEMESH //not used: mesh always uses swept method for midphase.
GeomOverlapCallback_ConvexHeightfield, //PxGeometryType::eHEIGHTFIELD //TODO: make HF midphase that will mask this
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePARTICLESYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTETRAHEDRONMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
GeomOverlapCallback_NotSupported, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_NotSupported, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTRIANGLEMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
GeomOverlapCallback_MeshMesh, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHEIGHTFIELD
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
GeomOverlapCallback_NotSupported, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCUSTOM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
GeomOverlapCallback_CustomGeometry, //PxGeometryType::eCUSTOM
},
};
PX_COMPILE_TIME_ASSERT(sizeof(gGeomOverlapMethodTable) / sizeof(gGeomOverlapMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
const GeomOverlapTable* Gu::getOverlapFuncTable()
{
return gGeomOverlapMethodTable;
}

View File

@@ -0,0 +1,265 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuPruningPool.h"
#include "foundation/PxMemory.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
PruningPool::PruningPool(PxU64 contextID, TransformCacheMode mode) :
mNbObjects (0),
mMaxNbObjects (0),
mObjects (NULL),
mTransforms (NULL),
mTransformCacheMode (mode),
mHandleToIndex (NULL),
mIndexToHandle (NULL),
mFirstRecycledHandle(INVALID_PRUNERHANDLE),
mContextID (contextID)
{
}
PruningPool::~PruningPool()
{
mWorldBoxes.release();
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
}
bool PruningPool::resize(PxU32 newCapacity)
{
PX_PROFILE_ZONE("PruningPool::resize", mContextID);
const bool useTransforms = mTransformCacheMode!=TRANSFORM_CACHE_UNUSED;
PxTransform* newTransforms = useTransforms ? PX_ALLOCATE(PxTransform, newCapacity, "Pruner transforms") : NULL;
if(useTransforms && !newTransforms)
return false;
PrunerPayload* newData = PX_ALLOCATE(PrunerPayload, newCapacity, "PrunerPayload*");
PrunerHandle* newIndexToHandle = PX_ALLOCATE(PrunerHandle, newCapacity, "Pruner Index Mapping");
PoolIndex* newHandleToIndex = PX_ALLOCATE(PoolIndex, newCapacity, "Pruner Index Mapping");
if( (!newData) || (!newIndexToHandle) || (!newHandleToIndex))
{
PX_FREE(newHandleToIndex);
PX_FREE(newIndexToHandle);
PX_FREE(newTransforms);
PX_FREE(newData);
return false;
}
mWorldBoxes.resize(newCapacity, mNbObjects);
if(mObjects) PxMemCopy(newData, mObjects, mNbObjects*sizeof(PrunerPayload));
if(mTransforms) PxMemCopy(newTransforms, mTransforms, mNbObjects*sizeof(PxTransform));
if(mIndexToHandle) PxMemCopy(newIndexToHandle, mIndexToHandle, mNbObjects*sizeof(PrunerHandle));
if(mHandleToIndex) PxMemCopy(newHandleToIndex, mHandleToIndex, mMaxNbObjects*sizeof(PoolIndex)); // PT: why mMaxNbObjects here? on purpose?
mMaxNbObjects = newCapacity;
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
mObjects = newData;
mTransforms = newTransforms;
mHandleToIndex = newHandleToIndex;
mIndexToHandle = newIndexToHandle;
return true;
}
void PruningPool::preallocate(PxU32 newCapacity)
{
if(newCapacity>mMaxNbObjects)
resize(newCapacity);
}
PxU32 PruningPool::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count)
{
PX_PROFILE_ZONE("PruningPool::addObjects", mContextID);
PX_ASSERT((!transforms && mTransformCacheMode==TRANSFORM_CACHE_UNUSED) || (transforms && mTransformCacheMode!=TRANSFORM_CACHE_UNUSED));
for(PxU32 i=0;i<count;i++)
{
if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow
{
const PxU32 newCapacity = PxU32(float(mMaxNbObjects)*1.5f);
if(!resize(PxMax<PxU32>(newCapacity, 64)))
//if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 64)))
{
// pool can return an invalid handle if memory alloc fails
// should probably have an error here or not handle this
results[i] = INVALID_PRUNERHANDLE; // PT: we need to write the potentially invalid handle to let users know which object failed first
return i;
}
}
PX_ASSERT(mNbObjects!=mMaxNbObjects);
const PoolIndex index = mNbObjects++;
// update mHandleToIndex and mIndexToHandle mappings
PrunerHandle handle;
if(mFirstRecycledHandle != INVALID_PRUNERHANDLE)
{
// mFirstRecycledHandle is an entry into a freelist for removed slots
// this path is only taken if we have any removed slots
handle = mFirstRecycledHandle;
mFirstRecycledHandle = mHandleToIndex[handle];
}
else
{
handle = index;
}
// PT: TODO: investigate why we added mIndexToHandle/mHandleToIndex. The initial design with 'Prunable' objects didn't need these arrays.
// PT: these arrays are "parallel"
mWorldBoxes.getBounds() [index] = bounds[i]; // store the payload/userData and AABB in parallel arrays
mObjects [index] = data[i];
mIndexToHandle [index] = handle;
if(transforms && mTransforms)
mTransforms [index] = transforms[i];
mHandleToIndex[handle] = index;
results[i] = handle;
}
return count;
}
PoolIndex PruningPool::removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("PruningPool::removeObject", mContextID);
PX_ASSERT(mNbObjects);
// remove the object and its AABB by provided PrunerHandle and update mHandleToIndex and mIndexToHandle mappings
const PoolIndex indexOfRemovedObject = mHandleToIndex[h]; // retrieve object's index from handle
if(removalCallback)
removalCallback->invoke(1, &mObjects[indexOfRemovedObject]);
const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index
if(indexOfLastObject!=indexOfRemovedObject)
{
// PT: move last object's data to recycled spot (from removed object)
// PT: the last object has moved so we need to handle the mappings for this object
// PT: TODO: investigate where this double-mapping comes from. It was not needed in the original design.
// PT: these arrays are "parallel"
PxBounds3* bounds = mWorldBoxes.getBounds();
const PrunerHandle handleOfLastObject = mIndexToHandle[indexOfLastObject];
bounds [indexOfRemovedObject] = bounds [indexOfLastObject];
mObjects [indexOfRemovedObject] = mObjects [indexOfLastObject];
if(mTransforms)
mTransforms [indexOfRemovedObject] = mTransforms [indexOfLastObject];
mIndexToHandle [indexOfRemovedObject] = handleOfLastObject;
mHandleToIndex[handleOfLastObject] = indexOfRemovedObject;
}
// mHandleToIndex also stores the freelist for removed handles (in place of holes formed by removed handles)
mHandleToIndex[h] = mFirstRecycledHandle; // update linked list of available recycled handles
mFirstRecycledHandle = h; // update the list head
return indexOfLastObject;
}
void PruningPool::shiftOrigin(const PxVec3& shift)
{
PX_PROFILE_ZONE("PruningPool::shiftOrigin", mContextID);
const PxU32 nb = mNbObjects;
PxBounds3* bounds = mWorldBoxes.getBounds();
for(PxU32 i=0; i<nb; i++)
{
bounds[i].minimum -= shift;
bounds[i].maximum -= shift;
}
if(mTransforms && mTransformCacheMode==TRANSFORM_CACHE_GLOBAL)
{
for(PxU32 i=0; i<nb; i++)
mTransforms[i].p -= shift;
}
}
template<const bool hasTransforms>
static void updateAndInflateBounds(PruningPool& pool, const PrunerHandle* PX_RESTRICT handles, const PxU32* PX_RESTRICT boundsIndices, const PxBounds3* PX_RESTRICT newBounds,
const PxTransform32* PX_RESTRICT newTransforms, PxU32 count, float epsilon)
{
PxBounds3* PX_RESTRICT bounds = pool.mWorldBoxes.getBounds();
PxTransform* PX_RESTRICT transforms = hasTransforms ? pool.mTransforms : NULL;
if(boundsIndices)
{
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
const PxU32 remappedIndex = *boundsIndices++;
if(hasTransforms)
transforms[poolIndex] = newTransforms[remappedIndex];
inflateBounds<true>(bounds[poolIndex], newBounds[remappedIndex], epsilon);
}
}
else
{
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
if(hasTransforms)
{
transforms[poolIndex] = *newTransforms;
newTransforms++;
}
inflateBounds<true>(bounds[poolIndex], *newBounds++, epsilon);
}
}
}
void PruningPool::updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds,
const PxTransform32* newTransforms, PxU32 count, float epsilon)
{
PX_PROFILE_ZONE("PruningPool::updateAndInflateBounds", mContextID);
if(mTransforms)
::updateAndInflateBounds<1>(*this, handles, boundsIndices, newBounds, newTransforms, count, epsilon);
else
::updateAndInflateBounds<0>(*this, handles, boundsIndices, newBounds, NULL, count, epsilon);
}

View File

@@ -0,0 +1,129 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_PRUNING_POOL_H
#define GU_PRUNING_POOL_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "GuPrunerPayload.h"
#include "GuBounds.h"
#include "GuAABBTreeBounds.h"
namespace physx
{
namespace Gu
{
enum TransformCacheMode
{
TRANSFORM_CACHE_UNUSED,
TRANSFORM_CACHE_LOCAL,
TRANSFORM_CACHE_GLOBAL
};
// This class is designed to maintain a two way mapping between pair(PrunerPayload/userdata,AABB) and PrunerHandle
// Internally there's also an index for handles (AP: can be simplified?)
// This class effectively stores bounded pruner payloads/userdata, returns a PrunerHandle and allows O(1)
// access to them using a PrunerHandle
// Supported operations are add, remove, update bounds
class PX_PHYSX_COMMON_API PruningPool : public PxUserAllocated
{
PX_NOCOPY(PruningPool)
public:
PruningPool(PxU64 contextID, TransformCacheMode mode/*=TRANSFORM_CACHE_UNUSED*/);
~PruningPool();
PX_FORCE_INLINE const PrunerPayload& getPayloadData(PrunerHandle handle, PrunerPayloadData* data=NULL) const
{
const PoolIndex index = getIndex(handle);
if(data)
{
PxBounds3* wb = const_cast<PxBounds3*>(mWorldBoxes.getBounds());
data->mBounds = wb + index;
data->mTransform = mTransforms ? mTransforms + index : NULL;
}
return mObjects[index];
}
void shiftOrigin(const PxVec3& shift);
// PT: adds 'count' objects to the pool. Needs 'count' bounds and 'count' payloads passed as input. Writes out 'count' handles
// in 'results' array. Function returns number of successfully added objects, ideally 'count' but can be less in case we run
// out of memory.
PxU32 addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count);
// this function will swap the last object with the hole formed by removed PrunerHandle object
// and return the removed last object's index in the pool
PoolIndex removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback);
// Data access
PX_FORCE_INLINE PoolIndex getIndex(PrunerHandle h)const { return mHandleToIndex[h]; }
PX_FORCE_INLINE PrunerPayload* getObjects() const { return mObjects; }
PX_FORCE_INLINE const PxTransform* getTransforms() const { return mTransforms; }
PX_FORCE_INLINE PxTransform* getTransforms() { return mTransforms; }
PX_FORCE_INLINE bool setTransform(PrunerHandle handle, const PxTransform& transform)
{
if(!mTransforms)
return false;
mTransforms[getIndex(handle)] = transform;
return true;
}
PX_FORCE_INLINE PxU32 getNbActiveObjects() const { return mNbObjects; }
PX_FORCE_INLINE const PxBounds3* getCurrentWorldBoxes() const { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE PxBounds3* getCurrentWorldBoxes() { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE const AABBTreeBounds& getCurrentAABBTreeBounds() const { return mWorldBoxes; }
void updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms, PxU32 count, float epsilon);
void preallocate(PxU32 entries);
// protected:
PxU32 mNbObjects; //!< Current number of objects
PxU32 mMaxNbObjects; //!< Max. number of objects (capacity for mWorldBoxes, mObjects)
//!< these arrays are parallel
AABBTreeBounds mWorldBoxes; //!< List of world boxes, stores mNbObjects, capacity=mMaxNbObjects
PrunerPayload* mObjects; //!< List of objects, stores mNbObjects, capacity=mMaxNbObjects
PxTransform* mTransforms;
const TransformCacheMode mTransformCacheMode;
// private:
PoolIndex* mHandleToIndex; //!< Maps from PrunerHandle to internal index (payload/userData index in mObjects)
PrunerHandle* mIndexToHandle; //!< Inverse map from objectIndex to PrunerHandle
// this is the head of a list of holes formed in mHandleToIndex by removed handles
// the rest of the list is stored in holes in mHandleToIndex (in place)
PrunerHandle mFirstRecycledHandle;
PxU64 mContextID;
bool resize(PxU32 newCapacity);
};
}
}
#endif

View File

@@ -0,0 +1,76 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_QUERY_H
#define GU_QUERY_H
#include "GuBounds.h"
#include "GuBVHTestsSIMD.h"
namespace physx
{
namespace Gu
{
// PT: TODO: the various V3LoadUs in the base tests like SphereAABBTest could be avoided
// PT: TODO: check inflation is consistent in all of these. Looks like it's not.
struct DefaultOBBAABBTest : OBBAABBTest
{
PX_FORCE_INLINE DefaultOBBAABBTest(const ShapeData& queryVolume) :
OBBAABBTest(queryVolume.getPrunerWorldPos(),
queryVolume.getPrunerWorldRot33(),
queryVolume.getPrunerBoxGeomExtentsInflated()) {}
};
struct DefaultAABBAABBTest : AABBAABBTest
{
PX_FORCE_INLINE DefaultAABBAABBTest(const ShapeData& queryVolume) :
AABBAABBTest(queryVolume.getPrunerInflatedWorldAABB()) {}
};
struct DefaultSphereAABBTest : SphereAABBTest
{
PX_FORCE_INLINE DefaultSphereAABBTest(const ShapeData& queryVolume) :
SphereAABBTest( queryVolume.getGuSphere().center,
queryVolume.getGuSphere().radius) {}
};
struct DefaultCapsuleAABBTest : CapsuleAABBTest
{
PX_FORCE_INLINE DefaultCapsuleAABBTest(const ShapeData& queryVolume, float inflation) :
CapsuleAABBTest(queryVolume.getGuCapsule().p1,
queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f,
PxVec3(queryVolume.getGuCapsule().radius*inflation)) {}
};
}
}
#endif

View File

@@ -0,0 +1,829 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuQuerySystem.h"
#include "GuBounds.h"
#include "GuBVH.h"
#include "foundation/PxAlloca.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
bool contains(PxArray<PxU32>& pruners, PxU32 index)
{
const PxU32 nb = pruners.size();
for(PxU32 i=0;i<nb;i++)
{
if(pruners[i]==index)
return true;
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
QuerySystem::PrunerExt::PrunerExt(Pruner* pruner, PxU32 preallocated) : mPruner(pruner), mDirtyList("QuerySystem::PrunerExt::mDirtyList"), mNbStatic(0), mNbDynamic(0), mDirtyStatic(false)
{
if(pruner&& preallocated)
pruner->preallocate(preallocated);
}
QuerySystem::PrunerExt::~PrunerExt()
{
PX_DELETE(mPruner);
}
void QuerySystem::PrunerExt::flushMemory()
{
if(!mDirtyList.size())
mDirtyList.reset();
// PT: TODO: flush bitmap here
// PT: TODO: flush pruner here?
}
// PT: ok things became more complicated than before here. We'd like to delay the update of *both* the transform and the bounds,
// since immediately updating only one of them doesn't make much sense (it invalidates the pruner's data structure anyway). When both
// are delayed it gives users the ability to query the pruners *without* commiting the changes, i.e. they can query the old snapshot
// for as long as they please (i.e. a raycast wouldn't automatically trigger a structure update).
//
// Now the problem is that we need to store (at least) the transform until the update actually happens, and the initial code didn't
// support this. We also want to do this in an efficient way, which of course makes things more difficult.
//
// A naive version would simply use a per-pruner hashmap between the PrunerHandle and its data. Might be slower than before.
//
// Another version could build on the initial bitmap-based solution and use arrays of transforms/bounds as companions to the array
// of PrunerHandle (or we could mix all that data in a single structure). The issue with this is that two consecutive updates on the
// same object wouldn't work anymore: the second call would check the bitmap, see that the bit is set already, and skip the work.
// We'd need to update the cached data instead, i.e. we'd need a mapping between the PrunerHandle and its position in mDirtyList.
// And we don't have that.
//
// A potential way to fix this could be to allow the same PrunerHandle to appear multiple times in mDirtyList, with the assumption
// that users will not update the same object multiple times very often (...). The way it would work:
// - during "add", dirtyMap is set, handle/transform/bounds are pushed to mDirtyList.
// - during "remove", dirtyMap is reset *and that's it*. We don't bother purging mDirtyList (i.e. we kill the current O(n) search there)
// - during "process" we use dirtyMap to validate the update. If bit is cleared, ignore mDirtyList entry. Duplicate entries work as long
// as mDirtyList is processed in linear order. One issue is that the current mDirtyList is also passed to the pruner as-is for the
// update, so we'd need to rebuild a separate array for that and/or make sure all pruners accept duplicate entries in that array.
// Deep down that specific rabbit hole we'll actually find the recently discovered issue regarding the mToRefit array...
//
// Bit tricky. This is only for user-updates anyway (as opposed to sim updates) so this probably doesn't need ultimate perf? Note however
// that we "remove from dirty list" when an object is removed, which happens all the time with or without user updates (e.g. streaming etc).
static const bool gUseOldCode = false;
void QuerySystem::PrunerExt::addToDirtyList(PrunerHandle handle, PxU32 dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
PxBitMap& dirtyMap = mDirtyMap;
{
if(dirtyMap.size() <= handle)
{
PxU32 size = PxMax<PxU32>(dirtyMap.size()*2, 1024);
const PxU32 minSize = handle+1;
if(minSize>size)
size = minSize*2;
dirtyMap.resize(size);
PX_ASSERT(handle<dirtyMap.size());
PX_ASSERT(!dirtyMap.test(handle));
}
}
if(gUseOldCode)
{
if(!dirtyMap.test(handle))
{
dirtyMap.set(handle);
mDirtyList.pushBack(handle);
}
}
else
{
dirtyMap.set(handle);
mDirtyList.pushBack(handle);
Data& d = mDirtyData.insert();
d.mPose = transform;
if(userBounds)
d.mBounds = *userBounds;
else
d.mBounds.setEmpty();
}
if(!dynamic)
mDirtyStatic = true;
}
void QuerySystem::PrunerExt::removeFromDirtyList(PrunerHandle handle)
{
PxBitMap& dirtyMap = mDirtyMap;
if(gUseOldCode)
{
if(dirtyMap.boundedTest(handle))
{
dirtyMap.reset(handle);
mDirtyList.findAndReplaceWithLast(handle);
}
}
else
{
dirtyMap.boundedReset(handle);
}
// PT: if we remove the object that made us set mDirtyStatic to true, tough luck,
// we don't bother fixing that bool here. It's going to potentially cause an
// unnecessary update of the character controller's caches, which is not a big deal.
}
bool QuerySystem::PrunerExt::processDirtyList(const Adapter& adapter, float inflation)
{
const PxU32 numDirtyList = mDirtyList.size();
if(!numDirtyList)
return false;
if(gUseOldCode)
{
const PrunerHandle* const prunerHandles = mDirtyList.begin();
for(PxU32 i=0; i<numDirtyList; i++)
{
const PrunerHandle handle = prunerHandles[i];
mDirtyMap.reset(handle);
// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
// to take advantage of batching.
PrunerPayloadData payloadData;
const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), *payloadData.mTransform, 0.0f, inflation);
}
// PT: batch update happens after the loop instead of once per loop iteration
mPruner->updateObjects(prunerHandles, numDirtyList);
mDirtyList.clear();
}
else
{
// PT: TODO: this stuff is not 100% satisfying, since we do allow the same object to be updated multiple times.
// Would be nice to revisit & improve at some point.
PrunerHandle* prunerHandles = mDirtyList.begin();
PxU32 nbValid = 0;
for(PxU32 i=0; i<numDirtyList; i++)
{
const PrunerHandle handle = prunerHandles[i];
if(mDirtyMap.test(handle))
{
// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
// to take advantage of batching.
PrunerPayloadData payloadData;
const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
*payloadData.mTransform = mDirtyData[i].mPose;
if(mDirtyData[i].mBounds.isEmpty())
computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), mDirtyData[i].mPose, 0.0f, inflation);
else
*payloadData.mBounds = mDirtyData[i].mBounds;
prunerHandles[nbValid++] = handle;
}
else
{
// PT: if not set, object has been added to the list then removed
}
}
// PT: batch update happens after the loop instead of once per loop iteration
mPruner->updateObjects(prunerHandles, nbValid);
// PT: have to reset the bits *after* the above loop now. Unclear if clearing the
// whole map would be faster ("it depends" I guess).
while(nbValid--)
{
const PrunerHandle handle = *prunerHandles++;
mDirtyMap.reset(handle);
}
mDirtyList.clear();
mDirtyData.clear();
}
const bool ret = mDirtyStatic;
mDirtyStatic = false;
return ret;
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE QuerySystem::PrunerExt* checkPrunerIndex(PxU32 prunerIndex, const PxArray<QuerySystem::PrunerExt*>& prunerExt)
{
if(prunerIndex>=prunerExt.size() || !prunerExt[prunerIndex])
{
PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Invalid pruner index");
return NULL;
}
return prunerExt[prunerIndex];
}
QuerySystem::QuerySystem(PxU64 contextID, float inflation, const Adapter& adapter, bool usesTreeOfPruners) :
mAdapter (adapter),
mTreeOfPruners (NULL),
mContextID (contextID),
mStaticTimestamp (0),
mInflation (inflation),
mPrunerNeedsUpdating (false),
mTimestampNeedsUpdating (false),
mUsesTreeOfPruners (usesTreeOfPruners)
//mBatchUserUpdates (batchUserUpdates)
{
}
QuerySystem::~QuerySystem()
{
PX_DELETE(mTreeOfPruners);
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
PX_DELETE(pe);
}
}
PxU32 QuerySystem::addPruner(Pruner* pruner, PxU32 preallocated)
{
PrunerExt* pe = PX_NEW(PrunerExt)(pruner, preallocated);
PxU32 prunerIndex;
if(mFreePruners.size())
{
prunerIndex = mFreePruners.popBack();
mPrunerExt[prunerIndex] = pe;
}
else
{
prunerIndex = mPrunerExt.size();
mPrunerExt.pushBack(pe);
}
return prunerIndex;
}
void QuerySystem::removePruner(PxU32 prunerIndex)
{
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
// PT: it is legal to delete a pruner that still contains objects, but we should still properly update the static timestamp.
if(pe->mNbStatic)
invalidateStaticTimestamp();
PX_DELETE(pe);
mPrunerExt[prunerIndex] = NULL;
mFreePruners.pushBack(prunerIndex);
// We don't bother searching mDirtyPruners since it's going to be cleared next frame
}
void QuerySystem::flushMemory()
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(pe)
pe->flushMemory();
}
}
ActorShapeData QuerySystem::addPrunerShape(const PrunerPayload& payload, PxU32 prunerIndex, bool dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return INVALID_ACTOR_SHAPE_DATA;
mPrunerNeedsUpdating = true;
if(dynamic)
{
pe->mNbDynamic++;
}
else
{
pe->mNbStatic++;
invalidateStaticTimestamp();
}
PX_ASSERT(pe->mPruner);
const PxBounds3* boundsPtr;
PxBounds3 bounds;
if(userBounds)
{
boundsPtr = userBounds;
}
else
{
computeBounds(bounds, mAdapter.getGeometry(payload), transform, 0.0f, 1.0f + mInflation);
boundsPtr = &bounds;
}
PrunerHandle handle;
pe->mPruner->addObjects(&handle, boundsPtr, &payload, &transform, 1, false);
return createActorShapeData(createPrunerInfo(prunerIndex, dynamic), handle);
}
void QuerySystem::removePrunerShape(ActorShapeData data, PrunerPayloadRemovalCallback* removalCallback)
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
mPrunerNeedsUpdating = true;
const PxU32 dynamic = getDynamic(info);
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(pe->mPruner);
if(dynamic)
{
PX_ASSERT(pe->mNbDynamic);
pe->mNbDynamic--;
}
else
{
PX_ASSERT(pe->mNbStatic);
pe->mNbStatic--;
invalidateStaticTimestamp();
}
//if(mBatchUserUpdates)
pe->removeFromDirtyList(handle);
pe->mPruner->removeObjects(&handle, 1, removalCallback);
}
void QuerySystem::updatePrunerShape(ActorShapeData data, bool immediately, const PxTransform& transform, const PxBounds3* userBounds)
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
mPrunerNeedsUpdating = true;
const PxU32 dynamic = getDynamic(info);
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(pe->mPruner);
Pruner* pruner = pe->mPruner;
if(immediately)
{
if(!dynamic)
invalidateStaticTimestamp();
PrunerPayloadData payloadData;
const PrunerPayload& pp = pruner->getPayloadData(handle, &payloadData);
*payloadData.mTransform = transform;
if(userBounds)
*payloadData.mBounds = *userBounds;
else
computeBounds(*payloadData.mBounds, mAdapter.getGeometry(pp), transform, 0.0f, 1.0f + mInflation);
// PT: TODO: would it be better to pass the bounds & transform directly to this function?
pruner->updateObjects(&handle, 1);
}
else
{
// PT: we don't update the static timestamp immediately, so that users can query the
// old state of the structure without invalidating their caches. This will be resolved
// in processDirtyLists.
if(gUseOldCode)
pruner->setTransform(handle, transform);
// PT: we don't shrink mDirtyList anymore in removePrunerShape so the size of that array can be reused as
// a flag telling us whether we already encountered this pruner or not. If not, we add its index to mDirtyPruners.
// Goal is to avoid processing all pruners in processDirtyLists.
if(!pe->mDirtyList.size())
{
PX_ASSERT(!contains(mDirtyPruners, prunerIndex));
mDirtyPruners.pushBack(prunerIndex);
}
else
{
PX_ASSERT(contains(mDirtyPruners, prunerIndex));
}
pe->addToDirtyList(handle, dynamic, transform, userBounds);
}
}
const PrunerPayload& QuerySystem::getPayloadData(ActorShapeData data, PrunerPayloadData* ppd) const
{
const PrunerInfo info = getPrunerInfo(data);
const PxU32 prunerIndex = getPrunerIndex(info);
PX_ASSERT(checkPrunerIndex(prunerIndex, mPrunerExt));
const PrunerHandle handle = getPrunerHandle(data);
PX_ASSERT(mPrunerExt[prunerIndex]->mPruner);
return mPrunerExt[prunerIndex]->mPruner->getPayloadData(handle, ppd);
}
void QuerySystem::processDirtyLists()
{
PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);
const PxU32 nbDirtyPruners = mDirtyPruners.size();
if(!nbDirtyPruners)
return;
// must already have acquired writer lock here
const float inflation = 1.0f + mInflation;
bool mustInvalidateStaticTimestamp = false;
for(PxU32 ii=0;ii<nbDirtyPruners;ii++)
{
const PxU32 i = mDirtyPruners[ii];
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(pe && pe->processDirtyList(mAdapter, inflation))
mustInvalidateStaticTimestamp = true;
}
if(mustInvalidateStaticTimestamp)
invalidateStaticTimestamp();
mDirtyPruners.clear();
}
void QuerySystem::update(bool buildStep, bool commit)
{
PX_PROFILE_ZONE("QuerySystem::update", mContextID);
if(!buildStep && !commit)
{
//mPrunerNeedsUpdating = true; // PT: removed, why was it here?
return;
}
// flush user modified objects
// if(mBatchUserUpdates)
processDirtyLists();
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
Pruner* pruner = pe->mPruner;
if(pruner)
{
if(buildStep && pruner->isDynamic())
static_cast<DynamicPruner*>(pruner)->buildStep(true);
if(commit)
pruner->commit();
}
}
if(commit)
{
if(mUsesTreeOfPruners)
createTreeOfPruners();
}
mPrunerNeedsUpdating = !commit;
}
void QuerySystem::commitUpdates()
{
PX_PROFILE_ZONE("QuerySystem.commitUpdates", mContextID);
if(mPrunerNeedsUpdating)
{
mSQLock.lock();
if(mPrunerNeedsUpdating)
{
//if(mBatchUserUpdates)
processDirtyLists();
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->commit();
}
if(mUsesTreeOfPruners)
createTreeOfPruners();
PxMemoryBarrier();
mPrunerNeedsUpdating = false;
}
mSQLock.unlock();
}
}
PxU32 QuerySystem::startCustomBuildstep()
{
PX_PROFILE_ZONE("QuerySystem.startCustomBuildstep", mContextID);
mTimestampNeedsUpdating = false;
return mPrunerExt.size();
}
void QuerySystem::customBuildstep(PxU32 index)
{
PX_PROFILE_ZONE("QuerySystem.customBuildstep", mContextID);
PX_ASSERT(index<mPrunerExt.size());
// PT: TODO: would be better to not schedule the update of removed pruners at all
PrunerExt* pe = mPrunerExt[index]; // Can be NULL if the pruner has been removed
if(!pe)
return;
Pruner* pruner = pe->mPruner;
//void QuerySystem::processDirtyLists()
{
PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);
// must already have acquired writer lock here
const float inflation = 1.0f + mInflation;
// PT: note that we don't use the mDirtyPruners array here
if(pe->processDirtyList(mAdapter, inflation))
mTimestampNeedsUpdating = true;
}
if(pruner)
{
if(pruner->isDynamic())
static_cast<DynamicPruner*>(pruner)->buildStep(true); // PT: "true" because that parameter was made for PxSceneQuerySystem::sceneQueryBuildStep(), not us
pruner->commit();
}
}
void QuerySystem::finishCustomBuildstep()
{
PX_PROFILE_ZONE("QuerySystem.finishCustomBuildstep", mContextID);
if(mUsesTreeOfPruners)
createTreeOfPruners();
mPrunerNeedsUpdating = false;
if(mTimestampNeedsUpdating)
invalidateStaticTimestamp();
mDirtyPruners.clear();
}
void QuerySystem::sync(PxU32 prunerIndex, const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count)
{
if(!count)
return;
PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
if(!pe)
return;
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->updateObjects(handles, count, mInflation, boundsIndices, bounds, transforms);
}
///////////////////////////////////////////////////////////////////////////////
namespace
{
struct LocalRaycastCB : PxBVH::RaycastCallback
{
LocalRaycastCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const PxVec3& origin, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mOrigin(origin), mUnitDir(unitDir), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->raycast(mOrigin, mUnitDir, distance, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const PxVec3& mOrigin;
const PxVec3& mUnitDir;
PrunerRaycastCallback& mCB;
PX_NOCOPY(LocalRaycastCB)
};
struct LocalOverlapCB : PxBVH::OverlapCallback
{
LocalOverlapCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, PrunerOverlapCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->overlap(mQueryVolume, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const ShapeData& mQueryVolume;
PrunerOverlapCallback& mCB;
PX_NOCOPY(LocalOverlapCB)
};
struct LocalSweepCB : PxBVH::RaycastCallback
{
LocalSweepCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mUnitDir(unitDir), mCB(cb) {}
virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
{
QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex]; // Can be NULL if the pruner has been removed
if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
{
Pruner* pruner = pe->mPruner;
if(!pruner->sweep(mQueryVolume, mUnitDir, distance, mCB))
return false;
}
return true;
}
const PxArray<QuerySystem::PrunerExt*>& mPrunerExt;
const PrunerFilter* mPrunerFilter;
const ShapeData& mQueryVolume;
const PxVec3& mUnitDir;
PrunerRaycastCallback& mCB;
PX_NOCOPY(LocalSweepCB)
};
}
void QuerySystem::raycast(const PxVec3& origin, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalRaycastCB localCB(mPrunerExt, prunerFilter, origin, unitDir, cb);
mTreeOfPruners->raycast(origin, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->raycast(origin, unitDir, inOutDistance, cb))
return;
}
}
}
}
void QuerySystem::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalOverlapCB localCB(mPrunerExt, prunerFilter, queryVolume, cb);
mTreeOfPruners->overlap(queryVolume, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->overlap(queryVolume, cb))
return;
}
}
}
}
void QuerySystem::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalSweepCB localCB(mPrunerExt, prunerFilter, queryVolume, unitDir, cb);
mTreeOfPruners->sweep(queryVolume, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->sweep(queryVolume, unitDir, inOutDistance, cb))
return;
}
}
}
}
void QuerySystem::createTreeOfPruners()
{
PX_PROFILE_ZONE("QuerySystem.createTreeOfPruners", mContextID);
PX_DELETE(mTreeOfPruners);
mTreeOfPruners = PX_NEW(BVH)(NULL);
const PxU32 nb = mPrunerExt.size();
PxBounds3* prunerBounds = reinterpret_cast<PxBounds3*>(PxAlloca(sizeof(PxBounds3)*(nb+1)));
PxU32 nbBounds = 0;
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i];
Pruner* pruner = pe->mPruner;
if(pruner)
pruner->getGlobalBounds(prunerBounds[nbBounds++]);
}
mTreeOfPruners->init(nbBounds, NULL, prunerBounds, sizeof(PxBounds3), BVH_SPLATTER_POINTS, 1, 0.01f);
}

View File

@@ -0,0 +1,667 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "geometry/PxConvexCoreGeometry.h"
#include "geometry/PxGjkQuery.h"
#include "GuMidphaseInterface.h"
#include "GuInternal.h"
#include "GuIntersectionRayCapsule.h"
#include "GuIntersectionRaySphere.h"
#include "GuIntersectionRayPlane.h"
#include "GuHeightFieldUtil.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "CmScaling.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
#include "GuBounds.h"
using namespace physx;
using namespace Gu;
////////////////////////////////////////////////// raycasts //////////////////////////////////////////////////////////////////
PxU32 raycast_box(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const PxTransform& absPose = pose;
PxVec3 localOrigin = rayOrigin - absPose.p;
localOrigin = absPose.q.rotateInv(localOrigin);
const PxVec3 localDir = absPose.q.rotateInv(rayDir);
PxVec3 localImpact;
PxReal t;
PxU32 rval = rayAABBIntersect2(-boxGeom.halfExtents, boxGeom.halfExtents, localOrigin, localDir, localImpact, t);
if(!rval)
return 0;
if(t>maxDist)
return 0;
hits->distance = t; //worldRay.orig.distance(hit.worldImpact); //should be the same, assuming ray dir was normalized!!
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
PxHitFlags outFlags = PxHitFlags(0);
if((hitFlags & PxHitFlag::ePOSITION))
{
outFlags |= PxHitFlag::ePOSITION;
if(t!=0.0f)
hits->position = absPose.transform(localImpact);
else
hits->position = rayOrigin;
}
// Compute additional information if needed
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
//Because rayAABBIntersect2 set t = 0 if start point inside shape
if(t == 0)
{
hits->normal = -rayDir;
}
else
{
//local space normal is:
rval--;
PxVec3 n(0.0f);
n[rval] = PxReal((localImpact[rval] > 0.0f) ? 1.0f : -1.0f);
hits->normal = absPose.q.rotate(n);
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_sphere(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
if(!intersectRaySphere(rayOrigin, rayDir, maxDist, pose.p, sphereGeom.radius, hits->distance, &hits->position))
return 0;
/* // PT: should be useless now
hit.distance = worldRay.orig.distance(hit.worldImpact);
if(hit.distance>maxDist)
return false;
*/
// PT: we can't avoid computing the position here since it's needed to compute the normal anyway
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
// Compute additional information if needed
PxHitFlags outFlags = PxHitFlag::ePOSITION;
if(hitFlags & PxHitFlag::eNORMAL)
{
// User requested impact normal
//Because intersectRaySphere set distance = 0 if start point inside shape
if(hits->distance == 0.0f)
{
hits->normal = -rayDir;
}
else
{
hits->normal = hits->position - pose.p;
hits->normal.normalize();
}
outFlags |= PxHitFlag::eNORMAL;
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_capsule(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
// TODO: PT: could we simplify this ?
Capsule capsule;
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
PxReal t = 0.0f;
if(!intersectRayCapsule(rayOrigin, rayDir, capsule, t))
return 0;
if(t<0.0f || t>maxDist)
return 0;
// PT: we can't avoid computing the position here since it's needed to compute the normal anyway
hits->position = rayOrigin + rayDir*t; // PT: will be rayOrigin for t=0.0f (i.e. what the spec wants)
hits->distance = t;
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
// Compute additional information if needed
PxHitFlags outFlags = PxHitFlag::ePOSITION;
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
if(t==0.0f)
{
hits->normal = -rayDir;
}
else
{
PxReal capsuleT;
distancePointSegmentSquared(capsule, hits->position, &capsuleT);
capsule.computePoint(hits->normal, capsuleT);
hits->normal = hits->position - hits->normal; //this should never be zero. It should have a magnitude of the capsule radius.
hits->normal.normalize();
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
PxU32 raycast_plane(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
PX_UNUSED(geom);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
// Perform backface culling so that we can pick objects beyond planes
const PxPlane plane = getPlane(pose);
if(rayDir.dot(plane.n)>=0.0f)
return false;
PxReal distanceAlongLine;
if(!intersectRayPlane(rayOrigin, rayDir, plane, distanceAlongLine, &hits->position))
return 0;
/*
PxReal test = worldRay.orig.distance(hit.worldImpact);
PxReal dd;
PxVec3 pp;
PxSegmentPlaneIntersect(worldRay.orig, worldRay.orig+worldRay.dir*1000.0f, plane, dd, pp);
*/
if(distanceAlongLine<0.0f)
return 0;
if(distanceAlongLine>maxDist)
return 0;
hits->distance = distanceAlongLine;
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
hits->flags = PxHitFlag::ePOSITION|PxHitFlag::eNORMAL;
hits->normal = plane.n;
return 1;
}
PxU32 raycast_convexMesh(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
PX_ASSERT(maxHits && hits);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
PxGeomRaycastHit& hit = *hits;
//scaling: transform the ray to vertex space
const PxMat34 world2vertexSkew = convexGeom.scale.getInverse() * pose.getInverse();
//ConvexMesh* cmesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
const PxU32 nPolys = convexMesh->getNbPolygonsFast();
const HullPolygonData* PX_RESTRICT polysEA = convexMesh->getPolygons();
const HullPolygonData* polys = polysEA;
const PxVec3 vrayOrig = world2vertexSkew.transform(rayOrigin);
const PxVec3 vrayDir = world2vertexSkew.rotate(rayDir);
/*
Purely convex planes based algorithm
Iterate all planes of convex, with following rules:
* determine of ray origin is inside them all or not.
* planes parallel to ray direction are immediate early out if we're on the outside side (plane normal is sep axis)
* else
- for all planes the ray direction "enters" from the front side, track the one furthest along the ray direction (A)
- for all planes the ray direction "exits" from the back side, track the one furthest along the negative ray direction (B)
if the ray origin is outside the convex and if along the ray, A comes before B, the directed line stabs the convex at A
*/
bool originInsideAllPlanes = true;
PxReal latestEntry = -FLT_MAX;
PxReal earliestExit = FLT_MAX;
// PxU32 bestPolygonIndex = 0;
hit.faceIndex = 0xffffffff;
for(PxU32 i=0;i<nPolys;i++)
{
const HullPolygonData& poly = polys[i];
const PxPlane& vertSpacePlane = poly.mPlane;
const PxReal distToPlane = vertSpacePlane.distance(vrayOrig);
const PxReal dn = vertSpacePlane.n.dot(vrayDir);
const PxReal distAlongRay = -distToPlane/dn; // PT: TODO: potential divide by zero here!
// PT: TODO: this is computed again in the last branch!
if(distToPlane > 0.0f)
originInsideAllPlanes = false; //origin not behind plane == ray starts outside the convex.
if(dn > 1E-7f) //the ray direction "exits" from the back side
{
earliestExit = physx::intrinsics::selectMin(earliestExit, distAlongRay);
}
else if(dn < -1E-7f) //the ray direction "enters" from the front side
{
if(distAlongRay > latestEntry)
{
latestEntry = distAlongRay;
hit.faceIndex = i;
}
}
else
{
//plane normal and ray dir are orthogonal
if(distToPlane > 0.0f)
return 0; //a plane is parallel with ray -- and we're outside the ray -- we definitely miss the entire convex!
}
}
if(originInsideAllPlanes) //ray starts inside convex
{
hit.distance = 0.0f;
hit.faceIndex = 0xffffffff;
hit.u = 0.0f;
hit.v = 0.0f;
hit.position = rayOrigin;
hit.normal = -rayDir;
hit.flags = PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
return 1;
}
// AP: changed to latestEntry < maxDist-1e-5f so that we have a conservatively negative result near end of ray
if(latestEntry < earliestExit && latestEntry > 0.0f && latestEntry < maxDist-1e-5f)
{
PxHitFlags outFlags = PxHitFlag::eFACE_INDEX;
if(hitFlags & PxHitFlag::ePOSITION)
{
outFlags |= PxHitFlag::ePOSITION;
const PxVec3 pointOnPlane = vrayOrig + latestEntry * vrayDir;
hit.position = pose.transform(Cm::toMat33(convexGeom.scale) * pointOnPlane);
}
hit.distance = latestEntry;
hit.u = 0.0f;
hit.v = 0.0f;
hit.normal = PxVec3(0.0f);
// Compute additional information if needed
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
//when we have nonuniform scaling we actually have to transform by the transpose of the inverse of vertex2worldSkew.M == transpose of world2vertexSkew:
hit.normal = world2vertexSkew.rotateTranspose(polys[hit.faceIndex].mPlane.n);
hit.normal.normalize();
}
hit.flags = outFlags;
return 1;
}
return 0;
}
PxU32 raycast_particlesystem(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePARTICLESYSTEM);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(rayDir);
PX_UNUSED(pose);
PX_UNUSED(rayOrigin);
PX_UNUSED(maxHits);
PX_UNUSED(maxDist);
PX_UNUSED(hits);
PX_UNUSED(hitFlags);
PX_UNUSED(geom);
return 0;
}
PxU32 raycast_softbody(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eTETRAHEDRONMESH);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(rayDir);
PX_UNUSED(pose);
PX_UNUSED(rayOrigin);
PX_UNUSED(maxHits);
PX_UNUSED(maxDist);
PX_UNUSED(hits);
PX_UNUSED(hitFlags);
const PxTetrahedronMeshGeometry& meshGeom = static_cast<const PxTetrahedronMeshGeometry&>(geom);
PX_UNUSED(meshGeom);
//ML: need to implement raycastTetrahedronMesh
return 0;
}
PxU32 raycast_triangleMesh(GU_RAY_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eTRIANGLEMESH);
PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);
TriangleMesh* meshData = static_cast<TriangleMesh*>(meshGeom.triangleMesh);
return Midphase::raycastTriangleMesh(meshData, meshGeom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, hits, stride);
}
namespace
{
struct HFTraceSegmentCallback
{
PX_NOCOPY(HFTraceSegmentCallback)
public:
PxU8* mHits;
const PxU32 mMaxHits;
const PxU32 mStride;
PxU32 mNbHits;
const HeightFieldUtil& mUtil;
const PxTransform& mPose;
const PxVec3& mRayDir;
const PxVec3& mLocalRayDir;
const PxVec3& mLocalRayOrig;
const PxHitFlags mHitFlags;
const bool mIsDoubleSided;
HFTraceSegmentCallback( PxGeomRaycastHit* hits, PxU32 maxHits, PxU32 stride, const PxHitFlags hitFlags, const HeightFieldUtil& hfUtil, const PxTransform& pose,
const PxVec3& rayDir, const PxVec3& localRayDir, const PxVec3& localRayOrig,
bool isDoubleSided) :
mHits (reinterpret_cast<PxU8*>(hits)),
mMaxHits (maxHits),
mStride (stride),
mNbHits (0),
mUtil (hfUtil),
mPose (pose),
mRayDir (rayDir),
mLocalRayDir (localRayDir),
mLocalRayOrig (localRayOrig),
mHitFlags (hitFlags),
mIsDoubleSided (isDoubleSided)
{
PX_ASSERT(maxHits > 0);
}
PX_FORCE_INLINE bool onEvent(PxU32, const PxU32*)
{
return true;
}
PX_FORCE_INLINE bool underFaceHit(const HeightFieldUtil&, const PxVec3&, const PxVec3&, PxF32, PxF32, PxF32, PxU32)
{
return true; // true means continue traversal
}
PxAgain faceHit(const HeightFieldUtil&, const PxVec3& aHitPoint, PxU32 aTriangleIndex, PxReal u, PxReal v)
{
// traversal is strictly sorted so there's no need to sort hits
if(mNbHits >= mMaxHits)
return false; // false = stop traversal
PxGeomRaycastHit& hit = *reinterpret_cast<PxGeomRaycastHit*>(mHits);
mNbHits++;
mHits += mStride;
hit.position = aHitPoint;
hit.faceIndex = aTriangleIndex;
hit.u = u;
hit.v = v;
hit.flags = PxHitFlag::eUV | PxHitFlag::eFACE_INDEX; // UVs and face index are always set
if(mHitFlags & PxHitFlag::eNORMAL)
{
// We need the normal for the dot product.
PxVec3 normal = mPose.q.rotate(mUtil.getNormalAtShapePoint(hit.position.x, hit.position.z));
normal.normalize();
if(mIsDoubleSided && normal.dot(mRayDir) > 0.0f) // comply with normal spec for double sided (should always face opposite rayDir)
hit.normal = -normal;
else
hit.normal = normal;
hit.flags |= PxHitFlag::eNORMAL;
}
hit.distance = physx::intrinsics::selectMax(0.f, (hit.position - mLocalRayOrig).dot(mLocalRayDir));
if(mHitFlags & PxHitFlag::ePOSITION)
{
hit.position = mPose.transform(hit.position);
hit.flags |= PxHitFlag::ePOSITION;
}
return (mNbHits < mMaxHits); // true = continue traversal, false = stop traversal
}
};
}
PxU32 raycast_heightField(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom);
const PxTransform invAbsPose = pose.getInverse();
const PxVec3 localRayOrig = invAbsPose.transform(rayOrigin);
const PxVec3 localRayDir = invAbsPose.rotate(rayDir);
const bool isDoubleSided = hfGeom.heightFieldFlags.isSet(PxMeshGeometryFlag::eDOUBLE_SIDED);
const bool bothSides = isDoubleSided || (hitFlags & PxHitFlag::eMESH_BOTH_SIDES);
const HeightFieldTraceUtil hfUtil(hfGeom);
PxVec3 normRayDir = localRayDir;
normRayDir.normalizeSafe(); // nothing will happen if length is < PX_NORMALIZATION_EPSILON
// pretest if we intersect HF bounds. If no early exit, if yes move the origin and shorten the maxDist
// to deal with precision issues with large maxDist
PxBounds3 hfLocalBounds;
hfUtil.computeLocalBounds(hfLocalBounds);
// PT: inflate the bounds like we do in the scene-tree (see PX-1179)
const PxVec3 center = hfLocalBounds.getCenter();
const PxVec3 extents = hfLocalBounds.getExtents() * 1.01f; //SQ_PRUNER_INFLATION;
hfLocalBounds.minimum = center - extents;
hfLocalBounds.maximum = center + extents;
PxVec3 localImpact;
PxReal t; // closest intersection, t==0 hit inside
PxU32 rval = rayAABBIntersect2(hfLocalBounds.minimum, hfLocalBounds.maximum, localRayOrig, localRayDir, localImpact, t);
// early exit we miss the AABB
if (!rval)
return 0;
if (t > maxDist)
return 0;
// PT: if eANY_HIT is used then eMESH_MULTIPLE won't be, and we'll stop the query after 1 hit is found. There is no difference
// between 'any hit' and 'closest hit' for HFs since hits are reported in order.
HFTraceSegmentCallback callback(hits, hitFlags.isSet(PxHitFlag::eMESH_MULTIPLE) ? maxHits : 1, stride, hitFlags, hfUtil, pose,
rayDir, localRayDir, localRayOrig, isDoubleSided); // make sure we return only 1 hit without eMESH_MULTIPLE
PxReal offset = 0.0f;
PxReal maxDistOffset = maxDist;
PxVec3 localRayOrigOffset = localRayOrig;
// if we don't start inside the AABB box, offset the start pos, because of precision issues with large maxDist
if(t > 0.0f)
{
offset = t - GU_RAY_SURFACE_OFFSET;
// move the rayOrig to offset start pos
localRayOrigOffset = localRayOrig + normRayDir*offset;
}
// shorten the maxDist of the offset that was cut off and clip it
// we pick either the original maxDist, if maxDist is huge we clip it
maxDistOffset = PxMin(maxDist - offset, GU_RAY_SURFACE_OFFSET + 2.0f * PxMax(hfLocalBounds.maximum.x - hfLocalBounds.minimum.x, PxMax(hfLocalBounds.maximum.y - hfLocalBounds.minimum.y, hfLocalBounds.maximum.z - hfLocalBounds.minimum.z)));
hfUtil.traceSegment<HFTraceSegmentCallback, false, false>(localRayOrigOffset, normRayDir, maxDistOffset,
&callback, hfLocalBounds, !bothSides);
return callback.mNbHits;
}
static PxU32 raycast_custom(GU_RAY_FUNC_PARAMS)
{
const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom);
if(customGeom.isValid())
return customGeom.callbacks->raycast(rayOrigin, rayDir, geom, pose, maxDist, hitFlags, maxHits, hits, stride, threadContext);
return 0;
}
static PxU32 raycast_convexCore(GU_RAY_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(stride);
PX_UNUSED(maxHits);
PX_UNUSED(hitFlags);
struct GjkSupport : PxGjkQuery::Support
{
Gu::ConvexShape shape;
GjkSupport(const PxConvexCoreGeometry& g)
{
Gu::makeConvexShape(g, PxTransform(PxIdentity), shape);
}
virtual PxReal getMargin() const
{
return shape.margin;
}
virtual PxVec3 supportLocal(const PxVec3& dir) const
{
return shape.supportLocal(dir);
}
};
const PxConvexCoreGeometry& convex = static_cast<const PxConvexCoreGeometry&>(geom);
if (convex.isValid())
{
PxBounds3 bounds = Gu::computeBounds(convex, pose);
bounds.include(rayOrigin);
PxReal wiseDist = PxMin(maxDist, bounds.getDimensions().magnitude());
PxReal t;
PxVec3 n, p;
if (PxGjkQuery::raycast(GjkSupport(convex), pose, rayOrigin, rayDir, wiseDist, t, n, p))
{
PxGeomRaycastHit& hit = *hits;
hit.distance = t;
hit.position = p;
hit.normal = n;
hit.flags |= PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return 1;
}
}
return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: table is not static because it's accessed as 'extern' within Gu (bypassing the function call).
RaycastFunc gRaycastMap[] =
{
raycast_sphere,
raycast_plane,
raycast_capsule,
raycast_box,
raycast_convexCore,
raycast_convexMesh,
raycast_particlesystem,
raycast_softbody,
raycast_triangleMesh,
raycast_heightField,
raycast_custom
};
PX_COMPILE_TIME_ASSERT(sizeof(gRaycastMap) / sizeof(gRaycastMap[0]) == PxGeometryType::eGEOMETRY_COUNT);
// PT: the function is used by external modules (Np, CCT, Sq)
const Gu::GeomRaycastTable& Gu::getRaycastFuncTable()
{
return gRaycastMap;
}

View File

@@ -0,0 +1,192 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
#include "GuSAH.h"
using namespace physx;
using namespace Gu;
static PX_FORCE_INLINE float getSurfaceArea(const PxBounds3& bounds)
{
const PxVec3 e = bounds.maximum - bounds.minimum;
return 2.0f * (e.x * e.y + e.x * e.z + e.y * e.z);
}
SAH_Buffers::SAH_Buffers(PxU32 nb_prims)
{
mKeys = PX_ALLOCATE(float, nb_prims, "temp");
mCumulativeLower = PX_ALLOCATE(float, nb_prims, "temp");
mCumulativeUpper = PX_ALLOCATE(float, nb_prims, "temp");
mNb = nb_prims;
}
SAH_Buffers::~SAH_Buffers()
{
PX_FREE(mKeys);
PX_FREE(mCumulativeLower);
PX_FREE(mCumulativeUpper);
}
bool SAH_Buffers::split(PxU32& leftCount, PxU32 nb, const PxU32* PX_RESTRICT prims, const PxBounds3* PX_RESTRICT boxes, const PxVec3* PX_RESTRICT centers)
{
PxU32 bestAxis = 0;
PxU32 bestIndex = 0;
float bestCost = PX_MAX_F32;
PX_ASSERT(nb<=mNb);
for(PxU32 axis=0;axis<3;axis++)
{
const PxU32* sorted;
{
float* keys = mKeys;
for(PxU32 i=0;i<nb;i++)
{
const PxU32 index = prims[i];
const float center = centers[index][axis];
keys[i] = center;
}
sorted = mSorters[axis].Sort(keys, nb).GetRanks();
}
float* cumulativeLower = mCumulativeLower;
float* cumulativeUpper = mCumulativeUpper;
/* if(0)
{
PxBounds3 bbox = PxBounds3::empty();
for(PxU32 i=0; i<nb; i++)
{
bbox.include(bboxes[references[axis][i]]);
bbox.include(boxes[prims[nb-sortedIndex-1]]);
}
for (size_t i = end - 1; i > begin; --i) {
bbox.extend(bboxes[references[axis][i]]);
costs[axis][i] = bbox.half_area() * (end - i);
}
bbox = BoundingBox<Scalar>::empty();
auto best_split = std::pair<Scalar, size_t>(std::numeric_limits<Scalar>::max(), end);
for (size_t i = begin; i < end - 1; ++i) {
bbox.extend(bboxes[references[axis][i]]);
auto cost = bbox.half_area() * (i + 1 - begin) + costs[axis][i + 1];
if (cost < best_split.first)
best_split = std::make_pair(cost, i + 1);
}
return best_split;
}*/
if(1)
{
// two passes over data to calculate upper and lower bounds
PxBounds3 lower = PxBounds3::empty();
PxBounds3 upper = PxBounds3::empty();
// lower.minimum = lower.maximum = PxVec3(0.0f);
// upper.minimum = upper.maximum = PxVec3(0.0f);
#if PX_ENABLE_ASSERTS
float prevLowerCenter = -PX_MAX_F32;
float prevUpperCenter = PX_MAX_F32;
#endif
for(PxU32 i=0; i<nb; ++i)
{
const PxU32 lowSortedIndex = sorted[i];
const PxU32 highSortedIndex = sorted[nb-i-1];
//lower.Union(m_faceBounds[faces[i]]);
PX_ASSERT(centers[prims[lowSortedIndex]][axis]>=prevLowerCenter);
lower.include(boxes[prims[lowSortedIndex]]);
#if PX_ENABLE_ASSERTS
prevLowerCenter = centers[prims[lowSortedIndex]][axis];
#endif
//upper.Union(m_faceBounds[faces[numFaces - i - 1]]);
PX_ASSERT(centers[prims[highSortedIndex]][axis]<=prevUpperCenter);
upper.include(boxes[prims[highSortedIndex]]);
#if PX_ENABLE_ASSERTS
prevUpperCenter = centers[prims[highSortedIndex]][axis];
#endif
cumulativeLower[i] = getSurfaceArea(lower);
cumulativeUpper[nb - i - 1] = getSurfaceArea(upper);
}
// const float invTotalSA = 1.0f / cumulativeUpper[0];
// test all split positions
for (PxU32 i = 0; i < nb - 1; ++i)
{
const float pBelow = cumulativeLower[i];// * invTotalSA;
const float pAbove = cumulativeUpper[i];// * invTotalSA;
// const float cost = 0.125f + (pBelow * i + pAbove * float(nb - i));
const float cost = (pBelow * i + pAbove * float(nb - i));
if(cost <= bestCost)
{
bestCost = cost;
bestIndex = i;
bestAxis = axis;
}
}
}
}
leftCount = bestIndex + 1;
if(leftCount==1 || leftCount==nb)
{
// Invalid split
return false;
}
/*
// re-sort by best axis
FaceSorter predicate(&m_vertices[0], &m_indices[0], m_numFaces * 3, bestAxis);
std::sort(faces, faces + numFaces, predicate);
return bestIndex + 1;
*/
{
PxU32* tmp = reinterpret_cast<PxU32*>(mKeys);
PxMemCopy(tmp, prims, nb*sizeof(PxU32));
const PxU32* bestOrder = mSorters[bestAxis].GetRanks();
PxU32* dst = const_cast<PxU32*>(prims);
for(PxU32 i=0;i<nb;i++)
dst[i] = tmp[bestOrder[i]];
}
return true;
}

View File

@@ -0,0 +1,56 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SAH_H
#define GU_SAH_H
#include "foundation/PxBounds3.h"
#include "CmRadixSort.h"
namespace physx
{
namespace Gu
{
struct SAH_Buffers
{
SAH_Buffers(PxU32 nb_prims);
~SAH_Buffers();
bool split(PxU32& leftCount, PxU32 nb, const PxU32* PX_RESTRICT prims, const PxBounds3* PX_RESTRICT boxes, const PxVec3* PX_RESTRICT centers);
Cm::RadixSortBuffered mSorters[3];
float* mKeys;
float* mCumulativeLower;
float* mCumulativeUpper;
PxU32 mNb;
};
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,534 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SDF_H
#define GU_SDF_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
#include "foundation/PxMathUtils.h"
namespace physx
{
class PxSDFBuilder;
class PxSerializationContext;
class PxDeserializationContext;
class PxOutputStream;
namespace Gu
{
/**
\brief Represents dimensions of signed distance field
*/
class Dim3
{
public:
/**
\brief Constructor
*/
Dim3()
{
}
/**
\brief Constructor
*/
Dim3(PxZERO d) : x(0), y(0), z(0)
{
PX_UNUSED(d);
}
/**
\brief Constructor
*/
Dim3(PxU32 _x, PxU32 _y, PxU32 _z) : x(_x), y(_y), z(_z)
{
}
/**
\brief Copy constructor
*/
Dim3(const Dim3& d) : x(d.x), y(d.y), z(d.z)
{
}
PxU32 x; //!< Size of X dimension
PxU32 y; //!< Size of Y dimension
PxU32 z; //!< Size of Z dimension
template <typename T>
explicit operator PxVec3T<T>() const { return PxVec3T<T>(static_cast<T>(x), static_cast<T>(y), static_cast<T>(z)); }
};
// Interval containing its endpoints
struct Interval
{
PxReal min;
PxReal max;
PX_CUDA_CALLABLE Interval() : min(FLT_MAX), max(-FLT_MAX) {}
PX_CUDA_CALLABLE Interval(PxReal min_, PxReal max_) : min(min_), max(max_) {}
PX_FORCE_INLINE PX_CUDA_CALLABLE bool overlaps(const Interval& i) const
{
return !(min > i.max || i.min > max);
}
PX_FORCE_INLINE PX_CUDA_CALLABLE bool contains(PxReal value) const
{
return !(value < min || value > max);
}
};
/**
\brief Represents a signed distance field.
*/
class SDF : public PxUserAllocated
{
public:
// PX_SERIALIZATION
SDF(const PxEMPTY) : mOwnsMemory(false) {}
void exportExtraData(PxSerializationContext& context);
void importExtraData(PxDeserializationContext& context);
//~PX_SERIALIZATION
/**
\brief Constructor
*/
SDF() : mSdf(NULL), mSubgridStartSlots(NULL), mSubgridSdf(NULL), mOwnsMemory(true)
{
}
/**
\brief Constructor
*/
SDF(PxZERO s)
: mMeshLower(PxZero), mSpacing(0.0f), mDims(PxZero), mNumSdfs(0), mSdf(NULL),
mSubgridSize(PxZero), mNumStartSlots(0), mSubgridStartSlots(NULL), mNumSubgridSdfs(0), mSubgridSdf(NULL), mSdfSubgrids3DTexBlockDim(PxZero),
mSubgridsMinSdfValue(0.0f), mSubgridsMaxSdfValue(0.0f), mBytesPerSparsePixel(0), mOwnsMemory(true)
{
PX_UNUSED(s);
}
/**
\brief Copy constructor
*/
SDF(const SDF& sdf)
: mMeshLower(sdf.mMeshLower), mSpacing(sdf.mSpacing), mDims(sdf.mDims), mNumSdfs(sdf.mNumSdfs), mSdf(sdf.mSdf),
mSubgridSize(sdf.mSubgridSize), mNumStartSlots(sdf.mNumStartSlots), mSubgridStartSlots(sdf.mSubgridStartSlots), mNumSubgridSdfs(sdf.mNumSubgridSdfs), mSubgridSdf(sdf.mSubgridSdf), mSdfSubgrids3DTexBlockDim(sdf.mSdfSubgrids3DTexBlockDim),
mSubgridsMinSdfValue(sdf.mSubgridsMinSdfValue), mSubgridsMaxSdfValue(sdf.mSubgridsMaxSdfValue), mBytesPerSparsePixel(sdf.mBytesPerSparsePixel),
mOwnsMemory(true)
{
}
// Given a SubgridStartSlot id `id`, decode the position of the start
// index of the corresponding subgrid in the subgrid texture
static PX_FORCE_INLINE void decodeTriple(PxU32 id, PxU32& x, PxU32& y, PxU32& z)
{
x = id & 0x000003FF;
id = id >> 10;
y = id & 0x000003FF;
id = id >> 10;
z = id & 0x000003FF;
}
static PX_FORCE_INLINE PxReal decodeSample(const PxU8* data, PxU32 index, PxU32 bytesPerSparsePixel, PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue)
{
switch (bytesPerSparsePixel)
{
case 1:
return PxReal(data[index]) * (1.0f / 255.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
case 2:
{
const PxU16* ptr = reinterpret_cast<const PxU16*>(data);
return PxReal(ptr[index]) * (1.0f / 65535.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
}
case 4:
{
//If 4 bytes per subgrid pixel are available, then normal floats are used. No need to
//de-normalize integer values since the floats already contain real distance values
const PxReal* ptr = reinterpret_cast<const PxReal*>(data);
return ptr[index];
}
default:
PX_ASSERT(0);
}
return 0;
}
PX_PHYSX_COMMON_API PxReal decodeSparse(PxI32 xx, PxI32 yy, PxI32 zz) const;
PX_PHYSX_COMMON_API PxReal decodeDense(PxI32 x, PxI32 y, PxI32 z) const;
PX_FORCE_INLINE PxU32 nbSubgridsX() const
{
return mDims.x / mSubgridSize;
}
PX_FORCE_INLINE PxU32 nbSubgridsY() const
{
return mDims.y / mSubgridSize;
}
PX_FORCE_INLINE PxU32 nbSubgridsZ() const
{
return mDims.z / mSubgridSize;
}
PX_FORCE_INLINE PxVec3 getCellSize() const
{
return PxVec3(mSpacing);
}
PX_FORCE_INLINE bool subgridExists(PxU32 sgX, PxU32 sgY, PxU32 sgZ) const
{
const PxU32 nbX = mDims.x / mSubgridSize;
const PxU32 nbY = mDims.y / mSubgridSize;
PX_ASSERT(sgX <= nbX && sgY <= nbY && sgZ <= mDims.z / mSubgridSize);
PxU32 startId = mSubgridStartSlots[sgZ * (nbX) * (nbY) + sgY * (nbX) + sgX];
return startId != 0xFFFFFFFFu;
}
/**
\brief Destructor
*/
~SDF();
PxReal* allocateSdfs(const PxVec3& meshLower, const PxReal& spacing, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ,
const PxU32 subgridSize, const PxU32 sdfSubgrids3DTexBlockDimX, const PxU32 sdfSubgrids3DTexBlockDimY, const PxU32 sdfSubgrids3DTexBlockDimZ,
PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue, PxU32 bytesPerSparsePixel);
PxVec3 mMeshLower; //!< Lower bound of the original mesh
PxReal mSpacing; //!< Spacing of each sdf voxel
Dim3 mDims; //!< Dimension of the sdf
PxU32 mNumSdfs; //!< Number of sdf values
PxReal* mSdf; //!< Array of sdf
// Additional data to support sparse grid SDFs
PxU32 mSubgridSize; //!< The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples). If set to zero, this indicates that only a dense background grid SDF is used without sparse blocks
PxU32 mNumStartSlots; //!< Array length of mSubgridStartSlots. Only used for serialization
PxU32* mSubgridStartSlots; //!< Array with start indices into the subgrid texture for every subgrid block. 10bits for z coordinate, 10bits for y and 10bits for x
PxU32 mNumSubgridSdfs; //!< Array length of mSubgridSdf. Only used for serialization
PxU8* mSubgridSdf; //!< The data to create the 3d texture containg the packed subgrid blocks. Stored as PxU8 to support multiple formats (8, 16 and 32 bits per pixel)
Dim3 mSdfSubgrids3DTexBlockDim; //!< Subgrid sdf is layed out as a 3d texture including packed blocks of size (mSubgridSize+1)^3
PxReal mSubgridsMinSdfValue; //!< The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
PxReal mSubgridsMaxSdfValue; //!< The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
PxU32 mBytesPerSparsePixel; //!< The number of bytes per subgrid pixel
bool mOwnsMemory; //!< Only false for binary deserialized data
};
/**
\brief Returns the number of times a point is enclosed by a triangle mesh. Therefore points with a winding number of 0 lie oufside of the mesh, others lie inside. The sign of the winding number
is dependent ond the triangle orientation. For close meshes, a robust inside/outside check should not test for a value of 0 exactly, inside = PxAbs(windingNumber) > 0.5f should be preferred.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] windingNumbers The winding number for the center of every grid cell, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner
\param[in] maxExtents The grid's upper corner
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* windingNumbers, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
/**
\brief Returns if a point is enclosed by a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] insideResult Booleans that indicate if the center of a grid cell is inside or outside, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbersInsideCheck(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
bool* insideResult, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] sdf The signed distance field (negative values indicate that a point is inside of the mesh), index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
\param[in] cellCenteredSamples Determines if the sample points are chosen at cell centers or at cell origins
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL, bool cellCenteredSamples = true,
PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh. Near mesh surfaces, a higher resolution is available than further away from the surface (sparse sdf format) to save memory.
The samples are not cell centered but located at the cell origin. This is a requirement of the sparse grid format.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] narrowBandThicknessRelativeToExtentDiagonal The thickness of the narrow band as a fraction of the sdf box diagonal length. Can be as small as 0 but a value of at least 0.01 is recommended.
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples)
\param[out] sdfCoarse The coarse sdf as a dense 3d array of lower resolution (resulution is (with/cellsPerSubgrid+1, height/cellsPerSubgrid+1, depth/cellsPerSubgrid+1))
\param[out] sdfFineStartSlots The start slot indices of the subgrid blocks. If a subgrid block is empty, the start slot will be 0xFFFFFFFF
\param[out] subgridData The array containing subgrid data blocks
\param[out] denseSdf Provides acces to the denxe sdf that is used for compuation internally
\param[out] subgridsMinSdfValue The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[out] subgridsMaxSdfValue The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbersSparse(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
const PxVec3& minExtents, const PxVec3& maxExtents, PxReal narrowBandThicknessRelativeToExtentDiagonal, PxU32 cellsPerSubgrid,
PxArray<PxReal>& sdfCoarse, PxArray<PxU32>& sdfFineStartSlots, PxArray<PxReal>& subgridData, PxArray<PxReal>& denseSdf,
PxReal& subgridsMinSdfValue, PxReal& subgridsMaxSdfValue, PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
PX_PHYSX_COMMON_API void analyzeAndFixMesh(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxArray<PxU32>& repairedIndices);
/**
\brief Converts a sparse grid sdf to a format that can be used to create a 3d texture. 3d textures support very efficient
trilinear interpolation on the GPU which is very important during sdf evaluation.
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples)
\param[in,out] sdfFineStartSlots Array with linear start indices into the subgrid data array. This array gets converted by this method to start indices for every subgrid block in the 3d texture. The result uses 10bits for z coordinate, 10bits for y and 10bits for x
\param[in] sdfFineSubgridsIn Subgrid data array
\param[in] sdfFineSubgridsSize Number of elements in sdfFineSubgridsIn
\param[out] subgrids3DTexFormat The subgrid data organized in a 3d texture compatible order
\param[out] numSubgridsX Number of subgrid blocks in the 3d texture along x. The full texture dimension along x will be numSubgridsX*(cellsPerSubgrid+1).
\param[out] numSubgridsY Number of subgrid blocks in the 3d texture along y. The full texture dimension along y will be numSubgridsY*(cellsPerSubgrid+1).
\param[out] numSubgridsZ Number of subgrid blocks in the 3d texture along z. The full texture dimension along z will be numSubgridsZ*(cellsPerSubgrid+1).
*/
PX_PHYSX_COMMON_API void convertSparseSDFTo3DTextureLayout(PxU32 width, PxU32 height, PxU32 depth, PxU32 cellsPerSubgrid,
PxU32* sdfFineStartSlots, const PxReal* sdfFineSubgridsIn, PxU32 sdfFineSubgridsSize, PxArray<PxReal>& subgrids3DTexFormat,
PxU32& numSubgridsX, PxU32& numSubgridsY, PxU32& numSubgridsZ);
/**
\brief Extracts an isosurface as a triangular mesh from a signed distance function
\param[in] sdf The signed distance function
\param[out] isosurfaceVertices The vertices of the extracted isosurface
\param[out] isosurfaceTriangleIndices The triangles of the extracted isosurface
\param[in] numThreads The number of threads to use
*/
PX_PHYSX_COMMON_API void extractIsosurfaceFromSDF(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices, PxU32 numThreads = 1);
/**
\brief A class that allows to efficiently project points onto the surface of a triangle mesh.
*/
class PxPointOntoTriangleMeshProjector
{
public:
/**
\brief Projects a point onto the surface of a triangle mesh.
\param[in] point The point to project
\return the projected point
*/
virtual PxVec3 projectPoint(const PxVec3& point) = 0;
/**
\brief Projects a point onto the surface of a triangle mesh.
\param[in] point The point to project
\param[out] closestTriangleIndex The index of the triangle on which the projected point is located
\return the projected point
*/
virtual PxVec3 projectPoint(const PxVec3& point, PxU32& closestTriangleIndex) = 0;
/**
\brief Releases the instance and its data
*/
virtual void release() = 0;
};
/**
\brief Creates a helper class that allows to efficiently project points onto the surface of a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] triangleIndices The triangle mesh's indices
\param[in] numTriangles The number of triangles
\return A point onto triangle mesh projector instance. The caller needs to delete the instance once it is not used anymore by calling its release method
*/
PX_PHYSX_COMMON_API PxPointOntoTriangleMeshProjector* PxCreatePointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* triangleIndices, PxU32 numTriangles);
/**
\brief Utility to convert from a linear index to x/y/z indices given the grid size (only sizeX and sizeY required)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void idToXYZ(PxU32 id, PxU32 sizeX, PxU32 sizeY, PxU32& xi, PxU32& yi, PxU32& zi)
{
xi = id % sizeX; id /= sizeX;
yi = id % sizeY;
zi = id / sizeY;
}
/**
\brief Utility to convert from x/y/z indices to a linear index given the grid size (only width and height required)
*/
PX_FORCE_INLINE PX_CUDA_CALLABLE PxU32 idx3D(PxU32 x, PxU32 y, PxU32 z, PxU32 width, PxU32 height)
{
return (z * height + y) * width + x;
}
/**
\brief Utility to encode 3 indices into a single integer. Each index is allowed to use up to 10 bits.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 encodeTriple(PxU32 x, PxU32 y, PxU32 z)
{
PX_ASSERT(x >= 0 && x < 1024);
PX_ASSERT(y >= 0 && y < 1024);
PX_ASSERT(z >= 0 && z < 1024);
return (z << 20) | (y << 10) | x;
}
/**
\brief Computes sample point locations from x/y/z indices
*/
PX_ALIGN_PREFIX(16)
struct GridQueryPointSampler
{
private:
PxVec3 mOrigin;
PxVec3 mCellSize;
PxI32 mOffsetX, mOffsetY, mOffsetZ;
PxI32 mStepX, mStepY, mStepZ;
public:
PX_CUDA_CALLABLE GridQueryPointSampler() {}
PX_CUDA_CALLABLE GridQueryPointSampler(const PxVec3& origin, const PxVec3& cellSize, bool cellCenteredSamples,
PxI32 offsetX = 0, PxI32 offsetY = 0, PxI32 offsetZ = 0, PxI32 stepX = 1, PxI32 stepY = 1, PxI32 stepZ = 1)
: mCellSize(cellSize), mOffsetX(offsetX), mOffsetY(offsetY), mOffsetZ(offsetZ), mStepX(stepX), mStepY(stepY), mStepZ(stepZ)
{
if (cellCenteredSamples)
mOrigin = origin + 0.5f * cellSize;
else
mOrigin = origin;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getOrigin() const
{
return mOrigin;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getActiveCellSize() const
{
return PxVec3(mCellSize.x * mStepX, mCellSize.y * mStepY, mCellSize.z * mStepZ);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getPoint(PxI32 x, PxI32 y, PxI32 z) const
{
return PxVec3(mOrigin.x + (x * mStepX + mOffsetX) * mCellSize.x,
mOrigin.y + (y * mStepY + mOffsetY) * mCellSize.y,
mOrigin.z + (z * mStepZ + mOffsetZ) * mCellSize.z);
}
}
PX_ALIGN_SUFFIX(16);
/**
\brief Represents a dense SDF and allows to evaluate it. Uses trilinear interpolation between samples.
*/
class DenseSDF
{
public:
PxU32 mWidth, mHeight, mDepth;
private:
PxReal* mSdf;
public:
PX_INLINE PX_CUDA_CALLABLE DenseSDF(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
initialize(width, height, depth, sdf);
}
DenseSDF() {}
PX_FORCE_INLINE PX_CUDA_CALLABLE void initialize(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
mWidth = width;
mHeight = height;
mDepth = depth;
mSdf = sdf;
}
PX_FORCE_INLINE PxU32 memoryConsumption()
{
return mWidth * mHeight * mDepth * sizeof(PxReal);
}
PX_INLINE PX_CUDA_CALLABLE PxReal sampleSDFDirect(const PxVec3& samplePoint)
{
const PxU32 xBase = PxClamp(PxU32(samplePoint.x), 0u, mWidth - 2);
const PxU32 yBase = PxClamp(PxU32(samplePoint.y), 0u, mHeight - 2);
const PxU32 zBase = PxClamp(PxU32(samplePoint.z), 0u, mDepth - 2);
return PxTriLerp(
mSdf[idx3D(xBase, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase + 1, mWidth, mHeight)], samplePoint.x - xBase, samplePoint.y - yBase, samplePoint.z - zBase);
}
};
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,70 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SECONDARY_PRUNER_H
#define GU_SECONDARY_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class PruningPool;
class CompanionPruner : public PxUserAllocated
{
public:
CompanionPruner() {}
virtual ~CompanionPruner() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex) = 0;
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex) = 0;
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) = 0;
virtual void shiftOrigin(const PxVec3& shift) = 0;
virtual void timeStampChange() = 0;
virtual void build() = 0;
virtual PxU32 getNbObjects() const = 0;
virtual void release() = 0;
virtual void visualize(PxRenderOutput& out, PxU32 color) const = 0;
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const = 0;
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual void getGlobalBounds(PxBounds3&) const = 0;
};
CompanionPruner* createCompanionPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
}
}
#endif

View File

@@ -0,0 +1,101 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSqInternal.h"
#include "CmVisualization.h"
#include "GuAABBTree.h"
#include "GuAABBTreeNode.h"
#include "GuIncrementalAABBTree.h"
#include "GuBVH.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
static void drawBVH(const BVHNode* root, const BVHNode* node, PxRenderOutput& out_)
{
renderOutputDebugBox(out_, node->mBV);
if(node->isLeaf())
return;
drawBVH(root, node->getPos(root), out_);
drawBVH(root, node->getNeg(root), out_);
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const BVH* tree)
{
if(tree && tree->getNodes())
{
out << PxTransform(PxIdentity);
out << color;
drawBVH(tree->getNodes(), tree->getNodes(), out);
}
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const AABBTree* tree)
{
if(tree && tree->getNodes())
{
out << PxTransform(PxIdentity);
out << color;
drawBVH(tree->getNodes(), tree->getNodes(), out);
}
}
void visualizeTree(PxRenderOutput& out, PxU32 color, const IncrementalAABBTree* tree, DebugVizCallback* cb)
{
if(tree && tree->getNodes())
{
struct Local
{
static void _draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, PxRenderOutput& out_, DebugVizCallback* cb_)
{
PxBounds3 bounds;
V4StoreU(node->mBVMin, &bounds.minimum.x);
PX_ALIGN(16, PxVec4) max4;
V4StoreA(node->mBVMax, &max4.x);
bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
bool discard = false;
if(cb_)
discard = cb_->visualizeNode(*node, bounds);
if(!discard)
Cm::renderOutputDebugBox(out_, bounds);
if(node->isLeaf())
return;
_draw(root, node->getPos(root), out_, cb_);
_draw(root, node->getNeg(root), out_, cb_);
}
};
out << PxTransform(PxIdentity);
out << color;
Local::_draw(tree->getNodes(), tree->getNodes(), out, cb);
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,91 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_MTD_H
#define GU_SWEEP_MTD_H
namespace physx
{
class PxConvexMeshGeometry;
class PxTriangleMeshGeometry;
class PxGeometry;
class PxHeightFieldGeometry;
class PxPlane;
namespace Gu
{
class Sphere;
class Capsule;
bool computeCapsule_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeCapsule_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeBox_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation,
bool isDoubleSided, PxGeomSweepHit& hit);
bool computeBox_HeightFieldMTD( const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeConvex_TriangleMeshMTD( const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation,
bool isDoubleSided, PxGeomSweepHit& hit);
bool computeConvex_HeightFieldMTD( const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);
bool computeSphere_SphereMTD(const Sphere& sphere0, const Sphere& sphere1, PxGeomSweepHit& hit);
bool computeSphere_CapsuleMTD(const Sphere& sphere, const Capsule& capsule, PxGeomSweepHit& hit);
bool computeCapsule_CapsuleMTD(const Capsule& capsule0, const Capsule& capsule1, PxGeomSweepHit& hit);
bool computePlane_CapsuleMTD(const PxPlane& plane, const Capsule& capsule, PxGeomSweepHit& hit);
bool computePlane_BoxMTD(const PxPlane& plane, const Box& box, PxGeomSweepHit& hit);
bool computePlane_ConvexMTD(const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxGeomSweepHit& hit);
// PT: wrapper just to avoid duplicating these lines.
PX_FORCE_INLINE void setupSweepHitForMTD(PxGeomSweepHit& sweepHit, bool hasContacts, const PxVec3& unitDir)
{
sweepHit.flags = PxHitFlag::eNORMAL | PxHitFlag::eFACE_INDEX;
if(!hasContacts)
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
else
{
//ML: touching contact. We need to overwrite the normal to the negative of sweep direction
if(sweepHit.distance == 0.0f && sweepHit.normal.isZero())
sweepHit.normal = -unitDir;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
}
}
#endif

View File

@@ -0,0 +1,758 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "CmScaling.h"
#include "GuConvexMesh.h"
#include "GuIntersectionRayPlane.h"
#include "GuVecBox.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuSweepMTD.h"
#include "GuSweepSphereCapsule.h"
#include "GuSweepCapsuleCapsule.h"
#include "GuSweepTriangleUtils.h"
#include "GuSweepCapsuleTriangle.h"
#include "GuInternal.h"
#include "GuGJKRaycast.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
static const PxReal gEpsilon = .01f;
//#define USE_VIRTUAL_GJK
#ifdef USE_VIRTUAL_GJK
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
static PxU32 computeSweepConvexPlane(
const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxU32& nbPolys, const PxTransform& pose,
const PxVec3& impact_, const PxVec3& unitDir)
{
PX_ASSERT(nbPolys);
const PxVec3 impact = impact_ - unitDir * gEpsilon;
const PxVec3 localPoint = pose.transformInv(impact);
const PxVec3 localDir = pose.rotateInv(unitDir);
const FastVertex2ShapeScaling scaling(convexGeom.scale);
PxU32 minIndex = 0;
PxReal minD = PX_MAX_REAL;
for(PxU32 j=0; j<nbPolys; j++)
{
const PxPlane& pl = hullData->mPolygons[j].mPlane;
PxPlane plane;
scaling.transformPlaneToShapeSpace(pl.n, pl.d, plane.n, plane.d);
PxReal d = plane.distance(localPoint);
if(d<0.0f)
continue;
const PxReal tweak = plane.n.dot(localDir) * gEpsilon;
d += tweak;
if(d<minD)
{
minIndex = j;
minD = d;
}
}
return minIndex;
}
static PX_FORCE_INLINE bool computeFaceIndex(PxGeomSweepHit& sweepHit, const PxHitFlags hitFlags, const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxTransform& pose, const PxVec3& unitDir)
{
if(hitFlags & PxHitFlag::eFACE_INDEX)
{
// PT: compute closest polygon using the same tweak as in swept-capsule-vs-mesh
sweepHit.faceIndex = computeSweepConvexPlane(convexGeom, hullData, hullData->mNbPolygons, pose, sweepHit.position, unitDir);
sweepHit.flags |= PxHitFlag::eFACE_INDEX;
}
return true;
}
static PX_FORCE_INLINE bool hasInitialOverlap(PxGeomSweepHit& sweepHit, const PxVec3& unitDir,
const FloatVArg toi,
const Vec3VArg normal, const Vec3VArg closestA,
const PxTransformV& convexPose,
const bool isMtd, const bool impactPointOnTheOtherShape)
{
sweepHit.flags = PxHitFlag::eNORMAL;
const FloatV zero = FZero();
if(FAllGrtrOrEq(zero, toi))
{
//ML: initial overlap
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const FloatV length = toi;
const Vec3V worldPointA = convexPose.transform(closestA);
const Vec3V worldNormal = V3Normalize(convexPose.rotate(normal));
if(impactPointOnTheOtherShape)
{
const Vec3V destWorldPointA = V3NegScaleSub(worldNormal, length, worldPointA);
V3StoreU(worldNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
}
else
{
const Vec3V destNormal = V3Neg(worldNormal);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(worldPointA, sweepHit.position);
}
FStore(length, &sweepHit.distance);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
sweepHit.faceIndex = 0xffffffff;
return true;
}
return false;
}
///////////////////////////////////////////////// sweepCapsule/Sphere //////////////////////////////////////////////////////
bool sweepCapsule_SphereGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const Sphere sphere(pose.p, sphereGeom.radius+inflation);
if(!sweepSphereCapsule(sphere, lss, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags))
return false;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
if(sweepHit.distance == 0.f)
{
//intialOverlap
if(lss.p0 == lss.p1)
{
//sphere
return computeSphere_SphereMTD(sphere, Sphere(lss.p0, lss.radius), sweepHit);
}
else
{
//capsule
return computeSphere_CapsuleMTD(sphere, lss, sweepHit);
}
}
}
else
{
if(sweepHit.distance!=0.0f)
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
else
sweepHit.flags = PxHitFlag::eNORMAL;
}
return true;
}
bool sweepCapsule_PlaneGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(geom);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
const PxPlane& worldPlane = getPlane(pose);
const PxF32 capsuleRadius = lss.radius + inflation;
PxU32 index = 0;
PxVec3 pts[2];
PxReal minDp = PX_MAX_REAL;
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
// Find extreme point on the capsule
// AP: removed if (lss.p0 == lss.p1 clause because it wasn't properly computing minDp)
pts[0] = lss.p0;
pts[1] = lss.p1;
for(PxU32 i=0; i<2; i++)
{
const PxReal dp = pts[i].dot(worldPlane.n);
if(dp<minDp)
{
minDp = dp;
index = i;
}
}
const bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
//initial overlap with the plane
if(minDp <= capsuleRadius - worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL| PxHitFlag::ePOSITION;
return computePlane_CapsuleMTD(worldPlane, lss, sweepHit);
}
}
else
{
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// test if the capsule initially overlaps with plane
if(minDp <= capsuleRadius - worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
return true;
}
}
}
const PxVec3 ptOnCapsule = pts[index] - worldPlane.n*capsuleRadius;
// Raycast extreme vertex against plane
bool hitPlane = intersectRayPlane(ptOnCapsule, unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
{
sweepHit.normal = worldPlane.n;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return true;
}
return false;
}
bool sweepCapsule_CapsuleGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(capsuleGeom_);
PX_UNUSED(capsulePose_);
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule staticCapsule;
getCapsule(staticCapsule, capsuleGeom, pose);
staticCapsule.radius +=inflation;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
PxU16 outFlags;
if(!sweepCapsuleCapsule(lss, staticCapsule, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags, outFlags))
return false;
sweepHit.flags = PxHitFlags(outFlags);
if(sweepHit.distance == 0.0f)
{
//initial overlap
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
return computeCapsule_CapsuleMTD(lss, staticCapsule, sweepHit);
}
}
return true;
}
bool sweepCapsule_ConvexGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const FloatV dist = FLoad(distance);
const Vec3V worldDir = V3LoadU(unitDir);
const PxTransformV capPose = loadTransformU(capsulePose_);
const PxTransformV convexPose = loadTransformU(pose);
const PxMatTransformV aToB(convexPose.transformInv(capPose));
const FloatV capsuleHalfHeight = FLoad(capsuleGeom_.halfHeight);
const FloatV capsuleRadius = FLoad(lss.radius);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const CapsuleV capsule(aToB.p, aToB.rotate( V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));
bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;//closestA and normal is in the local space of convex hull
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = convexPose.transform(closestA);
const FloatV length = FMul(dist, toi);
const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
///////////////////////////////////////////////// sweepBox //////////////////////////////////////////////////////
bool sweepBox_PlaneGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(threadContext);
PX_UNUSED(geom);
PX_UNUSED(boxPose_);
PX_UNUSED(boxGeom_);
// const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
PxPlane worldPlane = getPlane(pose);
worldPlane.d -=inflation;
// Find extreme point on the box
PxVec3 boxPts[8];
box.computeBoxPoints(boxPts);
PxU32 index = 0;
PxReal minDp = PX_MAX_REAL;
for(PxU32 i=0;i<8;i++)
{
const PxReal dp = boxPts[i].dot(worldPlane.n);
if(dp<minDp)
{
minDp = dp;
index = i;
}
}
bool isMtd = hitFlags & PxHitFlag::eMTD;
if(isMtd)
{
// test if box initially overlap with plane
if(minDp <= -worldPlane.d)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
//compute Mtd;
return computePlane_BoxMTD(worldPlane, box, sweepHit);
}
}
else
{
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// test if box initially overlap with plane
if(minDp <= -worldPlane.d)
{
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
return true;
}
}
}
// Raycast extreme vertex against plane
bool hitPlane = intersectRayPlane(boxPts[index], unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
{
sweepHit.normal = worldPlane.n;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return true;
}
return false;
}
bool sweepBox_ConvexGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_UNUSED(boxGeom_);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const PxTransformV boxPose = loadTransformU(boxPose_);
const PxTransformV convexPose = loadTransformU(pose);
const PxMatTransformV aToB(convexPose.transformInv(boxPose));
const Vec3V boxExtents = V3LoadU(box.extents);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const BoxV boxV(zeroV, boxExtents);
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));
bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const RelativeConvex<BoxV> convexA(boxV, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<BoxV>,LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
const FloatV length = FMul(dist, toi);
const Vec3V worldPointA = convexPose.transform(closestA);
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool Gu::sweepCapsuleTriangles(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxCapsuleGeometry))
{
Capsule capsule;
getCapsule(capsule, geom, pose);
capsule.radius +=inflation;
// Compute swept box
Box capsuleBox;
computeBoxAroundCapsule(capsule, capsuleBox);
BoxPadded sweptBounds;
computeSweptBox(sweptBounds, capsuleBox.extents, capsuleBox.center, capsuleBox.rot, unitDir, distance);
PxVec3 triNormal;
return sweepCapsuleTriangles_Precise(nbTris, triangles, capsule, unitDir, distance, cachedIndex, hit, triNormal, hitFlags, doubleSided, &sweptBounds);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool sweepConvex_SphereGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const FloatV sphereRadius = FLoad(sphereGeom.radius);
const PxTransformV sphereTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
const PxMatTransformV aToB(convexTransf.transformInv(sphereTransf));
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
//CapsuleV capsule(zeroV, sphereRadius);
const CapsuleV capsule(aToB.p, sphereRadius);
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
const Vec3V destWorldPointA = convexTransf.transform(closestA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_PlaneGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
PX_UNUSED(hitFlags);
PX_UNUSED(geom);
PX_UNUSED(threadContext);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes
const PxVec3* PX_RESTRICT hullVertices = hullData->getHullVertices();
PxU32 numHullVertices = hullData->mNbHullVertices;
const bool isMtd = hitFlags & PxHitFlag::eMTD;
const FastVertex2ShapeScaling convexScaling(convexGeom.scale);
PxPlane plane = getPlane(pose);
plane.d -=inflation;
sweepHit.distance = distance;
bool status = false;
bool initialOverlap = false;
while(numHullVertices--)
{
const PxVec3& vertex = *hullVertices++;
const PxVec3 worldPt = convexPose.transform(convexScaling * vertex);
float t;
PxVec3 pointOnPlane;
if(intersectRayPlane(worldPt, unitDir, plane, t, &pointOnPlane))
{
if(plane.distance(worldPt) <= 0.0f)
{
initialOverlap = true;
break;
//// Convex touches plane
//sweepHit.distance = 0.0f;
//sweepHit.flags = PxHitFlag::eNORMAL;
//sweepHit.normal = -unitDir;
//return true;
}
if(t > 0.0f && t <= sweepHit.distance)
{
sweepHit.distance = t;
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
sweepHit.position = pointOnPlane;
sweepHit.normal = plane.n;
status = true;
}
}
}
if(initialOverlap)
{
if(isMtd)
{
sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
return computePlane_ConvexMTD(plane, convexGeom, convexPose, sweepHit);
}
else
{
sweepHit.distance = 0.0f;
sweepHit.flags = PxHitFlag::eNORMAL;
sweepHit.normal = -unitDir;
return true;
}
}
return status;
}
bool sweepConvex_CapsuleGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
Capsule capsule;
getCapsule(capsule, capsuleGeom, pose);
// remove PxHitFlag::eFACE_INDEX, not neeeded to compute.
PxHitFlags tempHitFlags = hitFlags;
tempHitFlags &= ~PxHitFlag::eFACE_INDEX;
if(!sweepCapsule_ConvexGeom(convexGeom, convexPose, capsuleGeom, pose, capsule, -unitDir, distance, sweepHit, tempHitFlags, inflation, threadContext))
return false;
if(sweepHit.flags & PxHitFlag::ePOSITION)
sweepHit.position += unitDir * sweepHit.distance;
sweepHit.normal = -sweepHit.normal;
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_BoxGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
Box box;
buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);
// remove PxHitFlag::eFACE_INDEX, not neeeded to compute.
PxHitFlags tempHitFlags = hitFlags;
tempHitFlags &= ~PxHitFlag::eFACE_INDEX;
if(!sweepBox_ConvexGeom(convexGeom, convexPose, boxGeom, pose, box, -unitDir, distance, sweepHit, tempHitFlags, inflation, threadContext))
return false;
if(sweepHit.flags & PxHitFlag::ePOSITION)
sweepHit.position += unitDir * sweepHit.distance;
sweepHit.normal = -sweepHit.normal;
sweepHit.faceIndex = 0xffffffff;
return true;
}
bool sweepConvex_ConvexGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& otherConvexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh& otherConvexMesh = *static_cast<ConvexMesh*>(otherConvexGeom.convexMesh);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
ConvexHullData* otherHullData = &otherConvexMesh.getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const Vec3V otherVScale = V3LoadU_SafeReadW(otherConvexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV otherVQuat = QuatVLoadU(&otherConvexGeom.scale.rotation.x);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const PxTransformV otherTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
const PxMatTransformV aToB(convexTransf.transformInv(otherTransf));
const ConvexHullV otherConvexHull(otherHullData, zeroV, otherVScale, otherVQuat, otherConvexGeom.scale.isIdentity());
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;
Vec3V closestA, normal;
const RelativeConvex<ConvexHullV> convexA(otherConvexHull, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<ConvexHullV>, LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = convexTransf.transform(closestA);
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(worldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
return computeFaceIndex(sweepHit, hitFlags, otherConvexGeom, otherHullData, pose, unitDir);
}

View File

@@ -0,0 +1,54 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_SHARED_TESTS_H
#define GU_SWEEP_SHARED_TESTS_H
#include "GuBoxConversion.h"
namespace physx
{
PX_FORCE_INLINE void computeWorldToBoxMatrix(PxMat34& worldToBox, const physx::Gu::Box& box)
{
PxMat34 boxToWorld;
physx::buildMatrixFromBox(boxToWorld, box);
worldToBox = boxToWorld.getInverseRT();
}
PX_FORCE_INLINE PxU32 getTriangleIndex(PxU32 i, PxU32 cachedIndex)
{
PxU32 triangleIndex;
if(i==0) triangleIndex = cachedIndex;
else if(i==cachedIndex) triangleIndex = 0;
else triangleIndex = i;
return triangleIndex;
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,65 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuWindingNumberT.h"
#include "GuWindingNumber.h"
namespace physx
{
namespace Gu
{
PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, PxF32 beta, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points)
{
return Gu::computeWindingNumber<PxF32, PxVec3>(tree, q, beta, clusters, triangles, points);
}
PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points)
{
return Gu::computeWindingNumber<PxF32, PxVec3>(tree, q, 2.0f, clusters, triangles, points);
}
void precomputeClusterInformation(const Gu::BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const PxVec3* points, PxHashMap<PxU32, ClusterApproximation>& result, PxI32 rootNodeIndex)
{
Gu::precomputeClusterInformation<PxF32, PxVec3>(tree, triangles, numTriangles, points, result, rootNodeIndex);
}
PxF32 computeWindingNumber(const PxVec3& q, const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points)
{
PxReal windingNumber = 0.0f;
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
windingNumber += Gu::evaluateExact<PxReal, PxVec3>(points[tri[0]], points[tri[1]], points[tri[2]], q);
}
return windingNumber;
}
}
}

View File

@@ -0,0 +1,60 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_H
#define GU_WINDING_NUMBER_H
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxArray.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
typedef ClusterApproximationT<PxReal, PxVec3> ClusterApproximation;
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points);
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, PxF32 beta, const PxHashMap<PxU32, ClusterApproximation>& clusters,
const PxU32* triangles, const PxVec3* points);
PX_PHYSX_COMMON_API void precomputeClusterInformation(const Gu::BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const PxVec3* points, PxHashMap<PxU32, ClusterApproximation>& result, PxI32 rootNodeIndex = 0);
//Quite slow, only useful for few query points, otherwise it is worth to construct a tree for acceleration
PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const PxVec3& q, const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points);
}
}
#endif

View File

@@ -0,0 +1,54 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_CLUSTER_H
#define GU_WINDING_NUMBER_CLUSTER_H
namespace physx
{
namespace Gu
{
template<typename R, typename V3>
struct ClusterApproximationT
{
R Radius;
R AreaSum;
V3 WeightedCentroid;
V3 WeightedNormalSum;
PX_FORCE_INLINE ClusterApproximationT() {}
PX_FORCE_INLINE ClusterApproximationT(R radius, R areaSum, const V3& weightedCentroid, const V3& weightedNormalSum) :
Radius(radius), AreaSum(areaSum), WeightedCentroid(weightedCentroid), WeightedNormalSum(weightedNormalSum)
{ }
};
}
}
#endif

View File

@@ -0,0 +1,328 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_T_H
#define GU_WINDING_NUMBER_T_H
#include "GuTriangle.h"
#include "foundation/PxArray.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVec3.h"
#include "GuBVH.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
using Triangle = Gu::IndexedTriangleT<PxI32>;
template<typename R, typename V3>
struct SecondOrderClusterApproximationT : public ClusterApproximationT<R, V3>
{
PxMat33 WeightedOuterProductSum;
PX_FORCE_INLINE SecondOrderClusterApproximationT() {}
PX_FORCE_INLINE SecondOrderClusterApproximationT(R radius, R areaSum, const V3& weightedCentroid, const V3& weightedNormalSum, const PxMat33& weightedOuterProductSum) :
ClusterApproximationT<R, V3>(radius, areaSum, weightedCentroid, weightedNormalSum), WeightedOuterProductSum(weightedOuterProductSum)
{ }
};
//Evaluates a first order winding number approximation for a given cluster (cluster = bunch of triangles)
template<typename R, typename V3>
PX_FORCE_INLINE R firstOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
const V3& evaluationPoint)
{
const V3 dir = weightedCentroid - evaluationPoint;
const R l = dir.magnitude();
return (R(0.25 / 3.141592653589793238462643383) / (l * l * l)) * weightedNormalSum.dot(dir);
}
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const ClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return firstOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, evaluationPoint);
}
//Evaluates a second order winding number approximation for a given cluster (cluster = bunch of triangles)
template<typename R, typename V3>
PX_FORCE_INLINE R secondOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
const PxMat33& weightedOuterProductSum, const V3& evaluationPoint)
{
const V3 dir = weightedCentroid - evaluationPoint;
const R l = dir.magnitude();
const R l2 = l * l;
const R scaling = R(0.25 / 3.141592653589793238462643383) / (l2 * l);
const R firstOrder = scaling * weightedNormalSum.dot(dir);
const R scaling2 = -R(3.0) * scaling / l2;
const R m11 = scaling + scaling2 * dir.x * dir.x, m12 = scaling2 * dir.x * dir.y, m13 = scaling2 * dir.x * dir.z;
const R m21 = scaling2 * dir.y * dir.x, m22 = scaling + scaling2 * dir.y * dir.y, m23 = scaling2 * dir.y * dir.z;
const R m31 = scaling2 * dir.z * dir.x, m32 = scaling2 * dir.z * dir.y, m33 = scaling + scaling2 * dir.z * dir.z;
return firstOrder + (weightedOuterProductSum.column0.x * m11 + weightedOuterProductSum.column1.x * m12 + weightedOuterProductSum.column2.x * m13 +
weightedOuterProductSum.column0.y * m21 + weightedOuterProductSum.column1.y * m22 + weightedOuterProductSum.column2.y * m23 +
weightedOuterProductSum.column0.z * m31 + weightedOuterProductSum.column1.z * m32 + weightedOuterProductSum.column2.z * m33);
}
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const SecondOrderClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return secondOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, c.WeightedOuterProductSum, evaluationPoint);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, ClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
R radiusSquared = 0;
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
}
cluster = ClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum/*, weightedOuterProductSum*/);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, SecondOrderClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
R radiusSquared = 0;
PxMat33 weightedOuterProductSum(PxZERO::PxZero);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
weightedOuterProductSum = weightedOuterProductSum + PxMat33::outer(triangleCentroids[triId] - weightedCentroid, triangleNormalsTimesTriangleArea[triId]);
}
cluster = SecondOrderClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum, weightedOuterProductSum);
}
//Exact winding number evaluation, needs to be called for every triangle close to the winding number query point
template<typename R, typename V3>
PX_FORCE_INLINE R evaluateExact(V3 a, V3 b, V3 c, const V3& p)
{
const R twoOver4PI = R(0.5 / 3.141592653589793238462643383);
a -= p;
b -= p;
c -= p;
const R la = a.magnitude(),
lb = b.magnitude(),
lc = c.magnitude();
const R y = a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x;
const R x = (la * lb * lc + (a.x * b.x + a.y * b.y + a.z * b.z) * lc +
(b.x * c.x + b.y * c.y + b.z * c.z) * la + (c.x * a.x + c.y * a.y + c.z * a.z) * lb);
return twoOver4PI * PxAtan2(y, x);
}
struct Section
{
PxI32 start;
PxI32 end;
Section(PxI32 s, PxI32 e) : start(s), end(e)
{}
};
//Helper method that recursively traverses the given BVH tree and computes a cluster approximation for every node and links it to the node
template<typename R, typename V3>
void precomputeClusterInformation(PxI32 nodeId, const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& infos, const PxArray<R> triangleAreas,
const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids)
{
PxArray<PxI32> stack;
stack.pushBack(nodeId);
PxArray<Section> returnStack;
PxArray<PxI32> triIndices;
triIndices.reserve(numTriangles);
infos.reserve(PxU32(1.2f*numTriangles));
while (stack.size() > 0)
{
nodeId = stack.popBack();
if (nodeId >= 0)
{
const BVHNode& node = tree[nodeId];
if (node.isLeaf())
{
triIndices.pushBack(node.getPrimitiveIndex());
returnStack.pushBack(Section(triIndices.size() - 1, triIndices.size()));
continue;
}
stack.pushBack(-nodeId - 1); //Marker for return index
stack.pushBack(node.getPosIndex());
stack.pushBack(node.getPosIndex() + 1);
}
else
{
Section trianglesA = returnStack.popBack();
Section trianglesB = returnStack.popBack();
Section sum(trianglesB.start, trianglesA.end);
nodeId = -nodeId - 1;
ClusterApproximationT<R, V3> c;
approximateCluster<R, V3>(triIndices, sum.start, sum.end, triangles, points, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids, c);
infos.insert(PxU32(nodeId), c);
returnStack.pushBack(sum);
}
}
}
//Precomputes a cluster approximation for every node in the BVH tree
template<typename R, typename V3>
void precomputeClusterInformation(const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& result, PxI32 rootNodeIndex)
{
PxArray<R> triangleAreas;
triangleAreas.resize(numTriangles);
PxArray<V3> triangleNormalsTimesTriangleArea;
triangleNormalsTimesTriangleArea.resize(numTriangles);
PxArray<V3> triangleCentroids;
triangleCentroids.resize(numTriangles);
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
const V3& a = points[tri[0]];
const V3& b = points[tri[1]];
const V3& c = points[tri[2]];
triangleNormalsTimesTriangleArea[i] = (b - a).cross(c - a) * R(0.5);
triangleAreas[i] = triangleNormalsTimesTriangleArea[i].magnitude();
triangleCentroids[i] = (a + b + c) * R(1.0 / 3.0);
}
result.clear();
precomputeClusterInformation(rootNodeIndex, tree, triangles, numTriangles, points, result, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids);
}
template<typename R, typename V3>
class WindingNumberTraversalController
{
public:
R mWindingNumber = 0;
private:
const PxU32* mTriangles;
const V3* mPoints;
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& mClusters;
V3 mQueryPoint;
R mDistanceThresholdBeta;
public:
PX_FORCE_INLINE WindingNumberTraversalController(const PxU32* triangles, const V3* points,
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters, const V3& queryPoint, R distanceThresholdBeta = 2)
: mTriangles(triangles), mPoints(points), mClusters(clusters), mQueryPoint(queryPoint), mDistanceThresholdBeta(distanceThresholdBeta)
{ }
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const BVHNode& node, PxI32 nodeIndex)
{
if (node.isLeaf())
{
PX_ASSERT(node.getNbPrimitives() == 1);
const PxU32* tri = &mTriangles[3 * node.getPrimitiveIndex()];
mWindingNumber += evaluateExact<R, V3>(mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]], mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
const ClusterApproximationT<R, V3>& cluster = mClusters.find(nodeIndex)->second;
const R distSquared = (mQueryPoint - cluster.WeightedCentroid).magnitudeSquared();
const R threshold = mDistanceThresholdBeta * cluster.Radius;
if (distSquared > threshold * threshold)
{
//mWindingNumber += secondOrderClusterApproximation(cluster.WeightedCentroid, cluster.WeightedNormalSum, cluster.WeightedOuterProductSum, mQueryPoint);
mWindingNumber += firstOrderClusterApproximation<R, V3>(cluster.WeightedCentroid, cluster.WeightedNormalSum, mQueryPoint); // secondOrderClusterApproximation(cluster.WeightedCentroid, cluster.WeightedNormalSum, cluster.WeightedOuterProductSum, mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
return Gu::TraversalControl::eGoDeeper;
}
private:
PX_NOCOPY(WindingNumberTraversalController)
};
template<typename R, typename V3>
R computeWindingNumber(const BVHNode* tree, const V3& q, R beta, const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters,
const PxU32* triangles, const V3* points)
{
WindingNumberTraversalController<R, V3> c(triangles, points, clusters, q, beta);
traverseBVH<WindingNumberTraversalController<R, V3>>(tree, c);
return c.mWindingNumber;
}
}
}
#endif

View File

@@ -0,0 +1,724 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuVecCapsule.h"
#include "GuVecBox.h"
#include "GuVecConvexHull.h"
#include "GuVecTriangle.h"
#include "GuGJKRaycast.h"
#include "GuCCDSweepConvexMesh.h"
#include "GuHeightFieldUtil.h"
#include "foundation/PxInlineArray.h"
#include "GuEntityReport.h"
#include "PxContact.h"
#include "GuDistancePointTriangle.h"
#include "GuBox.h"
#include "GuInternal.h"
#include "GuBoxConversion.h"
#include "GuConvexUtilsInternal.h"
#include "GuMidphaseInterface.h"
#include "geometry/PxGeometryQuery.h"
// PT: this one makes the "behavior after impact" PEEL test "fail" (rocks stop after impact)
// It also makes these UTs fail:
// [ FAILED ] CCDReportTest.CCD_soakTest_mesh
// [ FAILED ] CCDNegativeScalingTest.SLOW_ccdNegScaledMesh
static const bool gUseGeometryQuery = false;
// PT: this one seems to work.
// Timings for PEEL's "limits of speculative contacts test2", for 3 runs:
// false: true:
// Time: 504 220
// Time: 89 7
// Time: 5 84
// Time: 8 11
// Time: 423 56
// Time: 103 14
// Time: 10 11
// Time: 10 9
// Time: 418 60
// Time: 139 17
// Time: 9 9
// Time: 9 10
static const bool gUseGeometryQueryEst = false;
//#define CCD_BASIC_PROFILING
#ifdef CCD_BASIC_PROFILING
#include <stdio.h>
#endif
namespace physx
{
namespace Gu
{
PxReal SweepShapeTriangle(GU_TRIANGLE_SWEEP_METHOD_ARGS);
using namespace aos;
namespace
{
struct AccumCallback: public MeshHitCallback<PxGeomRaycastHit>
{
PX_NOCOPY(AccumCallback)
public:
PxInlineArray<PxU32, 64>& mResult;
AccumCallback(PxInlineArray<PxU32, 64>& result)
: MeshHitCallback<PxGeomRaycastHit>(CallbackMode::eMULTIPLE),
mResult(result)
{
}
virtual PxAgain processHit( // all reported coords are in mesh local space including hit.position
const PxGeomRaycastHit& hit, const PxVec3&, const PxVec3&, const PxVec3&, PxReal&, const PxU32*) PX_OVERRIDE PX_FINAL
{
mResult.pushBack(hit.faceIndex);
return true;
}
};
// PT: TODO: refactor with MidPhaseQueryLocalReport
struct EntityReportContainerCallback : public OverlapReport
{
PxInlineArray<PxU32, 64>& container;
EntityReportContainerCallback(PxInlineArray<PxU32,64>& container_) : container(container_)
{
container.forceSize_Unsafe(0);
}
virtual ~EntityReportContainerCallback() {}
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices) PX_OVERRIDE PX_FINAL
{
for(PxU32 i=0; i<nb; i++)
container.pushBack(indices[i]);
return true;
}
private:
EntityReportContainerCallback& operator=(const EntityReportContainerCallback&);
};
class TriangleHelper
{
public:
TriangleHelper(const PxTriangleMeshGeometry& shapeMesh,
const Cm::FastVertex2ShapeScaling& skew, // object is not copied, beware!
const PxU32 triangleIndex);
void getBounds(PxBounds3& bounds, const physx::PxTransform& transform) const;
//non-virtuals:
PX_FORCE_INLINE const TriangleMesh* getMeshData() const { return _getMeshData(mShapeMesh); }
PxVec3 getPolygonNormal() const;
private:
TriangleHelper& operator=(const TriangleHelper&);
const PxTriangleMeshGeometry& mShapeMesh;
const Cm::FastVertex2ShapeScaling& mVertex2ShapeSkew;
const PxU32 mTriangleIndex;
};
TriangleHelper::TriangleHelper(const PxTriangleMeshGeometry& md, const Cm::FastVertex2ShapeScaling& skew, const PxU32 tg)
: mShapeMesh(md), mVertex2ShapeSkew(skew), mTriangleIndex(tg)
{
}
void TriangleHelper::getBounds(PxBounds3& bounds, const physx::PxTransform& transform) const
{
PxTriangle localTri;
getMeshData()->getLocalTriangle(localTri, mTriangleIndex, false); // PT: 'false': no need to flip winding to compute bounds
//gotta take bounds in shape space because building it in vertex space and transforming it out would skew it.
bounds = PxBounds3::empty();
bounds.include(transform.transform(mVertex2ShapeSkew * localTri.verts[0]));
bounds.include(transform.transform(mVertex2ShapeSkew * localTri.verts[1]));
bounds.include(transform.transform(mVertex2ShapeSkew * localTri.verts[2]));
}
PxVec3 TriangleHelper::getPolygonNormal() const
{
PxTriangle localTri;
getMeshData()->getLocalTriangle(localTri, mTriangleIndex, mVertex2ShapeSkew.flipsNormal());
const PxVec3 t0 = mVertex2ShapeSkew * localTri.verts[0];
const PxVec3 t1 = mVertex2ShapeSkew * localTri.verts[1];
const PxVec3 t2 = mVertex2ShapeSkew * localTri.verts[2];
const PxVec3 v0 = t0 - t1;
const PxVec3 v1 = t0 - t2;
const PxVec3 nor = v0.cross(v1);
return nor.getNormalized();
}
}
PxReal SweepAnyShapeHeightfield(GU_SWEEP_METHOD_ARGS)
{
PX_UNUSED(toiEstimate);
PX_ASSERT(shape1.mGeometry->getType()==PxGeometryType::eHEIGHTFIELD);
const HeightFieldUtil hfUtil(static_cast<const PxHeightFieldGeometry&>(*shape1.mGeometry));
PxInlineArray<PxU32,64> tempContainer;
EntityReportContainerCallback callback(tempContainer);
const PxVec3 trA = transform0.p - lastTm0.p;
const PxVec3 trB = transform1.p - lastTm1.p;
const PxVec3 relTr = trA - trB;
const PxVec3 halfRelTr = relTr * 0.5f;
const PxVec3 ext = shape0.mExtents + halfRelTr.abs() + PxVec3(restDistance);
const PxVec3 cent = shape0.mCenter + halfRelTr;
const PxBounds3 bounds0(cent - ext, cent + ext);
hfUtil.overlapAABBTriangles(transform1, bounds0, callback);
PxArray<PxU32> orderedContainer(tempContainer.size());
PxArray<PxU32> distanceEntries(tempContainer.size());
PxU32* orderedList = orderedContainer.begin();
PxF32* distances = reinterpret_cast<PxF32*>(distanceEntries.begin());
const PxVec3 origin = shape0.mCenter;
const PxVec3 extent = shape0.mExtents + PxVec3(restDistance);
PxReal minTOI = PX_MAX_REAL;
PxU32 numTrigs = tempContainer.size();
PxU32* trianglesIndices = tempContainer.begin();
PxU32 count = 0;
for(PxU32 a = 0; a < numTrigs; ++a)
{
PxTriangle tri;
hfUtil.getTriangle(shape1.mPrevTransform, tri, 0, 0, trianglesIndices[a], true, true);
PxVec3 resultNormal = -(tri.verts[1]-tri.verts[0]).cross(tri.verts[2]-tri.verts[0]);
resultNormal.normalize();
if(relTr.dot(resultNormal) >= fastMovingThreshold)
{
PxBounds3 bounds;
bounds.setEmpty();
bounds.include(tri.verts[0]);
bounds.include(tri.verts[1]);
bounds.include(tri.verts[2]);
PxF32 toi = sweepAABBAABB(origin, extent * 1.1f, bounds.getCenter(), (bounds.getExtents() + PxVec3(0.01f, 0.01f, 0.01f)) * 1.1f, trA, trB);
PxU32 index = 0;
if(toi <= 1.f)
{
for(PxU32 b = count; b > 0; --b)
{
if(distances[b-1] <= toi)
{
//shuffle down and swap
index = b;
break;
}
PX_ASSERT(b > 0);
PX_ASSERT(b < numTrigs);
distances[b] = distances[b-1];
orderedList[b] = orderedList[b-1];
}
PX_ASSERT(index < numTrigs);
orderedList[index] = trianglesIndices[a];
distances[index] = toi;
count++;
}
}
}
worldNormal = PxVec3(PxReal(0));
worldPoint = PxVec3(PxReal(0));
Cm::FastVertex2ShapeScaling idScale;
PxU32 ccdFaceIndex = PXC_CONTACT_NO_FACE_INDEX;
const PxVec3 sphereCenter(shape0.mPrevTransform.p);
const PxF32 inSphereRadius = shape0.mFastMovingThreshold;
const PxF32 inRadSq = inSphereRadius * inSphereRadius;
const PxVec3 sphereCenterInTr1 = transform1.transformInv(sphereCenter);
const PxVec3 sphereCenterInTr1T0 = transform1.transformInv(lastTm0.p);
PxVec3 tempWorldNormal(0.f), tempWorldPoint(0.f);
for (PxU32 ti = 0; ti < count; ti++)
{
PxTriangle tri;
hfUtil.getTriangle(lastTm1, tri, 0, 0, orderedList[ti], false, false);
PxVec3 resultNormal, resultPoint;
TriangleV triangle(V3LoadU(tri.verts[0]), V3LoadU(tri.verts[1]), V3LoadU(tri.verts[2]));
//do sweep
PxReal res = SweepShapeTriangle(
*shape0.mGeometry, *shape1.mGeometry, transform0, transform1, lastTm0, lastTm1, restDistance,
resultNormal, resultPoint, Cm::FastVertex2ShapeScaling(), triangle,
0.f);
if(res <= 0.f)
{
res = 0.f;
const PxVec3 v0 = tri.verts[1] - tri.verts[0];
const PxVec3 v1 = tri.verts[2] - tri.verts[0];
//Now we have a 0 TOI, lets see if the in-sphere hit it!
PxF32 distanceSq = distancePointTriangleSquared( sphereCenterInTr1, tri.verts[0], v0, v1);
if(distanceSq < inRadSq)
{
const PxVec3 nor = v0.cross(v1);
const PxF32 distance = PxSqrt(distanceSq);
res = distance - inSphereRadius;
const PxF32 d = nor.dot(tri.verts[0]);
const PxF32 dd = nor.dot(sphereCenterInTr1T0);
if((dd - d) > 0.f)
{
//back side, penetration
res = -(2.f * inSphereRadius - distance);
}
}
}
if (res < minTOI)
{
const PxVec3 v0 = tri.verts[1] - tri.verts[0];
const PxVec3 v1 = tri.verts[2] - tri.verts[0];
PxVec3 resultNormal1 = v0.cross(v1);
resultNormal1.normalize();
//if(norDotRel > 1e-6f)
{
tempWorldNormal = resultNormal1;
tempWorldPoint = resultPoint;
minTOI = res;
ccdFaceIndex = orderedList[ti];
}
}
}
worldNormal = transform1.rotate(tempWorldNormal);
worldPoint = tempWorldPoint;
outCCDFaceIndex = ccdFaceIndex;
return minTOI;
}
PxReal SweepEstimateAnyShapeHeightfield(GU_SWEEP_ESTIMATE_ARGS)
{
PX_ASSERT(shape1.mGeometry->getType()==PxGeometryType::eHEIGHTFIELD);
const HeightFieldUtil hfUtil(static_cast<const PxHeightFieldGeometry&>(*shape1.mGeometry));
PxInlineArray<PxU32,64> tempContainer;
EntityReportContainerCallback callback(tempContainer);
const PxTransform& transform0 = shape0.mCurrentTransform;
const PxTransform& lastTr0 = shape0.mPrevTransform;
const PxTransform& transform1 = shape1.mCurrentTransform;
const PxTransform& lastTr1 = shape1.mPrevTransform;
const PxVec3 trA = transform0.p - lastTr0.p;
const PxVec3 trB = transform1.p - lastTr1.p;
const PxVec3 relTr = trA - trB;
const PxVec3 halfRelTr = relTr * 0.5f;
const PxVec3 extents = shape0.mExtents + halfRelTr.abs() + PxVec3(restDistance);
const PxVec3 center = shape0.mCenter + halfRelTr;
const PxBounds3 bounds0(center - extents, center + extents);
hfUtil.overlapAABBTriangles(transform1, bounds0, callback);
PxVec3 origin = shape0.mCenter;
PxVec3 extent = shape0.mExtents;
PxReal minTOI = PX_MAX_REAL;
PxU32 numTrigs = tempContainer.size();
PxU32* trianglesIndices = tempContainer.begin();
for(PxU32 a = 0; a < numTrigs; ++a)
{
PxTriangle tri;
hfUtil.getTriangle(shape1.mPrevTransform, tri, 0, 0, trianglesIndices[a], true, true);
PxVec3 resultNormal = -(tri.verts[1]-tri.verts[0]).cross(tri.verts[2]-tri.verts[0]);
resultNormal.normalize();
if(relTr.dot(resultNormal) >= fastMovingThreshold)
{
PxBounds3 bounds;
bounds.setEmpty();
bounds.include(tri.verts[0]);
bounds.include(tri.verts[1]);
bounds.include(tri.verts[2]);
PxF32 toi = sweepAABBAABB(origin, extent * 1.1f, bounds.getCenter(), (bounds.getExtents() + PxVec3(0.01f, 0.01f, 0.01f)) * 1.1f, trA, trB);
minTOI = PxMin(minTOI, toi);
}
}
return minTOI;
}
PxReal SweepAnyShapeMesh(GU_SWEEP_METHOD_ARGS)
{
PX_UNUSED(toiEstimate);
// this is the trimesh midphase for convex vs mesh sweep. shape0 is the convex shape.
const PxVec3 trA = transform0.p - lastTm0.p;
const PxVec3 trB = transform1.p - lastTm1.p;
const PxVec3 relTr = trA - trB;
PxVec3 unitDir = relTr;
const PxReal length = unitDir.normalize();
PX_UNUSED(restDistance);
PX_UNUSED(fastMovingThreshold);
if(gUseGeometryQuery)
{
PxGeomSweepHit sweepHit;
if(!PxGeometryQuery::sweep(unitDir, length, *shape0.mGeometry, lastTm0, *shape1.mGeometry, lastTm1, sweepHit, PxHitFlag::eDEFAULT, 0.0f, PxGeometryQueryFlag::Enum(0), NULL))
//if(!PxGeometryQuery::sweep(unitDir, length, *shape0.mGeometry, transform0, *shape1.mGeometry, transform1, sweepHit, PxHitFlag::eDEFAULT, 0.0f, PxGeometryQueryFlag::Enum(0), NULL))
return PX_MAX_REAL;
worldNormal = sweepHit.normal;
worldPoint = sweepHit.position;
outCCDFaceIndex = sweepHit.faceIndex;
return sweepHit.distance/length;
}
else
{
// Get actual shape data
PX_ASSERT(shape1.mGeometry->getType()==PxGeometryType::eTRIANGLEMESH);
const PxTriangleMeshGeometry& shapeMesh = static_cast<const PxTriangleMeshGeometry&>(*shape1.mGeometry);
const Cm::FastVertex2ShapeScaling meshScaling(shapeMesh.scale);
const PxMat33 matRot(PxIdentity);
//1) Compute the swept bounds
Box sweptBox;
computeSweptBox(sweptBox, shape0.mExtents, shape0.mCenter, matRot, unitDir, length);
Box vertexSpaceBox;
if (shapeMesh.scale.isIdentity())
vertexSpaceBox = transformBoxOrthonormal(sweptBox, transform1.getInverse());
else
computeVertexSpaceOBB(vertexSpaceBox, sweptBox, transform1, shapeMesh.scale);
vertexSpaceBox.extents += PxVec3(restDistance);
PxInlineArray<PxU32, 64> tempContainer;
AccumCallback callback(tempContainer);
// AP scaffold: early out opportunities, should probably use fat raycast
Midphase::intersectOBB(_getMeshData(shapeMesh), vertexSpaceBox, callback, true);
if (tempContainer.size() == 0)
return PX_MAX_REAL;
// Intersection found, fetch triangles
PxU32 numTrigs = tempContainer.size();
const PxU32* triangleIndices = tempContainer.begin();
PxVec3 origin = shape0.mCenter;
PxVec3 extent = shape0.mExtents + PxVec3(restDistance);
PxInlineArray<PxU32, 64> orderedContainer;
orderedContainer.resize(tempContainer.size());
PxInlineArray<PxU32, 64> distanceEntries;
distanceEntries.resize(tempContainer.size());
PxU32* orderedList = orderedContainer.begin();
PxF32* distances = reinterpret_cast<PxF32*>(distanceEntries.begin());
PxReal minTOI = PX_MAX_REAL;
PxU32 count = 0;
for(PxU32 a = 0; a < numTrigs; ++a)
{
const TriangleHelper convexPartOfMesh1(shapeMesh, meshScaling, triangleIndices[a]);
const PxVec3 resultNormal = -transform1.rotate(convexPartOfMesh1.getPolygonNormal());
if(relTr.dot(resultNormal) >= fastMovingThreshold)
{
PxBounds3 bounds;
convexPartOfMesh1.getBounds(bounds, lastTm1);
//OK, we have all 3 vertices, now calculate bounds...
PxF32 toi = sweepAABBAABB(origin, extent, bounds.getCenter(), bounds.getExtents() + PxVec3(0.02f, 0.02f, 0.02f), trA, trB);
PxU32 index = 0;
if(toi <= 1.f)
{
for(PxU32 b = count; b > 0; --b)
{
if(distances[b-1] <= toi)
{
//shuffle down and swap
index = b;
break;
}
PX_ASSERT(b > 0);
PX_ASSERT(b < numTrigs);
distances[b] = distances[b-1];
orderedList[b] = orderedList[b-1];
}
PX_ASSERT(index < numTrigs);
orderedList[index] = triangleIndices[a];
distances[index] = toi;
count++;
}
}
}
PxVec3 tempWorldNormal(0.f), tempWorldPoint(0.f);
Cm::FastVertex2ShapeScaling idScale;
PxU32 ccdFaceIndex = PXC_CONTACT_NO_FACE_INDEX;
const PxVec3 sphereCenter(lastTm1.p);
const PxF32 inSphereRadius = shape0.mFastMovingThreshold;
//PxF32 inRadSq = inSphereRadius * inSphereRadius;
const PxVec3 sphereCenterInTransform1 = transform1.transformInv(sphereCenter);
const PxVec3 sphereCenterInTransform0p = transform1.transformInv(lastTm0.p);
for (PxU32 ti = 0; ti < count /*&& PxMax(minTOI, 0.f) >= distances[ti]*/; ti++)
{
const TriangleHelper convexPartOfMesh1(shapeMesh, meshScaling, orderedList[ti]);
PxVec3 resultNormal, resultPoint;
PxTriangle localTri;
_getMeshData(shapeMesh)->getLocalTriangle(localTri, orderedList[ti], meshScaling.flipsNormal());
const PxVec3 v0 = meshScaling * localTri.verts[0];
const PxVec3 v1 = meshScaling * localTri.verts[1];
const PxVec3 v2 = meshScaling * localTri.verts[2];
TriangleV triangle(V3LoadU(v0), V3LoadU(v1), V3LoadU(v2));
//do sweep
PxReal res = SweepShapeTriangle(
*shape0.mGeometry, *shape1.mGeometry, transform0, transform1, lastTm0, lastTm1, restDistance,
resultNormal, resultPoint, Cm::FastVertex2ShapeScaling(), triangle,
0.f);
resultNormal = -resultNormal;
if(res <= 0.f)
{
res = 0.f;
const PxF32 inRad = inSphereRadius + restDistance;
const PxF32 inRadSq = inRad*inRad;
const PxVec3 vv0 = v1 - v0;
const PxVec3 vv1 = v2 - v0;
const PxVec3 nor = vv0.cross(vv1);
//Now we have a 0 TOI, lets see if the in-sphere hit it!
const PxF32 distanceSq = distancePointTriangleSquared( sphereCenterInTransform1, v0, vv0, vv1);
if(distanceSq < inRadSq)
{
const PxF32 distance = PxSqrt(distanceSq);
res = distance - inRad;
const PxF32 d = nor.dot(v0);
const PxF32 dd = nor.dot(sphereCenterInTransform0p);
if((dd - d) < 0.f)
{
//back side, penetration
res = -(2.f * inRad - distance);
}
}
PX_ASSERT(PxIsFinite(res));
resultNormal = transform1.rotate(convexPartOfMesh1.getPolygonNormal());
}
if (res < minTOI)
{
tempWorldNormal = resultNormal;//convexPartOfMesh1.getPolygonNormal(0);//transform1.rotate(convexPartOfMesh1.getPolygonNormal(0));
tempWorldPoint = resultPoint;
minTOI = res;
ccdFaceIndex = orderedList[ti];
}
}
worldNormal = tempWorldNormal;//transform1.rotate(tempWorldNormal);
worldPoint = tempWorldPoint;
outCCDFaceIndex = ccdFaceIndex;
return minTOI;
}
}
/**
\brief This code performs a conservative estimate of the TOI of a shape v mesh.
*/
PxReal SweepEstimateAnyShapeMesh(GU_SWEEP_ESTIMATE_ARGS)
{
// this is the trimesh midphase for convex vs mesh sweep. shape0 is the convex shape.
// Get actual shape data
PX_ASSERT(shape1.mGeometry->getType()==PxGeometryType::eTRIANGLEMESH);
const PxTriangleMeshGeometry& shapeMesh = static_cast<const PxTriangleMeshGeometry&>(*shape1.mGeometry);
const PxTransform& transform0 = shape0.mCurrentTransform;
const PxTransform& lastTr0 = shape0.mPrevTransform;
const PxTransform& transform1 = shape1.mCurrentTransform;
const PxTransform& lastTr1 = shape1.mPrevTransform;
const PxVec3 trA = transform0.p - lastTr0.p;
const PxVec3 trB = transform1.p - lastTr1.p;
const PxVec3 relTr = trA - trB;
PxVec3 unitDir = relTr;
const PxReal length = unitDir.normalize();
#ifdef CCD_BASIC_PROFILING
unsigned long long time = __rdtsc();
#endif
if(gUseGeometryQueryEst)
{
PX_UNUSED(restDistance);
PX_UNUSED(fastMovingThreshold);
PX_UNUSED(shapeMesh);
{
PxGeomSweepHit sweepHit;
bool status = PxGeometryQuery::sweep(unitDir, length, *shape0.mGeometry, lastTr0, *shape1.mGeometry, lastTr1, sweepHit, PxHitFlag::eDEFAULT, 0.0f, PxGeometryQueryFlag::Enum(0), NULL);
#ifdef CCD_BASIC_PROFILING
unsigned long long time2 = __rdtsc();
printf("Time: %d\n", PxU32(time2 - time)/1024);
#endif
return status ? sweepHit.distance/length : PX_MAX_REAL;
}
}
else
{
const Cm::FastVertex2ShapeScaling meshScaling(shapeMesh.scale);
const PxMat33 matRot(PxIdentity);
//1) Compute the swept bounds
Box sweptBox;
computeSweptBox(sweptBox, shape0.mExtents, shape0.mCenter, matRot, unitDir, length);
Box vertexSpaceBox;
computeVertexSpaceOBB(vertexSpaceBox, sweptBox, transform1, shapeMesh.scale);
vertexSpaceBox.extents += PxVec3(restDistance);
// TODO: implement a cached mode that fetches the trigs from a cache rather than per opcode if there is little motion.
struct CB : MeshHitCallback<PxGeomRaycastHit>
{
PxReal minTOI;
PxReal sumFastMovingThresh;
const PxTriangleMeshGeometry& shapeMesh;
const Cm::FastVertex2ShapeScaling& meshScaling;
const PxVec3& relTr;
const PxVec3& trA;
const PxVec3& trB;
const PxTransform& transform1;
const PxVec3& origin;
const PxVec3& extent;
CB(PxReal aSumFast, const PxTriangleMeshGeometry& aShapeMesh, const Cm::FastVertex2ShapeScaling& aMeshScaling,
const PxVec3& aRelTr, const PxVec3& atrA, const PxVec3& atrB, const PxTransform& aTransform1, const PxVec3& aOrigin, const PxVec3& aExtent)
: MeshHitCallback<PxGeomRaycastHit>(CallbackMode::eMULTIPLE),
sumFastMovingThresh(aSumFast), shapeMesh(aShapeMesh), meshScaling(aMeshScaling), relTr(aRelTr), trA(atrA), trB(atrB),
transform1(aTransform1), origin(aOrigin), extent(aExtent)
{
minTOI = PX_MAX_REAL;
}
virtual PxAgain processHit( // all reported coords are in mesh local space including hit.position
const PxGeomRaycastHit& hit, const PxVec3&, const PxVec3&, const PxVec3&, PxReal& shrunkMaxT, const PxU32*) PX_OVERRIDE PX_FINAL
{
const TriangleHelper convexPartOfMesh1(shapeMesh, meshScaling, hit.faceIndex);
const PxVec3 resultNormal = -transform1.rotate(convexPartOfMesh1.getPolygonNormal());
if(relTr.dot(resultNormal) >= sumFastMovingThresh)
{
PxBounds3 bounds;
convexPartOfMesh1.getBounds(bounds, transform1);
//OK, we have all 3 vertices, now calculate bounds...
PxF32 toi = sweepAABBAABB(
origin, extent * 1.1f, bounds.getCenter(), (bounds.getExtents() + PxVec3(0.01f, 0.01f, 0.01f)) * 1.1f, trA, trB);
minTOI = PxMin(minTOI, toi);
shrunkMaxT = minTOI;
}
return (minTOI > 0.0f); // stop traversal if minTOI == 0.0f
}
void operator=(const CB&) {}
};
const PxVec3& origin = shape0.mCenter;
const PxVec3 extent = shape0.mExtents + PxVec3(restDistance);
CB callback(fastMovingThreshold, shapeMesh, meshScaling, relTr, trA, trB, transform1, origin, extent);
Midphase::intersectOBB(_getMeshData(shapeMesh), vertexSpaceBox, callback, true);
#ifdef CCD_BASIC_PROFILING
unsigned long long time2 = __rdtsc();
printf("Time: %d\n", PxU32(time2 - time)/1024);
#endif
return callback.minTOI;
}
}
}
}

View File

@@ -0,0 +1,165 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CCD_SWEEP_CONVEX_MESH_H
#define GU_CCD_SWEEP_CONVEX_MESH_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVecTransform.h"
#include "CmScaling.h"
#define GU_TRIANGLE_SWEEP_METHOD_ARGS \
const PxGeometry& shape0, \
const PxGeometry& shape1, \
const PxTransform32& transform0, \
const PxTransform32& transform1, \
const PxTransform32& lastTm0, \
const PxTransform32& lastTm1, \
PxReal restDistance, \
PxVec3& worldNormal, \
PxVec3& worldPoint, \
const Cm::FastVertex2ShapeScaling& meshScaling, \
Gu::TriangleV& triangle, \
const PxF32 toiEstimate
#define GU_SWEEP_METHOD_ARGS \
const Gu::CCDShape& shape0, \
const Gu::CCDShape& shape1, \
const PxTransform32& transform0, \
const PxTransform32& transform1, \
const PxTransform32& lastTm0, \
const PxTransform32& lastTm1, \
PxReal restDistance, \
PxVec3& worldNormal, \
PxVec3& worldPoint, \
const PxF32 toiEstimate, \
PxU32& outCCDFaceIndex, \
const PxReal fastMovingThreshold
#define GU_SWEEP_ESTIMATE_ARGS \
const CCDShape& shape0, \
const CCDShape& shape1, \
const PxReal restDistance, \
const PxReal fastMovingThreshold
#define GU_SWEEP_METHOD_ARGS_UNUSED \
const Gu::CCDShape& /*shape0*/, \
const Gu::CCDShape& /*shape1*/, \
const PxTransform32& /*transform0*/,\
const PxTransform32& /*transform1*/,\
const PxTransform32& /*lastTm0*/, \
const PxTransform32& /*lastTm1*/, \
PxReal /*restDistance*/, \
PxVec3& /*worldNormal*/, \
PxVec3& /*worldPoint*/, \
const PxF32 /*toiEstimate*/, \
PxU32& /*outCCDFaceIndex*/, \
const PxReal /*fastMovingThreshold*/
namespace physx
{
namespace Gu
{
struct CCDShape
{
const PxGeometry* mGeometry;
PxReal mFastMovingThreshold; //The CCD threshold for this shape
PxTransform mPrevTransform; //This shape's previous transform
PxTransform mCurrentTransform; //This shape's current transform
PxVec3 mExtents; //The extents of this shape's AABB
PxVec3 mCenter; //The center of this shape's AABB
PxU32 mUpdateCount; //How many times this shape has been updated in the CCD. This is correlated with the CCD body's update count.
};
PX_FORCE_INLINE PxF32 sweepAABBAABB(const PxVec3& centerA, const PxVec3& extentsA, const PxVec3& centerB, const PxVec3& extentsB, const PxVec3& trA, const PxVec3& trB)
{
//Sweep 2 AABBs against each other, return the TOI when they hit else PX_MAX_REAL if they don't hit
const PxVec3 cAcB = centerA - centerB;
const PxVec3 sumExtents = extentsA + extentsB;
//Initial hit
if(PxAbs(cAcB.x) <= sumExtents.x &&
PxAbs(cAcB.y) <= sumExtents.y &&
PxAbs(cAcB.z) <= sumExtents.z)
return 0.f;
//No initial hit - perform the sweep
const PxVec3 relTr = trB - trA;
PxF32 tfirst = 0.f;
PxF32 tlast = 1.f;
const PxVec3 aMax = centerA + extentsA;
const PxVec3 aMin = centerA - extentsA;
const PxVec3 bMax = centerB + extentsB;
const PxVec3 bMin = centerB - extentsB;
const PxF32 eps = 1e-6f;
for(PxU32 a = 0; a < 3; ++a)
{
if(relTr[a] < -eps)
{
if(bMax[a] < aMin[a])
return PX_MAX_REAL;
if(aMax[a] < bMin[a])
tfirst = PxMax((aMax[a] - bMin[a])/relTr[a], tfirst);
if(bMax[a] > aMin[a])
tlast = PxMin((aMin[a] - bMax[a])/relTr[a], tlast);
}
else if(relTr[a] > eps)
{
if(bMin[a] > aMax[a])
return PX_MAX_REAL;
if(bMax[a] < aMin[a])
tfirst = PxMax((aMin[a] - bMax[a])/relTr[a], tfirst);
if(aMax[a] > bMin[a])
tlast = PxMin((aMax[a] - bMin[a])/relTr[a], tlast);
}
else
{
if(bMax[a] < aMin[a] || bMin[a] > aMax[a])
return PX_MAX_REAL;
}
//No hit
if(tfirst > tlast)
return PX_MAX_REAL;
}
//There was a hit so return the TOI
return tfirst;
}
PX_PHYSX_COMMON_API PxReal SweepShapeShape(GU_SWEEP_METHOD_ARGS);
PX_PHYSX_COMMON_API PxReal SweepEstimateAnyShapeHeightfield(GU_SWEEP_ESTIMATE_ARGS);
PX_PHYSX_COMMON_API PxReal SweepEstimateAnyShapeMesh(GU_SWEEP_ESTIMATE_ARGS);
}
}
#endif

View File

@@ -0,0 +1,415 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuVecCapsule.h"
#include "GuVecBox.h"
#include "GuVecConvexHull.h"
#include "GuVecTriangle.h"
#include "GuGJKRaycast.h"
#include "GuCCDSweepConvexMesh.h"
#include "GuGJKType.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
//#define USE_VIRTUAL_GJK
namespace physx
{
namespace Gu
{
using namespace aos;
template<typename Geom> PX_FORCE_INLINE PxReal getRadius(const PxGeometry&)
{
return 0;
}
template<> PX_FORCE_INLINE PxReal getRadius<CapsuleV>(const PxGeometry& g)
{
PX_ASSERT(g.getType() == PxGeometryType::eCAPSULE || g.getType() == PxGeometryType::eSPHERE);
PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(PxSphereGeometry, radius) == PX_OFFSET_OF(PxCapsuleGeometry, radius));
return static_cast<const PxSphereGeometry&>(g).radius;
}
#ifdef USE_VIRTUAL_GJK
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
template<class ConvexA, class ConvexB>
static PX_FORCE_INLINE PxReal CCDSweep( ConvexA& a, ConvexB& b, const PxTransform32& transform0, const PxTransform32& transform1, const PxTransform32& lastTm0, const PxTransform32& lastTm1,
const aos::FloatV& toiEstimate, PxVec3& worldPoint, PxVec3& worldNormal, PxReal inflation = 0.0f)
{
PX_UNUSED(toiEstimate); //KS - TODO - can we use this again?
using namespace aos;
const QuatV q0 = QuatVLoadA(&transform0.q.x);
const Vec3V p0 = V3LoadA(&lastTm0.p.x);
const QuatV q1 = QuatVLoadA(&transform1.q.x);
const Vec3V p1 = V3LoadA(&lastTm1.p.x);
const PxTransformV tr0(p0, q0);
const PxTransformV tr1(p1, q1);
const PxMatTransformV aToB(tr1.transformInv(tr0));
const Vec3V trans0p = V3LoadA(transform0.p);
const Vec3V trans1p = V3LoadA(transform1.p);
const Vec3V trA = V3Sub(trans0p, p0);
const Vec3V trB = V3Sub(trans1p, p1);
const Vec3V relTr = tr1.rotateInv(V3Sub(trB, trA));
FloatV lambda;
Vec3V closestA, normal;
const FloatV initialLambda = FZero();
const RelativeConvex<ConvexA> convexA(a, aToB);
const LocalConvex<ConvexB> convexB(b);
#ifdef USE_VIRTUAL_GJK
if(virtualGjkRaycastPenetration(convexA, convexB, aToB.p, initialLambda, V3Zero(), relTr, lambda, normal, closestA, inflation, true))
#else
if(gjkRaycastPenetration<RelativeConvex<ConvexA>, LocalConvex<ConvexB> >(convexA, convexB, aToB.p, initialLambda, V3Zero(), relTr, lambda, normal, closestA, inflation, true))
#endif
{
//Adjust closestA because it will be on the surface of convex a in its initial position (s). If the TOI > 0, we need to move
//the point along the sweep direction to get the world-space hit position.
PxF32 res;
FStore(lambda, &res);
closestA = V3ScaleAdd(trA, FMax(lambda, FZero()), tr1.transform(closestA));
normal = tr1.rotate(normal);
V3StoreU(normal, worldNormal);
V3StoreU(closestA, worldPoint);
return res;
}
return PX_MAX_REAL;
}
//
// lookup table for geometry-vs-geometry sweeps
//
PxReal UnimplementedSweep (GU_SWEEP_METHOD_ARGS_UNUSED)
{
return PX_MAX_REAL; //no impact
}
template<typename Geom0, typename Geom1>
static PxReal SweepGeomGeom(GU_SWEEP_METHOD_ARGS)
{
PX_UNUSED(outCCDFaceIndex);
PX_UNUSED(fastMovingThreshold);
const PxGeometry& g0 = *shape0.mGeometry;
const PxGeometry& g1 = *shape1.mGeometry;
typename ConvexGeom<Geom0>::Type geom0(g0);
typename ConvexGeom<Geom1>::Type geom1(g1);
return CCDSweep(geom0, geom1, transform0, transform1, lastTm0, lastTm1, FLoad(toiEstimate), worldPoint, worldNormal, restDistance+getRadius<Geom0>(g0)+getRadius<Geom1>(g1) );
}
static PxReal SweepAnyShapeCustom(GU_SWEEP_METHOD_ARGS)
{
PX_UNUSED(fastMovingThreshold);
PX_UNUSED(toiEstimate);
PX_UNUSED(restDistance);
const PxGeometry& g0 = *shape0.mGeometry;
const PxGeometry& g1 = *shape1.mGeometry;
PX_ASSERT(g1.getType() == PxGeometryType::eCUSTOM);
const PxVec3 trA = transform0.p - lastTm0.p;
const PxVec3 trB = transform1.p - lastTm1.p;
const PxVec3 relTr = trA - trB;
PxVec3 unitDir = relTr;
const PxReal length = unitDir.normalize();
PxGeomSweepHit sweepHit;
if (!static_cast<const PxCustomGeometry&>(g1).callbacks->sweep(unitDir, length, g1, lastTm1, g0, lastTm0, sweepHit, PxHitFlag::eDEFAULT, 0.0f, NULL))
return PX_MAX_REAL;
worldNormal = sweepHit.normal;
worldPoint = sweepHit.position;
outCCDFaceIndex = sweepHit.faceIndex;
return sweepHit.distance / length;
}
typedef PxReal (*SweepMethod) (GU_SWEEP_METHOD_ARGS);
PxReal SweepAnyShapeHeightfield(GU_SWEEP_METHOD_ARGS);
PxReal SweepAnyShapeMesh(GU_SWEEP_METHOD_ARGS);
SweepMethod g_SweepMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
//PxGeometryType::eSPHERE
{
SweepGeomGeom<CapsuleV, CapsuleV>, //PxGeometryType::eSPHERE
UnimplementedSweep, //PxGeometryType::ePLANE
SweepGeomGeom<CapsuleV, CapsuleV>, //PxGeometryType::eCAPSULE
SweepGeomGeom<CapsuleV, BoxV>, //PxGeometryType::eBOX
UnimplementedSweep, //PxGeometryType::eCONVEXCORE
SweepGeomGeom<CapsuleV, ConvexHullV>, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
SweepAnyShapeMesh, //PxGeometryType::eTRIANGLEMESH
SweepAnyShapeHeightfield, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePLANE
{
0, //PxGeometryType::eSPHERE
UnimplementedSweep, //PxGeometryType::ePLANE
UnimplementedSweep, //PxGeometryType::eCAPSULE
UnimplementedSweep, //PxGeometryType::eBOX
UnimplementedSweep, //PxGeometryType::eCONVEXCORE
UnimplementedSweep, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
UnimplementedSweep, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCAPSULE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
SweepGeomGeom<CapsuleV, CapsuleV>, //PxGeometryType::eCAPSULE
SweepGeomGeom<CapsuleV, BoxV>, //PxGeometryType::eBOX
UnimplementedSweep, //PxGeometryType::eCONVEXCORE
SweepGeomGeom<CapsuleV, ConvexHullV>, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
SweepAnyShapeMesh, //PxGeometryType::eTRIANGLEMESH
SweepAnyShapeHeightfield, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eBOX
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
SweepGeomGeom<BoxV, BoxV>, //PxGeometryType::eBOX
UnimplementedSweep, //PxGeometryType::eCONVEXCORE
SweepGeomGeom<BoxV, ConvexHullV>, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
SweepAnyShapeMesh, //PxGeometryType::eTRIANGLEMESH
SweepAnyShapeHeightfield, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXCORE
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
UnimplementedSweep, //PxGeometryType::eCONVEXCORE
UnimplementedSweep, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
UnimplementedSweep, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCONVEXMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
SweepGeomGeom<ConvexHullV, ConvexHullV>, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
SweepAnyShapeMesh, //PxGeometryType::eTRIANGLEMESH
SweepAnyShapeHeightfield, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::ePARTICLESYSTEM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
UnimplementedSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
UnimplementedSweep, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTETRAHEDRONMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
UnimplementedSweep, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
UnimplementedSweep, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eTRIANGLEMESH
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eHEIGHTFIELD
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
UnimplementedSweep, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
//PxGeometryType::eCUSTOM
{
0, //PxGeometryType::eSPHERE
0, //PxGeometryType::ePLANE
0, //PxGeometryType::eCAPSULE
0, //PxGeometryType::eBOX
0, //PxGeometryType::eCONVEXCORE
0, //PxGeometryType::eCONVEXMESH
0, //PxGeometryType::ePARTICLESYSTEM
0, //PxGeometryType::eTETRAHEDRONMESH
0, //PxGeometryType::eTRIANGLEMESH
0, //PxGeometryType::eHEIGHTFIELD
SweepAnyShapeCustom, //PxGeometryType::eCUSTOM
},
};
PX_COMPILE_TIME_ASSERT(sizeof(g_SweepMethodTable) / sizeof(g_SweepMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
PxReal SweepShapeShape(GU_SWEEP_METHOD_ARGS)
{
const PxGeometryType::Enum type0 = shape0.mGeometry->getType();
const PxGeometryType::Enum type1 = shape1.mGeometry->getType();
return g_SweepMethodTable[type0][type1](shape0, shape1, transform0, transform1, lastTm0, lastTm1,
restDistance, worldNormal, worldPoint, toiEstimate, outCCDFaceIndex, fastMovingThreshold);
}
//
// lookup table for sweeps agains triangles
//
PxReal UnimplementedTriangleSweep(GU_TRIANGLE_SWEEP_METHOD_ARGS)
{
PX_UNUSED(shape0);
PX_UNUSED(shape1);
PX_UNUSED(transform0);
PX_UNUSED(transform1);
PX_UNUSED(lastTm0);
PX_UNUSED(lastTm1);
PX_UNUSED(restDistance);
PX_UNUSED(worldNormal);
PX_UNUSED(worldPoint);
PX_UNUSED(meshScaling);
PX_UNUSED(triangle);
PX_UNUSED(toiEstimate);
return 1e10f; //no impact
}
template<typename Geom>
PxReal SweepGeomTriangles(GU_TRIANGLE_SWEEP_METHOD_ARGS)
{
PX_UNUSED(meshScaling);
PX_UNUSED(shape1);
const PxGeometry& g = shape0;
//Geom geom(g);
typename ConvexGeom<Geom>::Type geom(g);
return CCDSweep<TriangleV, Geom>(triangle, geom, transform1, transform0, lastTm1, lastTm0, FLoad(toiEstimate), worldPoint, worldNormal, restDistance+getRadius<Geom>(g) );
}
typedef PxReal (*TriangleSweepMethod) (GU_TRIANGLE_SWEEP_METHOD_ARGS);
TriangleSweepMethod g_TriangleSweepMethodTable[] =
{
SweepGeomTriangles<CapsuleV>, //PxGeometryType::eSPHERE
UnimplementedTriangleSweep, //PxGeometryType::ePLANE
SweepGeomTriangles<CapsuleV>, //PxGeometryType::eCAPSULE
SweepGeomTriangles<BoxV>, //PxGeometryType::eBOX
UnimplementedTriangleSweep, //PxGeometryType::eCONVEXCORE
SweepGeomTriangles<ConvexHullV>, //PxGeometryType::eCONVEXMESH
UnimplementedTriangleSweep, //PxGeometryType::ePARTICLESYSTEM
UnimplementedTriangleSweep, //PxGeometryType::eTETRAHEDRONMESH
UnimplementedTriangleSweep, //PxGeometryType::eTRIANGLEMESH
UnimplementedTriangleSweep, //PxGeometryType::eHEIGHTFIELD
UnimplementedTriangleSweep, //PxGeometryType::eCUSTOM
};
PX_COMPILE_TIME_ASSERT(sizeof(g_TriangleSweepMethodTable) / sizeof(g_TriangleSweepMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
PxReal SweepShapeTriangle(GU_TRIANGLE_SWEEP_METHOD_ARGS)
{
const PxGeometryType::Enum type0 = shape0.getType();
const TriangleSweepMethod method = g_TriangleSweepMethodTable[type0];
return method(shape0, shape1, transform0, transform1, lastTm0, lastTm1, restDistance, worldNormal, worldPoint, meshScaling, triangle, toiEstimate);
}
}
}

View File

@@ -0,0 +1,662 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "GuEdgeList.h"
#include "GuAdjacencies.h"
#include "CmSerialize.h"
#include "CmRadixSort.h"
// PT: code archeology: this initially came from ICE (IceAdjacencies.h/cpp). Consider putting it back the way it was initially.
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
/**
* Flips the winding.
*/
void AdjTriangle::Flip()
{
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
// Call the Triangle method
IndexedTriangle::Flip();
#endif
// Flip links. We flipped vertex references 1 & 2, i.e. links 0 & 1.
physx::PxSwap(mATri[0], mATri[1]);
}
/**
* Computes the number of boundary edges in a triangle.
* \return the number of boundary edges. (0 => 3)
*/
PxU32 AdjTriangle::ComputeNbBoundaryEdges() const
{
// Look for boundary edges
PxU32 Nb = 0;
if(IS_BOUNDARY(mATri[0])) Nb++;
if(IS_BOUNDARY(mATri[1])) Nb++;
if(IS_BOUNDARY(mATri[2])) Nb++;
return Nb;
}
/**
* Computes the number of valid neighbors.
* \return the number of neighbors. (0 => 3)
*/
PxU32 AdjTriangle::ComputeNbNeighbors() const
{
PxU32 Nb = 0;
if(!IS_BOUNDARY(mATri[0])) Nb++;
if(!IS_BOUNDARY(mATri[1])) Nb++;
if(!IS_BOUNDARY(mATri[2])) Nb++;
return Nb;
}
/**
* Checks whether the triangle has a particular neighbor or not.
* \param tref [in] the triangle reference to look for
* \param index [out] the corresponding index in the triangle (NULL if not needed)
* \return true if the triangle has the given neighbor
*/
bool AdjTriangle::HasNeighbor(PxU32 tref, PxU32* index) const
{
// ### could be optimized
if(!IS_BOUNDARY(mATri[0]) && MAKE_ADJ_TRI(mATri[0])==tref) { if(index) *index = 0; return true; }
if(!IS_BOUNDARY(mATri[1]) && MAKE_ADJ_TRI(mATri[1])==tref) { if(index) *index = 1; return true; }
if(!IS_BOUNDARY(mATri[2]) && MAKE_ADJ_TRI(mATri[2])==tref) { if(index) *index = 2; return true; }
return false;
}
Adjacencies::Adjacencies() : mNbFaces(0), mFaces(NULL)
{
}
Adjacencies::~Adjacencies()
{
PX_DELETE_ARRAY(mFaces);
}
/**
* Computes the number of boundary edges.
* \return the number of boundary edges.
*/
PxU32 Adjacencies::ComputeNbBoundaryEdges() const
{
if(!mFaces)
return 0;
// Look for boundary edges
PxU32 Nb = 0;
for(PxU32 i=0;i<mNbFaces;i++)
{
AdjTriangle* CurTri = &mFaces[i];
Nb+=CurTri->ComputeNbBoundaryEdges();
}
return Nb;
}
/**
* Computes the boundary vertices. A boundary vertex is defined as a vertex shared by at least one boundary edge.
* \param nb_verts [in] the number of vertices
* \param bound_status [out] a user-provided array of bool
* \return true if success. The user-array is filled with true or false (boundary vertex / not boundary vertex)
*/
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
bool Adjacencies::GetBoundaryVertices(PxU32 nb_verts, bool* bound_status) const
#else
bool Adjacencies::GetBoundaryVertices(PxU32 nb_verts, bool* bound_status, const IndexedTriangle32* faces) const
#endif
{
// We need the adjacencies
if(!mFaces || !bound_status || !nb_verts)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "Adjacencies::GetBoundaryVertices: NULL parameter!");
#ifndef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
if(!faces)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "Adjacencies::GetBoundaryVertices: NULL parameter!");
#endif
// Init
PxMemZero(bound_status, nb_verts*sizeof(bool));
// Loop through faces
for(PxU32 i=0;i<mNbFaces;i++)
{
AdjTriangle* CurTri = &mFaces[i];
if(IS_BOUNDARY(CurTri->mATri[0]))
{
// Two boundary vertices: 0 - 1
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
PxU32 VRef0 = CurTri->v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = CurTri->v[1]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#else
PxU32 VRef0 = faces[i].mRef[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = faces[i].mRef[1]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#endif
}
if(IS_BOUNDARY(CurTri->mATri[1]))
{
// Two boundary vertices: 0 - 2
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
PxU32 VRef0 = CurTri->v[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = CurTri->v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#else
PxU32 VRef0 = faces[i].mRef[0]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = faces[i].mRef[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#endif
}
if(IS_BOUNDARY(CurTri->mATri[2]))
{
// Two boundary vertices: 1 - 2
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
PxU32 VRef0 = CurTri->v[1]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = CurTri->v[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#else
PxU32 VRef0 = faces[i].mRef[1]; if(VRef0>=nb_verts) return false; bound_status[VRef0] = true;
PxU32 VRef1 = faces[i].mRef[2]; if(VRef1>=nb_verts) return false; bound_status[VRef1] = true;
#endif
}
}
return true;
}
/**
* Assigns a new edge code to the counterpart link of a given link.
* \param link [in] the link to modify - shouldn't be a boundary link
* \param edge_nb [in] the new edge number
*/
void Adjacencies::AssignNewEdgeCode(PxU32 link, PxU8 edge_nb)
{
if(!IS_BOUNDARY(link))
{
PxU32 Id = MAKE_ADJ_TRI(link); // Triangle ID
PxU32 Edge = GET_EDGE_NB(link); // Counterpart edge ID
AdjTriangle* Tri = &mFaces[Id]; // Adjacent triangle
// Get link whose edge code is invalid
PxU32 AdjLink = Tri->mATri[Edge]; // Link to ourself (i.e. to 'link')
SET_EDGE_NB(AdjLink, edge_nb); // Assign new edge code
Tri->mATri[Edge] = AdjLink; // Put link back
}
}
/**
* Modifies the existing database so that reference 'vref' of triangle 'curtri' becomes the last one.
* Provided reference must already exist in provided triangle.
* \param cur_tri [in] the triangle
* \param vref [in] the reference
* \return true if success.
*/
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
bool Adjacencies::MakeLastRef(AdjTriangle& cur_tri, PxU32 vref)
#else
bool Adjacencies::MakeLastRef(AdjTriangle& cur_tri, PxU32 vref, IndexedTriangle32* cur_topo)
#endif
{
#ifndef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
if(!cur_topo)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "Adjacencies::MakeLastRef: NULL parameter!");
#endif
// We want pattern (x y vref)
// Edge 0-1 is (x y)
// Edge 0-2 is (x vref)
// Edge 1-2 is (y vref)
// First thing is to scroll the existing references in order for vref to become the last one. Scrolling assures winding order is conserved.
// Edge code need fixing as well:
// The two MSB for each link encode the counterpart edge in adjacent triangle. We swap the link positions, but adjacent triangles remain the
// same. In other words, edge codes are still valid for current triangle since counterpart edges have not been swapped. *BUT* edge codes of
// the three possible adjacent triangles *are* now invalid. We need to fix edge codes, but for adjacent triangles...
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
if(cur_tri.v[0]==vref)
#else
if(cur_topo->mRef[0]==vref)
#endif
{
// Pattern is (vref x y)
// Edge 0-1 is (vref x)
// Edge 0-2 is (vref y)
// Edge 1-2 is (x y)
// Catch original data
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
PxU32 Ref0 = cur_tri.v[0]; PxU32 Link01 = cur_tri.mATri[0];
PxU32 Ref1 = cur_tri.v[1]; PxU32 Link02 = cur_tri.mATri[1];
PxU32 Ref2 = cur_tri.v[2]; PxU32 Link12 = cur_tri.mATri[2];
// Swap
cur_tri.v[0] = Ref1;
cur_tri.v[1] = Ref2;
cur_tri.v[2] = Ref0;
#else
PxU32 Ref0 = cur_topo->mRef[0]; PxU32 Link01 = cur_tri.mATri[0];
PxU32 Ref1 = cur_topo->mRef[1]; PxU32 Link02 = cur_tri.mATri[1];
PxU32 Ref2 = cur_topo->mRef[2]; PxU32 Link12 = cur_tri.mATri[2];
// Swap
cur_topo->mRef[0] = Ref1;
cur_topo->mRef[1] = Ref2;
cur_topo->mRef[2] = Ref0;
#endif
cur_tri.mATri[0] = Link12; // Edge 0-1 now encodes Ref1-Ref2, i.e. previous Link12
cur_tri.mATri[1] = Link01; // Edge 0-2 now encodes Ref1-Ref0, i.e. previous Link01
cur_tri.mATri[2] = Link02; // Edge 1-2 now encodes Ref2-Ref0, i.e. previous Link02
// Fix edge codes
AssignNewEdgeCode(Link01, 1);
AssignNewEdgeCode(Link02, 2);
AssignNewEdgeCode(Link12, 0);
return true;
}
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
else if(cur_tri.v[1]==vref)
#else
else if(cur_topo->mRef[1]==vref)
#endif
{
// Pattern is (x vref y)
// Edge 0-1 is (x vref)
// Edge 0-2 is (x y)
// Edge 1-2 is (vref y)
// Catch original data
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
PxU32 Ref0 = cur_tri.v[0]; PxU32 Link01 = cur_tri.mATri[0];
PxU32 Ref1 = cur_tri.v[1]; PxU32 Link02 = cur_tri.mATri[1];
PxU32 Ref2 = cur_tri.v[2]; PxU32 Link12 = cur_tri.mATri[2];
// Swap
cur_tri.v[0] = Ref2;
cur_tri.v[1] = Ref0;
cur_tri.v[2] = Ref1;
#else
PxU32 Ref0 = cur_topo->mRef[0]; PxU32 Link01 = cur_tri.mATri[0];
PxU32 Ref1 = cur_topo->mRef[1]; PxU32 Link02 = cur_tri.mATri[1];
PxU32 Ref2 = cur_topo->mRef[2]; PxU32 Link12 = cur_tri.mATri[2];
// Swap
cur_topo->mRef[0] = Ref2;
cur_topo->mRef[1] = Ref0;
cur_topo->mRef[2] = Ref1;
#endif
cur_tri.mATri[0] = Link02; // Edge 0-1 now encodes Ref2-Ref0, i.e. previous Link02
cur_tri.mATri[1] = Link12; // Edge 0-2 now encodes Ref2-Ref1, i.e. previous Link12
cur_tri.mATri[2] = Link01; // Edge 1-2 now encodes Ref0-Ref1, i.e. previous Link01
// Fix edge codes
AssignNewEdgeCode(Link01, 2);
AssignNewEdgeCode(Link02, 0);
AssignNewEdgeCode(Link12, 1);
return true;
}
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
else if(cur_tri.v[2]==vref)
#else
else if(cur_topo->mRef[2]==vref)
#endif
{
// Nothing to do, provided reference already is the last one
return true;
}
// Here the provided reference doesn't belong to the provided triangle.
return false;
}
bool Adjacencies::Load(PxInputStream& stream)
{
// Import header
PxU32 Version;
bool Mismatch;
if(!ReadHeader('A', 'D', 'J', 'A', Version, Mismatch, stream))
return false;
// Import adjacencies
mNbFaces = readDword(Mismatch, stream);
mFaces = PX_NEW(AdjTriangle)[mNbFaces];
stream.read(mFaces, sizeof(AdjTriangle)*mNbFaces);
return true;
}
//#ifdef PX_COOKING
//! An edge class used to compute the adjacency structures.
class AdjEdge : public EdgeData, public PxUserAllocated
{
public:
PX_INLINE AdjEdge() {}
PX_INLINE ~AdjEdge() {}
PxU32 mFaceNb; //!< Owner face
};
/**
* Adds a new edge to the database.
* \param ref0 [in] vertex reference for the new edge
* \param ref1 [in] vertex reference for the new edge
* \param face [in] owner face
*/
static void AddEdge(PxU32 ref0, PxU32 ref1, PxU32 face, PxU32& nb_edges, AdjEdge* edges)
{
// Store edge data
edges[nb_edges].Ref0 = ref0;
edges[nb_edges].Ref1 = ref1;
edges[nb_edges].mFaceNb = face;
nb_edges++;
}
/**
* Adds a new triangle to the database.
* \param ref0 [in] vertex reference for the new triangle
* \param ref1 [in] vertex reference for the new triangle
* \param ref2 [in] vertex reference for the new triangle
* \param id [in] triangle index
*/
static void AddTriangle(PxU32 ref0, PxU32 ref1, PxU32 ref2, PxU32 id, AdjTriangle* faces, PxU32& nb_edges, AdjEdge* edges)
{
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
// Store vertex-references
faces[id].v[0] = ref0;
faces[id].v[1] = ref1;
faces[id].v[2] = ref2;
#endif
// Reset links
faces[id].mATri[0] = PX_INVALID_U32;
faces[id].mATri[1] = PX_INVALID_U32;
faces[id].mATri[2] = PX_INVALID_U32;
// Add edge 01 to database
if(ref0<ref1) AddEdge(ref0, ref1, id, nb_edges, edges);
else AddEdge(ref1, ref0, id, nb_edges, edges);
// Add edge 02 to database
if(ref0<ref2) AddEdge(ref0, ref2, id, nb_edges, edges);
else AddEdge(ref2, ref0, id, nb_edges, edges);
// Add edge 12 to database
if(ref1<ref2) AddEdge(ref1, ref2, id, nb_edges, edges);
else AddEdge(ref2, ref1, id, nb_edges, edges);
}
/**
* Updates the links in two adjacent triangles.
* \param first_tri [in] index of the first triangle
* \param second_tri [in] index of the second triangle
* \param ref0 [in] the common edge's first vertex reference
* \param ref1 [in] the common edge's second vertex reference
* \return true if success.
*/
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
static bool UpdateLink(PxU32 first_tri, PxU32 second_tri, PxU32 ref0, PxU32 ref1, AdjTriangle* faces)
#else
static bool UpdateLink(PxU32 first_tri, PxU32 second_tri, PxU32 ref0, PxU32 ref1, AdjTriangle* faces, const ADJACENCIESCREATE& create)
#endif
{
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
AdjTriangle& Tri0 = faces[first_tri]; // Catch the first triangle
AdjTriangle& Tri1 = faces[second_tri]; // Catch the second triangle
// Get the edge IDs. 0xff means input references are wrong.
PxU8 EdgeNb0 = Tri0.FindEdge(ref0, ref1); if(EdgeNb0==0xff) return SetIceError("Adjacencies::UpdateLink: invalid edge reference in first triangle");
PxU8 EdgeNb1 = Tri1.FindEdge(ref0, ref1); if(EdgeNb1==0xff) return SetIceError("Adjacencies::UpdateLink: invalid edge reference in second triangle");
// Update links. The two most significant bits contain the counterpart edge's ID.
Tri0.mATri[EdgeNb0] = second_tri |(PxU32(EdgeNb1)<<30);
Tri1.mATri[EdgeNb1] = first_tri |(PxU32(EdgeNb0)<<30);
#else
IndexedTriangle32 FirstTri, SecondTri;
if(create.DFaces)
{
FirstTri.mRef[0] = create.DFaces[first_tri*3+0];
FirstTri.mRef[1] = create.DFaces[first_tri*3+1];
FirstTri.mRef[2] = create.DFaces[first_tri*3+2];
SecondTri.mRef[0] = create.DFaces[second_tri*3+0];
SecondTri.mRef[1] = create.DFaces[second_tri*3+1];
SecondTri.mRef[2] = create.DFaces[second_tri*3+2];
}
if(create.WFaces)
{
FirstTri.mRef[0] = create.WFaces[first_tri*3+0];
FirstTri.mRef[1] = create.WFaces[first_tri*3+1];
FirstTri.mRef[2] = create.WFaces[first_tri*3+2];
SecondTri.mRef[0] = create.WFaces[second_tri*3+0];
SecondTri.mRef[1] = create.WFaces[second_tri*3+1];
SecondTri.mRef[2] = create.WFaces[second_tri*3+2];
}
// Get the edge IDs. 0xff means input references are wrong.
const PxU8 EdgeNb0 = FirstTri.findEdge(ref0, ref1);
const PxU8 EdgeNb1 = SecondTri.findEdge(ref0, ref1);
if(EdgeNb0==0xff || EdgeNb1==0xff)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "Adjacencies::UpdateLink: invalid edge reference");
// Update links. The two most significant bits contain the counterpart edge's ID.
faces[first_tri].mATri[EdgeNb0] = second_tri |(PxU32(EdgeNb1)<<30);
faces[second_tri].mATri[EdgeNb1] = first_tri |(PxU32(EdgeNb0)<<30);
#endif
return true;
}
/**
* Creates the adjacency structures.
* \return true if success.
*/
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
static bool CreateDatabase(AdjTriangle* faces, PxU32 nb_edges, const AdjEdge* edges)
#else
static bool CreateDatabase(AdjTriangle* faces, PxU32 nb_edges, const AdjEdge* edges, const ADJACENCIESCREATE& create)
#endif
{
RadixSortBuffered Core;
{
// Multiple sorts - this rewritten version uses less ram
// PT: TTP 2994: the mesh has 343000+ edges, so yeah, sure, allocating more than 1mb on the stack causes overflow...
PxU32* VRefs = PX_ALLOCATE(PxU32, nb_edges, "tmp");
// Sort according to mRef0, then mRef1
PxU32 i;
for(i=0;i<nb_edges;i++)
VRefs[i] = edges[i].Ref0;
Core.Sort(VRefs, nb_edges);
for(i=0;i<nb_edges;i++)
VRefs[i] = edges[i].Ref1;
Core.Sort(VRefs, nb_edges);
PX_FREE(VRefs);
}
const PxU32* Sorted = Core.GetRanks();
// Read the list in sorted order, look for similar edges
PxU32 LastRef0 = edges[Sorted[0]].Ref0;
PxU32 LastRef1 = edges[Sorted[0]].Ref1;
PxU32 Count = 0;
PxU32 TmpBuffer[3];
while(nb_edges--)
{
PxU32 SortedIndex = *Sorted++;
PxU32 Face = edges[SortedIndex].mFaceNb; // Owner face
PxU32 Ref0 = edges[SortedIndex].Ref0; // Vertex ref #1
PxU32 Ref1 = edges[SortedIndex].Ref1; // Vertex ref #2
if(Ref0==LastRef0 && Ref1==LastRef1)
{
// Current edge is the same as last one
TmpBuffer[Count++] = Face; // Store face number
// Only works with manifold meshes (i.e. an edge is not shared by more than 2 triangles)
if(Count==3)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "Adjacencies::CreateDatabase: can't work on non-manifold meshes.");
}
else
{
// Here we have a new edge (LastRef0, LastRef1) shared by Count triangles stored in TmpBuffer
if(Count==2)
{
// if Count==1 => edge is a boundary edge: it belongs to a single triangle.
// Hence there's no need to update a link to an adjacent triangle.
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
if(!UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces)) return false;
#else
if(!UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces, create)) return false;
#endif
}
// Reset for next edge
Count = 0;
TmpBuffer[Count++] = Face;
LastRef0 = Ref0;
LastRef1 = Ref1;
}
}
bool Status = true;
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
if(Count==2) Status = UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces);
#else
if(Count==2) Status = UpdateLink(TmpBuffer[0], TmpBuffer[1], LastRef0, LastRef1, faces, create);
#endif
return Status;
}
AdjacenciesBuilder::AdjacenciesBuilder()
{
}
AdjacenciesBuilder::~AdjacenciesBuilder()
{
}
/**
* Initializes the component.
* \param create [in] the creation structure
* \return true if success.
*/
bool AdjacenciesBuilder::Init(const ADJACENCIESCREATE& create)
{
if(!create.NbFaces)
return false;
// Get some bytes
mNbFaces = create.NbFaces;
mFaces = PX_NEW(AdjTriangle)[mNbFaces];
AdjEdge* Edges = PX_NEW(AdjEdge)[mNbFaces*3];
PxU32 NbEdges=0;
// Feed me with triangles.....
for(PxU32 i=0;i<mNbFaces;i++)
{
// Get correct vertex references
const PxU32 Ref0 = create.DFaces ? create.DFaces[i*3+0] : create.WFaces ? create.WFaces[i*3+0] : 0;
const PxU32 Ref1 = create.DFaces ? create.DFaces[i*3+1] : create.WFaces ? create.WFaces[i*3+1] : 1;
const PxU32 Ref2 = create.DFaces ? create.DFaces[i*3+2] : create.WFaces ? create.WFaces[i*3+2] : 2;
// Add a triangle to the database
AddTriangle(Ref0, Ref1, Ref2, i, mFaces, NbEdges, Edges);
}
// At this point of the process we have mFaces & Edges filled with input data. That is:
// - a list of triangles with 3 NULL links (i.e. PX_INVALID_U32)
// - a list of mNbFaces*3 edges, each edge having 2 vertex references and an owner face.
// Here NbEdges should be equal to mNbFaces*3.
PX_ASSERT(NbEdges==mNbFaces*3);
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
bool Status = CreateDatabase(mFaces, NbEdges, Edges);
#else
bool Status = CreateDatabase(mFaces, NbEdges, Edges, create);
#endif
// We don't need the edges anymore
PX_DELETE_ARRAY(Edges);
#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
// Now create convex information. This creates coupling between adjacencies & edge-list but in this case it's actually the goal:
// mixing the two structures to save memory.
if(Status && create.Verts)
{
EDGELISTCREATE ELC;
ELC.NbFaces = create.NbFaces;
ELC.DFaces = create.DFaces; // That's where I like having a unified way to do things... We
ELC.WFaces = create.WFaces; // can just directly copy the same pointers.
ELC.FacesToEdges = true;
ELC.Verts = create.Verts;
ELC.Epsilon = create.Epsilon;
EdgeList EL;
if(EL.init(ELC))
{
for(PxU32 i=0;i<mNbFaces;i++)
{
const EdgeTriangleData& ET = EL.getEdgeTriangle(i);
if(EdgeTriangleAC::HasActiveEdge01(ET)) mFaces[i].mATri[EDGE01] |= 0x20000000;
else mFaces[i].mATri[EDGE01] &= ~0x20000000;
if(EdgeTriangleAC::HasActiveEdge20(ET)) mFaces[i].mATri[EDGE02] |= 0x20000000;
else mFaces[i].mATri[EDGE02] &= ~0x20000000;
if(EdgeTriangleAC::HasActiveEdge12(ET)) mFaces[i].mATri[EDGE12] |= 0x20000000;
else mFaces[i].mATri[EDGE12] &= ~0x20000000;
PX_ASSERT((EdgeTriangleAC::HasActiveEdge01(ET) && mFaces[i].HasActiveEdge01()) || (!EdgeTriangleAC::HasActiveEdge01(ET) && !mFaces[i].HasActiveEdge01()));
PX_ASSERT((EdgeTriangleAC::HasActiveEdge20(ET) && mFaces[i].HasActiveEdge20()) || (!EdgeTriangleAC::HasActiveEdge20(ET) && !mFaces[i].HasActiveEdge20()));
PX_ASSERT((EdgeTriangleAC::HasActiveEdge12(ET) && mFaces[i].HasActiveEdge12()) || (!EdgeTriangleAC::HasActiveEdge12(ET) && !mFaces[i].HasActiveEdge12()));
}
}
}
#endif
return Status;
}
/*
bool AdjacenciesBuilder::Save(Stream& stream) const
{
bool PlatformMismatch = PxPlatformMismatch();
// Export header
if(!WriteHeader('A', 'D', 'J', 'A', gVersion, PlatformMismatch, stream))
return false;
// Export adjacencies
// stream.StoreDword(mNbFaces);
WriteDword(mNbFaces, PlatformMismatch, stream);
// stream.StoreBuffer(mFaces, sizeof(AdjTriangle)*mNbFaces);
WriteDwordBuffer((const PxU32*)mFaces, mNbFaces*3, PlatformMismatch, stream);
return true;
}*/
//#endif

View File

@@ -0,0 +1,229 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_ADJACENCIES_H
#define GU_ADJACENCIES_H
#define MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
#include "GuTriangle.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxIO.h"
namespace physx
{
namespace Gu
{
#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
#define ADJ_TRIREF_MASK 0x1fffffff //!< Masks 3 bits
#define IS_CONVEX_EDGE(x) (x & 0x20000000) //!< Returns true for convex edges
#else
#define ADJ_TRIREF_MASK 0x3fffffff //!< Masks 2 bits
#endif
#define MAKE_ADJ_TRI(x) (x & ADJ_TRIREF_MASK) //!< Transforms a link into a triangle reference.
#define GET_EDGE_NB(x) (x>>30) //!< Transforms a link into a counterpart edge ID.
// #define IS_BOUNDARY(x) (x==PX_INVALID_U32) //!< Returns true for boundary edges.
#define IS_BOUNDARY(x) ((x & ADJ_TRIREF_MASK)==ADJ_TRIREF_MASK) //!< Returns true for boundary edges.
// Forward declarations
class Adjacencies;
enum SharedEdgeIndex
{
EDGE01 = 0,
EDGE02 = 1,
EDGE12 = 2
};
/* PX_INLINE void GetEdgeIndices(SharedEdgeIndex edge_index, PxU32& id0, PxU32& id1)
{
if(edge_index==0)
{
id0 = 0;
id1 = 1;
}
else if(edge_index==1)
{
id0 = 0;
id1 = 2;
}
else if(edge_index==2)
{
id0 = 1;
id1 = 2;
}
}*/
//! Sets a new edge code
#define SET_EDGE_NB(link, code) \
link&=ADJ_TRIREF_MASK; \
link|=code<<30; \
//! A triangle class used to compute the adjacency structures.
class AdjTriangle
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
: public IndexedTriangle
#else
: public PxUserAllocated
#endif
{
public:
//! Constructor
PX_INLINE AdjTriangle() {}
//! Destructor
PX_INLINE ~AdjTriangle() {}
/**
* Computes the number of boundary edges in a triangle.
* \return the number of boundary edges. (0 => 3)
*/
PxU32 ComputeNbBoundaryEdges() const;
/**
* Computes the number of valid neighbors.
* \return the number of neighbors. (0 => 3)
*/
PxU32 ComputeNbNeighbors() const;
/**
* Checks whether the triangle has a particular neighbor or not.
* \param tref [in] the triangle reference to look for
* \param index [out] the corresponding index in the triangle (NULL if not needed)
* \return true if the triangle has the given neighbor
*/
bool HasNeighbor(PxU32 tref, PxU32* index=NULL) const;
/**
* Flips the winding.
*/
void Flip();
// Data access
PX_INLINE PxU32 GetLink(SharedEdgeIndex edge_index) const { return mATri[edge_index]; }
PX_INLINE PxU32 GetAdjTri(SharedEdgeIndex edge_index) const { return MAKE_ADJ_TRI(mATri[edge_index]); }
PX_INLINE PxU32 GetAdjEdge(SharedEdgeIndex edge_index) const { return GET_EDGE_NB(mATri[edge_index]); }
PX_INLINE PxIntBool IsBoundaryEdge(SharedEdgeIndex edge_index) const { return IS_BOUNDARY(mATri[edge_index]); }
#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
PX_INLINE PxIntBool HasActiveEdge01() const { return PxIntBool(IS_CONVEX_EDGE(mATri[EDGE01])); }
PX_INLINE PxIntBool HasActiveEdge20() const { return PxIntBool(IS_CONVEX_EDGE(mATri[EDGE02])); }
PX_INLINE PxIntBool HasActiveEdge12() const { return PxIntBool(IS_CONVEX_EDGE(mATri[EDGE12])); }
PX_INLINE PxIntBool HasActiveEdge(PxU32 i) const { return PxIntBool(IS_CONVEX_EDGE(mATri[i])); }
#endif
// private:
//! Links/References of adjacent triangles. The 2 most significant bits contains the counterpart edge in the adjacent triangle.
//! mATri[0] refers to edge 0-1
//! mATri[1] refers to edge 0-2
//! mATri[2] refers to edge 1-2
PxU32 mATri[3];
};
//! The adjacencies creation structure.
struct ADJACENCIESCREATE
{
//! Constructor
ADJACENCIESCREATE() : NbFaces(0), DFaces(NULL), WFaces(NULL)
{
#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
Verts = NULL;
Epsilon = 0.1f;
// Epsilon = 0.001f;
#endif
}
PxU32 NbFaces; //!< Number of faces in source topo
const PxU32* DFaces; //!< List of faces (dwords) or NULL
const PxU16* WFaces; //!< List of faces (words) or NULL
#ifdef MSH_ADJACENCIES_INCLUDE_CONVEX_BITS
const PxVec3* Verts;
float Epsilon;
#endif
};
class Adjacencies : public PxUserAllocated
{
public:
Adjacencies();
~Adjacencies();
PxU32 mNbFaces; //!< Number of faces involved in the computation.
AdjTriangle* mFaces; //!< A list of AdjTriangles (one/face)
bool Load(PxInputStream& stream);
// Basic mesh walking
PX_INLINE const AdjTriangle* GetAdjacentFace(const AdjTriangle& current_tri, SharedEdgeIndex edge_nb) const
{
// No checkings here, make sure mFaces has been created
// Catch the link
PxU32 Link = current_tri.GetLink(edge_nb);
// Returns NULL for boundary edges
if(IS_BOUNDARY(Link)) return NULL;
// Else transform into face index
PxU32 Id = MAKE_ADJ_TRI(Link);
// Possible counterpart edge is:
// PxU32 Edge = GET_EDGE_NB(Link);
// And returns adjacent triangle
return &mFaces[Id];
}
// Helpers
PxU32 ComputeNbBoundaryEdges() const;
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
bool GetBoundaryVertices(PxU32 nb_verts, bool* bound_status) const;
#else
bool GetBoundaryVertices(PxU32 nb_verts, bool* bound_status, const IndexedTriangle32* faces) const;
#endif
//
#ifdef MSH_ADJACENCIES_INCLUDE_TOPOLOGY
bool MakeLastRef(AdjTriangle& cur_tri, PxU32 vref);
#else
bool MakeLastRef(AdjTriangle& cur_tri, PxU32 vref, IndexedTriangle32* cur_topo);
#endif
private:
// New edge codes assignment
void AssignNewEdgeCode(PxU32 link, PxU8 edge_nb);
};
//#ifdef PX_COOKING
class AdjacenciesBuilder : public Adjacencies
{
public:
AdjacenciesBuilder();
~AdjacenciesBuilder();
bool Init(const ADJACENCIESCREATE& create);
// bool Save(Stream& stream) const;
};
//#endif
}
}
#endif

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuBarycentricCoordinates.h"
using namespace physx;
using namespace aos;
void Gu::barycentricCoordinates(const Vec3VArg p, const Vec3VArg a, const Vec3VArg b, FloatV& v)
{
const Vec3V v0 = V3Sub(a, p);
const Vec3V v1 = V3Sub(b, p);
const Vec3V d = V3Sub(v1, v0);
const FloatV denominator = V3Dot(d, d);
const FloatV numerator = V3Dot(V3Neg(v0), d);
const FloatV zero = FZero();
const FloatV denom = FSel(FIsGrtr(denominator, zero), FRecip(denominator), zero);
v = FMul(numerator, denom);
}
void Gu::barycentricCoordinates(const aos::Vec3VArg p, const aos::Vec3VArg a, const aos::Vec3VArg b, const aos::Vec3VArg c, aos::FloatV& v, aos::FloatV& w)
{
const Vec3V ab = V3Sub(b, a);
const Vec3V ac = V3Sub(c, a);
const Vec3V n = V3Cross(ab, ac);
const VecCrossV crossA = V3PrepareCross(V3Sub(a, p));
const VecCrossV crossB = V3PrepareCross(V3Sub(b, p));
const VecCrossV crossC = V3PrepareCross(V3Sub(c, p));
const Vec3V bCrossC = V3Cross(crossB, crossC);
const Vec3V cCrossA = V3Cross(crossC, crossA);
const Vec3V aCrossB = V3Cross(crossA, crossB);
const FloatV va = V3Dot(n, bCrossC);//edge region of BC, signed area rbc, u = S(rbc)/S(abc) for a
const FloatV vb = V3Dot(n, cCrossA);//edge region of AC, signed area rac, v = S(rca)/S(abc) for b
const FloatV vc = V3Dot(n, aCrossB);//edge region of AB, signed area rab, w = S(rab)/S(abc) for c
const FloatV totalArea = FAdd(va, FAdd(vb, vc));
const FloatV zero = FZero();
const FloatV denom = FSel(FIsEq(totalArea, zero), zero, FRecip(totalArea));
v = FMul(vb, denom);
w = FMul(vc, denom);
}
// v0 = b - a;
// v1 = c - a;
// v2 = p - a;
void Gu::barycentricCoordinates(const Vec3VArg v0, const Vec3VArg v1, const Vec3VArg v2, FloatV& v, FloatV& w)
{
const FloatV d00 = V3Dot(v0, v0);
const FloatV d01 = V3Dot(v0, v1);
const FloatV d11 = V3Dot(v1, v1);
const FloatV d20 = V3Dot(v2, v0);
const FloatV d21 = V3Dot(v2, v1);
const FloatV denom = FRecip(FSub(FMul(d00,d11), FMul(d01, d01)));
v = FMul(FSub(FMul(d11, d20), FMul(d01, d21)), denom);
w = FMul(FSub(FMul(d00, d21), FMul(d01, d20)), denom);
}

View File

@@ -0,0 +1,91 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BARYCENTRIC_COORDINATES_H
#define GU_BARYCENTRIC_COORDINATES_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVecMath.h"
namespace physx
{
namespace Gu
{
//calculate the barycentric coorinates for a point in a segment
void barycentricCoordinates(const aos::Vec3VArg p,
const aos::Vec3VArg a,
const aos::Vec3VArg b,
aos::FloatV& v);
//calculate the barycentric coorinates for a point in a triangle
void barycentricCoordinates(const aos::Vec3VArg p,
const aos::Vec3VArg a,
const aos::Vec3VArg b,
const aos::Vec3VArg c,
aos::FloatV& v,
aos::FloatV& w);
void barycentricCoordinates(const aos::Vec3VArg v0,
const aos::Vec3VArg v1,
const aos::Vec3VArg v2,
aos::FloatV& v,
aos::FloatV& w);
PX_INLINE aos::BoolV isValidTriangleBarycentricCoord(const aos::FloatVArg v, const aos::FloatVArg w)
{
using namespace aos;
const FloatV zero = FNeg(FEps());
const FloatV one = FAdd(FOne(), FEps());
const BoolV con0 = BAnd(FIsGrtrOrEq(v, zero), FIsGrtrOrEq(one, v));
const BoolV con1 = BAnd(FIsGrtrOrEq(w, zero), FIsGrtrOrEq(one, w));
const BoolV con2 = FIsGrtr(one, FAdd(v, w));
return BAnd(con0, BAnd(con1, con2));
}
PX_INLINE aos::BoolV isValidTriangleBarycentricCoord2(const aos::Vec4VArg vwvw)
{
using namespace aos;
const Vec4V eps = V4Splat(FEps());
const Vec4V zero = V4Neg(eps);
const Vec4V one = V4Add(V4One(), eps);
const Vec4V v0v1v0v1 = V4PermXZXZ(vwvw);
const Vec4V w0w1w0w1 = V4PermYWYW(vwvw);
const BoolV con0 = BAnd(V4IsGrtrOrEq(v0v1v0v1, zero), V4IsGrtrOrEq(one, v0v1v0v1));
const BoolV con1 = BAnd(V4IsGrtrOrEq(w0w1w0w1, zero), V4IsGrtrOrEq(one, w0w1w0w1));
const BoolV con2 = V4IsGrtr(one, V4Add(v0v1v0v1, w0w1w0w1));
return BAnd(con0, BAnd(con1, con2));
}
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,120 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BOX_CONVERSION_H
#define GU_BOX_CONVERSION_H
#include "GuBox.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxMat34.h"
#include "foundation/PxVecMath.h"
namespace physx
{
// PT: builds rot from quat. WARNING: writes 4 bytes after 'dst.rot'.
PX_FORCE_INLINE void buildFrom(Gu::Box& dst, const PxQuat& q)
{
using namespace aos;
const QuatV qV = V4LoadU(&q.x);
Vec3V column0, column1, column2;
QuatGetMat33V(qV, column0, column1, column2);
// PT: TODO: investigate if these overlapping stores are a problem
V4StoreU(Vec4V_From_Vec3V(column0), &dst.rot.column0.x);
V4StoreU(Vec4V_From_Vec3V(column1), &dst.rot.column1.x);
V4StoreU(Vec4V_From_Vec3V(column2), &dst.rot.column2.x);
}
PX_FORCE_INLINE void buildFrom(Gu::Box& dst, const PxVec3& center, const PxVec3& extents, const PxQuat& q)
{
using namespace aos;
// PT: writes 4 bytes after 'rot' but it's safe since we then write 'center' just afterwards
buildFrom(dst, q);
dst.center = center;
dst.extents = extents;
}
PX_FORCE_INLINE void buildMatrixFromBox(PxMat34& mat34, const Gu::Box& box)
{
mat34.m = box.rot;
mat34.p = box.center;
}
// SD: function is now the same as FastVertex2ShapeScaling::transformQueryBounds
// PT: lots of LHS in that one. TODO: revisit...
PX_INLINE Gu::Box transform(const PxMat34& transfo, const Gu::Box& box)
{
Gu::Box ret;
PxMat33& obbBasis = ret.rot;
obbBasis.column0 = transfo.rotate(box.rot.column0 * box.extents.x);
obbBasis.column1 = transfo.rotate(box.rot.column1 * box.extents.y);
obbBasis.column2 = transfo.rotate(box.rot.column2 * box.extents.z);
ret.center = transfo.transform(box.center);
ret.extents = PxOptimizeBoundingBox(obbBasis);
return ret;
}
PX_INLINE Gu::Box transformBoxOrthonormal(const Gu::Box& box, const PxTransform& t)
{
Gu::Box ret;
PxMat33& obbBasis = ret.rot;
obbBasis.column0 = t.rotate(box.rot.column0);
obbBasis.column1 = t.rotate(box.rot.column1);
obbBasis.column2 = t.rotate(box.rot.column2);
ret.center = t.transform(box.center);
ret.extents = box.extents;
return ret;
}
/**
\brief recomputes the OBB after an arbitrary transform by a 4x4 matrix.
\param mtx [in] the transform matrix
\param obb [out] the transformed OBB
*/
PX_INLINE void rotate(const Gu::Box& src, const PxMat34& mtx, Gu::Box& obb)
{
// The extents remain constant
obb.extents = src.extents;
// The center gets x-formed
obb.center = mtx.transform(src.center);
// Combine rotations
obb.rot = mtx.m * src.rot;
}
// PT: TODO: move this to a better place
PX_FORCE_INLINE void getInverse(PxMat33& dstRot, PxVec3& dstTrans, const PxMat33& srcRot, const PxVec3& srcTrans)
{
const PxMat33 invRot = srcRot.getInverse();
dstTrans = invRot.transform(-srcTrans);
dstRot = invRot;
}
}
#endif

View File

@@ -0,0 +1,84 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_EDGECACHE_H
#define GU_EDGECACHE_H
#include "foundation/PxMemory.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxHash.h"
namespace physx
{
namespace Gu
{
class EdgeCache
{
#define NUM_EDGES_IN_CACHE 64 //must be power of 2. 32 lines result in 10% extra work (due to cache misses), 64 lines in 6% extra work, 128 lines in 4%.
public:
EdgeCache()
{
PxMemZero(cacheLines, NUM_EDGES_IN_CACHE*sizeof(CacheLine));
}
PxU32 hash(PxU32 key) const
{
return (NUM_EDGES_IN_CACHE - 1) & PxComputeHash(key); //Only a 16 bit hash would be needed here.
}
bool isInCache(PxU8 vertex0, PxU8 vertex1)
{
PX_ASSERT(vertex1 >= vertex0);
PxU16 key = PxU16((vertex0 << 8) | vertex1);
PxU32 h = hash(key);
CacheLine& cl = cacheLines[h];
if (cl.fullKey == key)
{
return true;
}
else //cache the line now as it's about to be processed
{
cl.fullKey = key;
return false;
}
}
private:
struct CacheLine
{
PxU16 fullKey;
};
CacheLine cacheLines[NUM_EDGES_IN_CACHE];
#undef NUM_EDGES_IN_CACHE
};
}
}
#endif

View File

@@ -0,0 +1,719 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "geometry/PxTriangle.h"
#include "GuEdgeList.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxPlane.h"
#include "CmRadixSort.h"
#include "CmSerialize.h"
// PT: code archeology: this initially came from ICE (IceEdgeList.h/cpp). Consider putting it back the way it was initially.
// It makes little sense that something like EdgeList is in GeomUtils but some equivalent class like Adjacencies in is Cooking.
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
EdgeList::EdgeList() :
mNbEdges (0),
mEdges (NULL),
mNbFaces (0),
mEdgeFaces (NULL),
mEdgeToTriangles (NULL),
mFacesByEdges (NULL)
{
}
EdgeList::~EdgeList()
{
PX_FREE(mFacesByEdges);
PX_FREE(mEdgeToTriangles);
PX_FREE(mEdges);
PX_FREE(mEdgeFaces);
}
bool EdgeList::load(PxInputStream& stream)
{
// Import header
PxU32 Version;
bool Mismatch;
if(!ReadHeader('E', 'D', 'G', 'E', Version, Mismatch, stream))
return false;
// Import edges
mNbEdges = readDword(Mismatch, stream);
mEdges = PX_ALLOCATE(EdgeData, mNbEdges, "EdgeData");
stream.read(mEdges, sizeof(EdgeData)*mNbEdges);
mNbFaces = readDword(Mismatch, stream);
mEdgeFaces = PX_ALLOCATE(EdgeTriangleData, mNbFaces, "EdgeTriangleData");
stream.read(mEdgeFaces, sizeof(EdgeTriangleData)*mNbFaces);
mEdgeToTriangles = PX_ALLOCATE(EdgeDescData, mNbEdges, "EdgeDescData");
stream.read(mEdgeToTriangles, sizeof(EdgeDescData)*mNbEdges);
PxU32 LastOffset = mEdgeToTriangles[mNbEdges-1].Offset + mEdgeToTriangles[mNbEdges-1].Count;
mFacesByEdges = PX_ALLOCATE(PxU32, LastOffset, "EdgeList FacesByEdges");
stream.read(mFacesByEdges, sizeof(PxU32)*LastOffset);
return true;
}
/**
* Initializes the edge-list.
* \param create [in] edge-list creation structure
* \return true if success.
*/
bool EdgeList::init(const EDGELISTCREATE& create)
{
const bool FacesToEdges = create.Verts ? true : create.FacesToEdges;
const bool EdgesToFaces = create.Verts ? true : create.EdgesToFaces;
// "FacesToEdges" maps each face to three edges.
if(FacesToEdges && !createFacesToEdges(create.NbFaces, create.DFaces, create.WFaces))
return false;
// "EdgesToFaces" maps each edge to the set of faces sharing this edge
if(EdgesToFaces && !createEdgesToFaces(create.NbFaces, create.DFaces, create.WFaces))
return false;
// Create active edges
if(create.Verts && !computeActiveEdges(create.NbFaces, create.DFaces, create.WFaces, create.Verts, create.Epsilon))
return false;
// Get rid of useless data
if(!create.FacesToEdges)
PX_FREE(mEdgeFaces);
if(!create.EdgesToFaces)
{
PX_FREE(mEdgeToTriangles);
PX_FREE(mFacesByEdges);
}
return true;
}
/**
* Computes FacesToEdges.
* After the call:
* - mNbEdges is updated with the number of non-redundant edges
* - mEdges is a list of mNbEdges edges (one edge is 2 vertex-references)
* - mEdgesRef is a list of nbfaces structures with 3 indexes in mEdges for each face
*
* \param nb_faces [in] a number of triangles
* \param dfaces [in] list of triangles with PxU32 vertex references (or NULL)
* \param wfaces [in] list of triangles with PxU16 vertex references (or NULL)
* \return true if success.
*/
bool EdgeList::createFacesToEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces)
{
if(!nb_faces || (!dfaces && !wfaces))
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "EdgeList::CreateFacesToEdges: NULL parameter!");
if(mEdgeFaces)
return true; // Already computed!
// 1) Get some bytes: I need one EdgesRefs for each face, and some temp buffers
mEdgeFaces = PX_ALLOCATE(EdgeTriangleData, nb_faces, "mEdgeFaces"); // Link faces to edges
PxU32* VRefs0 = PX_ALLOCATE(PxU32, nb_faces*3, "Tmp"); // Temp storage
PxU32* VRefs1 = PX_ALLOCATE(PxU32, nb_faces*3, "Tmp"); // Temp storage
EdgeData* Buffer = PX_ALLOCATE(EdgeData, nb_faces*3, "Tmp"); // Temp storage
// 2) Create a full redundant list of 3 edges / face.
for(PxU32 i=0;i<nb_faces;i++)
{
// Get right vertex-references
const PxU32 Ref0 = dfaces ? dfaces[i*3+0] : wfaces ? wfaces[i*3+0] : 0;
const PxU32 Ref1 = dfaces ? dfaces[i*3+1] : wfaces ? wfaces[i*3+1] : 1;
const PxU32 Ref2 = dfaces ? dfaces[i*3+2] : wfaces ? wfaces[i*3+2] : 2;
// Pre-Sort vertex-references and put them in the lists
if(Ref0<Ref1) { VRefs0[i*3+0] = Ref0; VRefs1[i*3+0] = Ref1; } // Edge 0-1 maps (i%3)
else { VRefs0[i*3+0] = Ref1; VRefs1[i*3+0] = Ref0; } // Edge 0-1 maps (i%3)
if(Ref1<Ref2) { VRefs0[i*3+1] = Ref1; VRefs1[i*3+1] = Ref2; } // Edge 1-2 maps (i%3)+1
else { VRefs0[i*3+1] = Ref2; VRefs1[i*3+1] = Ref1; } // Edge 1-2 maps (i%3)+1
if(Ref2<Ref0) { VRefs0[i*3+2] = Ref2; VRefs1[i*3+2] = Ref0; } // Edge 2-0 maps (i%3)+2
else { VRefs0[i*3+2] = Ref0; VRefs1[i*3+2] = Ref2; } // Edge 2-0 maps (i%3)+2
}
// 3) Sort the list according to both keys (VRefs0 and VRefs1)
Cm::RadixSortBuffered Sorter;
const PxU32* Sorted = Sorter.Sort(VRefs1, nb_faces*3).Sort(VRefs0, nb_faces*3).GetRanks();
// 4) Loop through all possible edges
// - clean edges list by removing redundant edges
// - create EdgesRef list
mNbEdges = 0; // #non-redundant edges
mNbFaces = nb_faces;
PxU32 PreviousRef0 = PX_INVALID_U32;
PxU32 PreviousRef1 = PX_INVALID_U32;
for(PxU32 i=0;i<nb_faces*3;i++)
{
PxU32 Face = Sorted[i]; // Between 0 and nbfaces*3
PxU32 ID = Face % 3; // Get edge ID back.
PxU32 SortedRef0 = VRefs0[Face]; // (SortedRef0, SortedRef1) is the sorted edge
PxU32 SortedRef1 = VRefs1[Face];
if(SortedRef0!=PreviousRef0 || SortedRef1!=PreviousRef1)
{
// New edge found! => stored in temp buffer
Buffer[mNbEdges].Ref0 = SortedRef0;
Buffer[mNbEdges].Ref1 = SortedRef1;
mNbEdges++;
}
PreviousRef0 = SortedRef0;
PreviousRef1 = SortedRef1;
// Create mEdgesRef on the fly
mEdgeFaces[Face/3].mLink[ID] = mNbEdges-1;
}
// 5) Here, mNbEdges==#non redundant edges
mEdges = PX_ALLOCATE(EdgeData, mNbEdges, "EdgeData");
// Create real edges-list.
PxMemCopy(mEdges, Buffer, mNbEdges*sizeof(EdgeData));
// 6) Free ram and exit
PX_FREE(Buffer);
PX_FREE(VRefs1);
PX_FREE(VRefs0);
return true;
}
/**
* Computes EdgesToFaces.
* After the call:
* - mEdgeToTriangles is created
* - mFacesByEdges is created
*
* \param nb_faces [in] a number of triangles
* \param dfaces [in] list of triangles with PxU32 vertex references (or NULL)
* \param wfaces [in] list of triangles with PxU16 vertex references (or NULL)
* \return true if success.
*/
bool EdgeList::createEdgesToFaces(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces)
{
// 1) I need FacesToEdges !
if(!createFacesToEdges(nb_faces, dfaces, wfaces))
return false;
// 2) Get some bytes: one Pair structure / edge
mEdgeToTriangles = PX_ALLOCATE(EdgeDescData, mNbEdges, "EdgeDescData");
PxMemZero(mEdgeToTriangles, sizeof(EdgeDescData)*mNbEdges);
// 3) Create Counters, ie compute the #faces sharing each edge
for(PxU32 i=0;i<nb_faces;i++)
{
mEdgeToTriangles[mEdgeFaces[i].mLink[0]].Count++;
mEdgeToTriangles[mEdgeFaces[i].mLink[1]].Count++;
mEdgeToTriangles[mEdgeFaces[i].mLink[2]].Count++;
}
// 3) Create Radix-like Offsets
mEdgeToTriangles[0].Offset=0;
for(PxU32 i=1;i<mNbEdges;i++)
mEdgeToTriangles[i].Offset = mEdgeToTriangles[i-1].Offset + mEdgeToTriangles[i-1].Count;
const PxU32 LastOffset = mEdgeToTriangles[mNbEdges-1].Offset + mEdgeToTriangles[mNbEdges-1].Count;
// 4) Get some bytes for mFacesByEdges. LastOffset is the number of indices needed.
mFacesByEdges = PX_ALLOCATE(PxU32, LastOffset, "EdgeList FacesByEdges");
// 5) Create mFacesByEdges
for(PxU32 i=0;i<nb_faces;i++)
{
mFacesByEdges[mEdgeToTriangles[mEdgeFaces[i].mLink[0]].Offset++] = i;
mFacesByEdges[mEdgeToTriangles[mEdgeFaces[i].mLink[1]].Offset++] = i;
mFacesByEdges[mEdgeToTriangles[mEdgeFaces[i].mLink[2]].Offset++] = i;
}
// 6) Recompute offsets wasted by 5)
mEdgeToTriangles[0].Offset=0;
for(PxU32 i=1;i<mNbEdges;i++)
mEdgeToTriangles[i].Offset = mEdgeToTriangles[i-1].Offset + mEdgeToTriangles[i-1].Count;
return true;
}
static PX_INLINE PxU32 OppositeVertex(PxU32 r0, PxU32 r1, PxU32 r2, PxU32 vref0, PxU32 vref1)
{
if(vref0==r0)
{
if (vref1==r1) return r2;
else if(vref1==r2) return r1;
}
else if(vref0==r1)
{
if (vref1==r0) return r2;
else if(vref1==r2) return r0;
}
else if(vref0==r2)
{
if (vref1==r1) return r0;
else if(vref1==r0) return r1;
}
return PX_INVALID_U32;
}
bool EdgeList::computeActiveEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces, const PxVec3* verts, float epsilon)
{
if(!verts || (!dfaces && !wfaces))
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "EdgeList::ComputeActiveEdges: NULL parameter!");
PxU32 NbEdges = getNbEdges();
if(!NbEdges)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "ActiveEdges::ComputeConvexEdges: no edges in edge list!");
const EdgeData* Edges = getEdges();
if(!Edges)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "ActiveEdges::ComputeConvexEdges: no edge data in edge list!");
const EdgeDescData* ED = getEdgeToTriangles();
if(!ED)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "ActiveEdges::ComputeConvexEdges: no edge-to-triangle in edge list!");
const PxU32* FBE = getFacesByEdges();
if(!FBE)
return outputError<PxErrorCode::eINVALID_OPERATION>(__LINE__, "ActiveEdges::ComputeConvexEdges: no faces-by-edges in edge list!");
// We first create active edges in a temporaray buffer. We have one bool / edge.
bool* ActiveEdges = PX_ALLOCATE(bool, NbEdges, "bool");
// Loop through edges and look for convex ones
bool* CurrentMark = ActiveEdges;
while(NbEdges--)
{
// Get number of triangles sharing current edge
const PxU32 Count = ED->Count;
// Boundary edges are active => keep them (actually they're silhouette edges directly)
// Internal edges can be active => test them
// Singular edges ? => discard them
bool Active = false;
if(Count==1)
{
Active = true;
}
else if(Count==2)
{
const PxU32 FaceIndex0 = FBE[ED->Offset+0]*3;
const PxU32 FaceIndex1 = FBE[ED->Offset+1]*3;
PxU32 VRef00, VRef01, VRef02;
PxU32 VRef10, VRef11, VRef12;
if(dfaces)
{
VRef00 = dfaces[FaceIndex0+0];
VRef01 = dfaces[FaceIndex0+1];
VRef02 = dfaces[FaceIndex0+2];
VRef10 = dfaces[FaceIndex1+0];
VRef11 = dfaces[FaceIndex1+1];
VRef12 = dfaces[FaceIndex1+2];
}
else //if(wfaces)
{
PX_ASSERT(wfaces);
VRef00 = wfaces[FaceIndex0+0];
VRef01 = wfaces[FaceIndex0+1];
VRef02 = wfaces[FaceIndex0+2];
VRef10 = wfaces[FaceIndex1+0];
VRef11 = wfaces[FaceIndex1+1];
VRef12 = wfaces[FaceIndex1+2];
}
{
// We first check the opposite vertex against the plane
const PxU32 Op = OppositeVertex(VRef00, VRef01, VRef02, Edges->Ref0, Edges->Ref1);
const PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
if(PL1.distance(verts[Op])<0.0f) // If opposite vertex is below the plane, i.e. we discard concave edges
{
const PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
const PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
const float a = PxComputeAngle(N0, N1);
if(fabsf(a)>epsilon)
Active = true;
}
else
{
const PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
const PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
if(N0.dot(N1) < -0.999f)
Active = true;
}
//Active = true;
}
}
else
{
//Connected to more than 2
//We need to loop through the triangles and count the number of unique triangles (considering back-face triangles as non-unique). If we end up with more than 2 unique triangles,
//then by definition this is an inactive edge. However, if we end up with 2 unique triangles (say like a double-sided tesselated surface), then it depends on the same rules as above
const PxU32 FaceInd0 = FBE[ED->Offset]*3;
PxU32 VRef00, VRef01, VRef02;
PxU32 VRef10=0, VRef11=0, VRef12=0;
if(dfaces)
{
VRef00 = dfaces[FaceInd0+0];
VRef01 = dfaces[FaceInd0+1];
VRef02 = dfaces[FaceInd0+2];
}
else //if(wfaces)
{
PX_ASSERT(wfaces);
VRef00 = wfaces[FaceInd0+0];
VRef01 = wfaces[FaceInd0+1];
VRef02 = wfaces[FaceInd0+2];
}
PxU32 numUniqueTriangles = 1;
bool doubleSided0 = false;
bool doubleSided1 = 0;
for(PxU32 a = 1; a < Count; ++a)
{
const PxU32 FaceInd = FBE[ED->Offset+a]*3;
PxU32 VRef0, VRef1, VRef2;
if(dfaces)
{
VRef0 = dfaces[FaceInd+0];
VRef1 = dfaces[FaceInd+1];
VRef2 = dfaces[FaceInd+2];
}
else //if(wfaces)
{
PX_ASSERT(wfaces);
VRef0 = wfaces[FaceInd+0];
VRef1 = wfaces[FaceInd+1];
VRef2 = wfaces[FaceInd+2];
}
if(((VRef0 != VRef00) && (VRef0 != VRef01) && (VRef0 != VRef02)) ||
((VRef1 != VRef00) && (VRef1 != VRef01) && (VRef1 != VRef02)) ||
((VRef2 != VRef00) && (VRef2 != VRef01) && (VRef2 != VRef02)))
{
//Not the same as trig 0
if(numUniqueTriangles == 2)
{
if(((VRef0 != VRef10) && (VRef0 != VRef11) && (VRef0 != VRef12)) ||
((VRef1 != VRef10) && (VRef1 != VRef11) && (VRef1 != VRef12)) ||
((VRef2 != VRef10) && (VRef2 != VRef11) && (VRef2 != VRef12)))
{
//Too many unique triangles - terminate and mark as inactive
numUniqueTriangles++;
break;
}
else
{
const PxTriangle T0(verts[VRef10], verts[VRef11], verts[VRef12]);
const PxTriangle T1(verts[VRef0], verts[VRef1], verts[VRef2]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
if(N0.dot(N1) < -0.999f)
doubleSided1 = true;
}
}
else
{
VRef10 = VRef0;
VRef11 = VRef1;
VRef12 = VRef2;
numUniqueTriangles++;
}
}
else
{
//Check for double sided...
const PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
const PxTriangle T1(verts[VRef0], verts[VRef1], verts[VRef2]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
if(N0.dot(N1) < -0.999f)
doubleSided0 = true;
}
}
if(numUniqueTriangles == 1)
Active = true;
if(numUniqueTriangles == 2)
{
//Potentially active. Let's check the angles between the surfaces...
if(doubleSided0 || doubleSided1)
{
// Plane PL1 = faces[FBE[ED->Offset+1]].PlaneEquation(verts);
const PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
// if(PL1.Distance(verts[Op])<-epsilon) Active = true;
//if(PL1.distance(verts[Op])<0.0f) // If opposite vertex is below the plane, i.e. we discard concave edges
//KS - can't test signed distance for concave edges. This is a double-sided poly
{
const PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
const PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
const float a = PxComputeAngle(N0, N1);
if(fabsf(a)>epsilon)
Active = true;
}
}
else
{
//Not double sided...must have had a bunch of duplicate triangles!!!!
//Treat as normal
const PxU32 Op = OppositeVertex(VRef00, VRef01, VRef02, Edges->Ref0, Edges->Ref1);
// Plane PL1 = faces[FBE[ED->Offset+1]].PlaneEquation(verts);
const PxPlane PL1(verts[VRef10], verts[VRef11], verts[VRef12]);
// if(PL1.Distance(verts[Op])<-epsilon) Active = true;
if(PL1.distance(verts[Op])<0.0f) // If opposite vertex is below the plane, i.e. we discard concave edges
{
const PxTriangle T0(verts[VRef00], verts[VRef01], verts[VRef02]);
const PxTriangle T1(verts[VRef10], verts[VRef11], verts[VRef12]);
PxVec3 N0, N1;
T0.normal(N0);
T1.normal(N1);
const float a = PxComputeAngle(N0, N1);
if(fabsf(a)>epsilon)
Active = true;
}
}
}
else
{
//Lots of triangles all smooshed together. Just activate the edge in this case
Active = true;
}
}
*CurrentMark++ = Active;
ED++;
Edges++;
}
// Now copy bits back into already existing edge structures
// - first in edge triangles
for(PxU32 i=0;i<mNbFaces;i++)
{
EdgeTriangleData& ET = mEdgeFaces[i];
for(PxU32 j=0;j<3;j++)
{
const PxU32 Link = ET.mLink[j];
if(!(Link & MSH_ACTIVE_EDGE_MASK)) // else already active
{
if(ActiveEdges[Link & MSH_EDGE_LINK_MASK])
ET.mLink[j] |= MSH_ACTIVE_EDGE_MASK; // Mark as active
}
}
}
// - then in edge-to-faces
for(PxU32 i=0;i<mNbEdges;i++)
{
if(ActiveEdges[i])
mEdgeToTriangles[i].Flags |= PX_EDGE_ACTIVE;
}
// Free & exit
PX_FREE(ActiveEdges);
if(0) // PT: this is not needed anymore
{
//initially all vertices are flagged to ignore them. (we assume them to be flat)
//for all NONFLAT edges, incl boundary
//unflag 2 vertices in up to 2 trigs as perhaps interesting
//for all CONCAVE edges
//flag 2 vertices in up to 2 trigs to ignore them.
// Handle active vertices
PxU32 MaxIndex = 0;
for(PxU32 i=0;i<nb_faces;i++)
{
PxU32 VRef0, VRef1, VRef2;
if(dfaces)
{
VRef0 = dfaces[i*3+0];
VRef1 = dfaces[i*3+1];
VRef2 = dfaces[i*3+2];
}
else //if(wfaces)
{
PX_ASSERT(wfaces);
VRef0 = wfaces[i*3+0];
VRef1 = wfaces[i*3+1];
VRef2 = wfaces[i*3+2];
}
if(VRef0>MaxIndex) MaxIndex = VRef0;
if(VRef1>MaxIndex) MaxIndex = VRef1;
if(VRef2>MaxIndex) MaxIndex = VRef2;
}
MaxIndex++;
bool* ActiveVerts = PX_ALLOCATE(bool, MaxIndex, "bool");
PxMemZero(ActiveVerts, MaxIndex*sizeof(bool));
PX_ASSERT(dfaces || wfaces);
for(PxU32 i=0;i<mNbFaces;i++)
{
PxU32 VRef[3];
if(dfaces)
{
VRef[0] = dfaces[i*3+0];
VRef[1] = dfaces[i*3+1];
VRef[2] = dfaces[i*3+2];
}
else if(wfaces)
{
VRef[0] = wfaces[i*3+0];
VRef[1] = wfaces[i*3+1];
VRef[2] = wfaces[i*3+2];
}
const EdgeTriangleData& ET = mEdgeFaces[i];
for(PxU32 j=0;j<3;j++)
{
PxU32 Link = ET.mLink[j];
if(Link & MSH_ACTIVE_EDGE_MASK)
{
// Active edge => mark edge vertices as active
PxU32 r0, r1;
if(j==0) { r0=0; r1=1; }
else if(j==1) { r0=1; r1=2; }
else /*if(j==2)*/ { PX_ASSERT(j==2); r0=0; r1=2; }
ActiveVerts[VRef[r0]] = ActiveVerts[VRef[r1]] = true;
}
}
}
/* for(PxU32 i=0;i<mNbFaces;i++)
{
PxU32 VRef[3];
if(dfaces)
{
VRef[0] = dfaces[i*3+0];
VRef[1] = dfaces[i*3+1];
VRef[2] = dfaces[i*3+2];
}
else if(wfaces)
{
VRef[0] = wfaces[i*3+0];
VRef[1] = wfaces[i*3+1];
VRef[2] = wfaces[i*3+2];
}
const EdgeTriangle& ET = mEdgeFaces[i];
for(PxU32 j=0;j<3;j++)
{
PxU32 Link = ET.mLink[j];
if(!(Link & MSH_ACTIVE_EDGE_MASK))
{
// Inactive edge => mark edge vertices as inactive
PxU32 r0, r1;
if(j==0) { r0=0; r1=1; }
if(j==1) { r0=1; r1=2; }
if(j==2) { r0=0; r1=2; }
ActiveVerts[VRef[r0]] = ActiveVerts[VRef[r1]] = false;
}
}
}*/
// Now stuff this into the structure
for(PxU32 i=0;i<mNbFaces;i++)
{
PxU32 VRef[3];
if(dfaces)
{
VRef[0] = dfaces[i*3+0];
VRef[1] = dfaces[i*3+1];
VRef[2] = dfaces[i*3+2];
}
else if(wfaces)
{
VRef[0] = wfaces[i*3+0];
VRef[1] = wfaces[i*3+1];
VRef[2] = wfaces[i*3+2];
}
EdgeTriangleData& ET = mEdgeFaces[i];
for(PxU32 j=0;j<3;j++)
{
const PxU32 Link = ET.mLink[j];
if(!(Link & MSH_ACTIVE_VERTEX_MASK)) // else already active
{
if(ActiveVerts[VRef[j]])
ET.mLink[j] |= MSH_ACTIVE_VERTEX_MASK; // Mark as active
}
}
}
PX_FREE(ActiveVerts);
}
return true;
}

View File

@@ -0,0 +1,177 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_EDGE_LIST_H
#define GU_EDGE_LIST_H
#include "foundation/PxSimpleTypes.h"
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxIO.h"
#include "foundation/PxVec3.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
namespace Gu
{
enum EdgeType
{
PX_EDGE_UNDEFINED,
PX_EDGE_BOUNDARY, //!< Edge belongs to a single triangle
PX_EDGE_INTERNAL, //!< Edge belongs to exactly two triangles
PX_EDGE_SINGULAR, //!< Edge belongs to three or more triangles
PX_EDGE_FORCE_DWORD = 0x7fffffff
};
enum EdgeFlag
{
PX_EDGE_ACTIVE = (1<<0)
};
//! Basic edge-data
struct EdgeData
{
PxU32 Ref0; //!< First vertex reference
PxU32 Ref1; //!< Second vertex reference
};
PX_COMPILE_TIME_ASSERT(sizeof(EdgeData) == 8);
//! Basic edge-data using 8-bit references
struct Edge8Data
{
PxU8 Ref0; //!< First vertex reference
PxU8 Ref1; //!< Second vertex reference
};
PX_COMPILE_TIME_ASSERT(sizeof(Edge8Data) == 2);
//! A count/offset pair = an edge descriptor
struct EdgeDescData
{
PxU16 Flags;
PxU16 Count;
PxU32 Offset;
};
PX_COMPILE_TIME_ASSERT(sizeof(EdgeDescData) == 8);
//! Edge<->triangle mapping
struct EdgeTriangleData
{
PxU32 mLink[3];
};
PX_COMPILE_TIME_ASSERT(sizeof(EdgeTriangleData) == 12);
enum
{
MSH_EDGE_LINK_MASK = 0x0fffffff,
MSH_ACTIVE_EDGE_MASK = 0x80000000,
MSH_ACTIVE_VERTEX_MASK = 0x40000000
};
class EdgeTriangleAC
{
public:
PX_INLINE static PxU32 GetEdge01(const EdgeTriangleData& data) { return data.mLink[0] & MSH_EDGE_LINK_MASK; }
PX_INLINE static PxU32 GetEdge12(const EdgeTriangleData& data) { return data.mLink[1] & MSH_EDGE_LINK_MASK; }
PX_INLINE static PxU32 GetEdge20(const EdgeTriangleData& data) { return data.mLink[2] & MSH_EDGE_LINK_MASK; }
PX_INLINE static PxU32 GetEdge(const EdgeTriangleData& data, PxU32 i) { return data.mLink[i] & MSH_EDGE_LINK_MASK; }
PX_INLINE static PxIntBool HasActiveEdge01(const EdgeTriangleData& data) { return PxIntBool(data.mLink[0] & MSH_ACTIVE_EDGE_MASK); }
PX_INLINE static PxIntBool HasActiveEdge12(const EdgeTriangleData& data) { return PxIntBool(data.mLink[1] & MSH_ACTIVE_EDGE_MASK); }
PX_INLINE static PxIntBool HasActiveEdge20(const EdgeTriangleData& data) { return PxIntBool(data.mLink[2] & MSH_ACTIVE_EDGE_MASK); }
PX_INLINE static PxIntBool HasActiveEdge(const EdgeTriangleData& data, PxU32 i) { return PxIntBool(data.mLink[i] & MSH_ACTIVE_EDGE_MASK); }
};
//! The edge-list creation structure.
struct EDGELISTCREATE
{
EDGELISTCREATE() :
NbFaces (0),
DFaces (NULL),
WFaces (NULL),
FacesToEdges (false),
EdgesToFaces (false),
Verts (NULL),
Epsilon (0.1f)
{}
PxU32 NbFaces; //!< Number of faces in source topo
const PxU32* DFaces; //!< List of faces (dwords) or NULL
const PxU16* WFaces; //!< List of faces (words) or NULL
bool FacesToEdges;
bool EdgesToFaces;
const PxVec3* Verts;
float Epsilon;
};
class EdgeList : public PxUserAllocated
{
public:
EdgeList();
~EdgeList();
bool init(const EDGELISTCREATE& create);
bool load(PxInputStream& stream);
PX_FORCE_INLINE PxU32 getNbEdges() const { return mNbEdges; }
PX_FORCE_INLINE const EdgeData* getEdges() const { return mEdges; }
PX_FORCE_INLINE const EdgeData& getEdge(PxU32 edge_index) const { return mEdges[edge_index]; }
PX_FORCE_INLINE PxU32 getNbFaces() const { return mNbFaces; }
PX_FORCE_INLINE const EdgeTriangleData* getEdgeTriangles() const { return mEdgeFaces; }
PX_FORCE_INLINE const EdgeTriangleData& getEdgeTriangle(PxU32 face_index) const { return mEdgeFaces[face_index]; }
PX_FORCE_INLINE const EdgeDescData* getEdgeToTriangles() const { return mEdgeToTriangles; }
PX_FORCE_INLINE const EdgeDescData& getEdgeToTriangles(PxU32 edge_index) const { return mEdgeToTriangles[edge_index]; }
PX_FORCE_INLINE const PxU32* getFacesByEdges() const { return mFacesByEdges; }
PX_FORCE_INLINE PxU32 getFacesByEdges(PxU32 face_index) const { return mFacesByEdges[face_index]; }
private:
// The edge list
PxU32 mNbEdges; //!< Number of edges in the list
EdgeData* mEdges; //!< List of edges
// Faces to edges
PxU32 mNbFaces; //!< Number of faces for which we have data
EdgeTriangleData* mEdgeFaces; //!< Array of edge-triangles referencing mEdges
// Edges to faces
EdgeDescData* mEdgeToTriangles; //!< An EdgeDesc structure for each edge
PxU32* mFacesByEdges; //!< A pool of face indices
bool createFacesToEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces);
bool createEdgesToFaces(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces);
bool computeActiveEdges(PxU32 nb_faces, const PxU32* dfaces, const PxU16* wfaces, const PxVec3* verts, float epsilon);
};
} // namespace Gu
}
#endif

View File

@@ -0,0 +1,221 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxVec3.h"
#include "foundation/PxArray.h"
#include "GuMeshAnalysis.h"
using namespace physx;
using namespace Gu;
PX_FORCE_INLINE PxU64 key(PxI32 a, PxI32 b)
{
if (a < b)
return ((PxU64(a)) << 32) | (PxU64(b));
else
return ((PxU64(b)) << 32) | (PxU64(a));
}
#define INITIAL_VALUE -3
const static PxU32 neighborEdges[3][2] = { { 0, 1 }, { 2, 0 }, { 1, 2 } };
//const static PxU32 triTip[3] = { 2, 1, 0 };
bool MeshAnalyzer::buildTriangleAdjacency(const Triangle* tris, PxU32 numTriangles, PxArray<PxI32>& result, PxHashMap<PxU64, PxI32>& edges)
{
PxU32 l = 4 * numTriangles; //Still factor 4 - waste one entry per triangle to get a power of 2 which allows for bit shift usage instead of modulo
result.clear();
result.resize(l, -1);
for (PxU32 i = 3; i < l; i += 4)
result[i] = INITIAL_VALUE; //Mark the fields that get never accessed because they are just not used, this is useful for debugging
edges.clear();
for (PxU32 i = 0; i < numTriangles; ++i)
{
const Triangle& tri = tris[i];
if (tri[0] < 0)
continue;
for (PxU32 j = 0; j < 3; ++j)
{
PxU64 edge = key(tri[neighborEdges[j][0]], tri[neighborEdges[j][1]]);
if (const PxPair<const PxU64, PxI32>* ptr = edges.find(edge))
{
if (ptr->second < 0)
return false; //Edge shared by more than 2 triangles
if (result[4 * i + j] == -4 || result[ptr->second] == -4)
{
result[4 * i + j] = -4; //Mark as non-manifold edge
result[ptr->second] = -4;
}
else
{
if (result[4 * i + j] != -1 || result[ptr->second] != -1)
{
result[4 * i + j] = -4; //Mark as non-manifold edge
result[ptr->second] = -4;
}
result[4 * i + j] = ptr->second;
result[ptr->second] = 4 * i + j;
}
edges.erase(ptr->first);
edges.insert(edge, -1); //Mark as processed
}
else
edges.insert(edge, 4 * i + j);
}
}
return true;
}
PxI32 indexOf(const Triangle& tri, PxI32 node)
{
if (tri[0] == node) return 0;
if (tri[1] == node) return 1;
if (tri[2] == node) return 2;
return 0xFFFFFFFF;
}
bool MeshAnalyzer::checkConsistentTriangleOrientation(const Triangle* tris, PxU32 numTriangles)
{
PxArray<bool> flip;
PxHashMap<PxU64, PxI32> edges;
PxArray<PxArray<PxU32>> connectedTriangleGroups;
if (!buildConsistentTriangleOrientationMap(tris, numTriangles, flip, edges, connectedTriangleGroups))
return false;
for (PxU32 i = 0; i < flip.size(); ++i)
{
if (flip[i])
return false;
}
return true;
}
bool MeshAnalyzer::buildConsistentTriangleOrientationMap(const Triangle* tris, PxU32 numTriangles, PxArray<bool>& flip,
PxHashMap<PxU64, PxI32>& edges, PxArray<PxArray<PxU32>>& connectedTriangleGroups)
{
PxArray<PxI32> adj;
if (!buildTriangleAdjacency(tris, numTriangles, adj, edges))
return false;
PxU32 l = numTriangles;
PxArray<bool> done;
done.resize(l, false);
flip.clear();
flip.resize(l, false);
PxU32 seedIndex = 0;
PxArray<PxI32> stack;
while (true)
{
if (stack.size() == 0)
{
while (seedIndex < done.size() && done[seedIndex])
++seedIndex;
if (seedIndex == done.size())
break;
done[seedIndex] = true;
flip[seedIndex] = false;
stack.pushBack(seedIndex);
PxArray<PxU32> currentGroup;
currentGroup.pushBack(seedIndex);
connectedTriangleGroups.pushBack(currentGroup);
}
PxI32 index = stack.popBack();
bool f = flip[index];
const Triangle& tri = tris[index];
for (PxU32 i = 0; i < 3; ++i)
{
if (adj[4 * index + i] >= 0 && !done[adj[4 * index + i] >> 2])
{
PxI32 neighborTriIndex = adj[4 * index + i] >> 2;
done[neighborTriIndex] = true;
connectedTriangleGroups[connectedTriangleGroups.size() - 1].pushBack(neighborTriIndex);
const Triangle& neighborTri = tris[neighborTriIndex];
PxI32 j = indexOf(neighborTri, tri[neighborEdges[i][0]]);
flip[neighborTriIndex] = (neighborTri[(j + 1) % 3] == tri[neighborEdges[i][1]]) != f;
stack.pushBack(neighborTriIndex);
}
}
}
return true;
}
bool MeshAnalyzer::makeTriOrientationConsistent(Triangle* tris, PxU32 numTriangles, bool invertOrientation)
{
PxHashMap<PxU64, PxI32> edges;
PxArray<bool> flipTriangle;
PxArray<PxArray<PxU32>> connectedTriangleGroups;
if (!buildConsistentTriangleOrientationMap(tris, numTriangles, flipTriangle, edges, connectedTriangleGroups))
return false;
for (PxU32 i = 0; i < flipTriangle.size(); ++i)
{
Triangle& t = tris[i];
if (flipTriangle[i] != invertOrientation)
PxSwap(t[0], t[1]);
}
return true;
}
bool MeshAnalyzer::checkMeshWatertightness(const Triangle* tris, PxU32 numTriangles, bool treatInconsistentWindingAsNonWatertight)
{
PxArray<bool> flip;
PxHashMap<PxU64, PxI32> edges;
PxArray<PxArray<PxU32>> connectedTriangleGroups;
if (!MeshAnalyzer::buildConsistentTriangleOrientationMap(tris, numTriangles, flip, edges, connectedTriangleGroups))
return false;
if (treatInconsistentWindingAsNonWatertight)
{
for (PxU32 i = 0; i < flip.size(); ++i)
{
if (flip[i])
return false;
}
}
for (PxHashMap<PxU64, PxI32>::Iterator iter = edges.getIterator(); !iter.done(); ++iter)
{
if (iter->second >= 0)
return false;
}
return true;
}

View File

@@ -0,0 +1,166 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_ANALYSIS_H
#define GU_MESH_ANALYSIS_H
#include "common/PxPhysXCommonConfig.h"
#include "GuTriangle.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxSort.h"
namespace physx
{
namespace Gu
{
using Triangle = Gu::IndexedTriangleT<PxI32>;
class MeshAnalyzer
{
struct Range
{
PxI32 start;
PxI32 end; //Exclusive
Range(PxI32 start_, PxI32 end_)
{
start = start_;
end = end_;
}
PxI32 Length() const { return end - start; }
};
template<typename T, typename S>
static void splitRanges(PxArray<Range>& mergeRanges, const PxArray<PxI32>& indexer, const T* points, PxI32 dimIndex, S tol)
{
PxArray<Range> newMergeRanges;
for (PxU32 i = 0; i < mergeRanges.size(); ++i)
{
const Range& r = mergeRanges[i];
PxI32 start = r.start;
for (PxI32 j = r.start + 1; j < r.end; ++j)
{
//PxF64 delta = PxAbs(points[start][dimIndex] - points[j - 1][dimIndex]);
S delta = PxAbs(points[indexer[j]][dimIndex] - points[indexer[j - 1]][dimIndex]);
if (delta > tol)
{
if (j - start > 1)
newMergeRanges.pushBack(Range(start, j));
start = j;
}
}
if (r.end - start > 1)
newMergeRanges.pushBack(Range(start, r.end));
}
mergeRanges.clear();
for (PxU32 i = 0; i < newMergeRanges.size(); ++i)
mergeRanges.pushBack(newMergeRanges[i]);
}
template<typename T>
struct Comparer
{
const T* points;
PxU32 dimension;
Comparer(const T* points_, const PxU32 dimension_) : points(points_), dimension(dimension_) {}
bool operator()(const PxI32& a, const PxI32& b) const
{
return points[a][dimension] > points[b][dimension];
}
private:
PX_NOCOPY(Comparer)
};
public:
template<typename T, typename S>
static void mapDuplicatePoints(const T* points, const PxU32 nbPoints, PxArray<PxI32>& result, S duplicateDistanceManhattanMetric = static_cast<S>(1e-6))
{
result.reserve(nbPoints);
result.forceSize_Unsafe(nbPoints);
PxArray<PxI32> indexer;
indexer.reserve(nbPoints);
indexer.forceSize_Unsafe(nbPoints);
for (PxU32 i = 0; i < nbPoints; ++i)
{
indexer[i] = i;
result[i] = i;
}
Comparer<T> comparer(points, 0);
PxSort(indexer.begin(), indexer.size(), comparer);
PxArray<Range> mergeRanges;
mergeRanges.pushBack(Range(0, nbPoints));
splitRanges<T>(mergeRanges, indexer, points, 0, duplicateDistanceManhattanMetric);
comparer.dimension = 1;
for (PxU32 i = 0; i < mergeRanges.size(); ++i)
{
const Range& r = mergeRanges[i];
PxSort(indexer.begin() + r.start, r.Length(), comparer);
}
splitRanges<T>(mergeRanges, indexer, points, 1, duplicateDistanceManhattanMetric);
comparer.dimension = 2;
for (PxU32 i = 0; i < mergeRanges.size(); ++i)
{
const Range& r = mergeRanges[i];
PxSort(indexer.begin() + r.start, r.Length(), comparer);
}
splitRanges<T>(mergeRanges, indexer, points, 2, duplicateDistanceManhattanMetric);
//Merge the ranges
for (PxU32 i = 0; i < mergeRanges.size(); ++i)
{
const Range& r = mergeRanges[i];
PxSort(indexer.begin() + r.start, r.Length());
for (PxI32 j = r.start + 1; j < r.end; ++j)
result[indexer[j]] = result[indexer[r.start]];
}
}
PX_PHYSX_COMMON_API static bool buildTriangleAdjacency(const Triangle* tris, PxU32 numTriangles, PxArray<PxI32>& result, PxHashMap<PxU64, PxI32>& edges);
PX_PHYSX_COMMON_API static bool checkConsistentTriangleOrientation(const Triangle* tris, PxU32 numTriangles);
PX_PHYSX_COMMON_API static bool buildConsistentTriangleOrientationMap(const Triangle* tris, PxU32 numTriangles, PxArray<bool>& flipMap,
PxHashMap<PxU64, PxI32>& edges, PxArray<PxArray<PxU32>>& connectedTriangleGroups);
PX_PHYSX_COMMON_API static bool makeTriOrientationConsistent(Triangle* tris, PxU32 numTriangles, bool invertOrientation = false);
PX_PHYSX_COMMON_API static bool checkMeshWatertightness(const Triangle* tris, PxU32 numTriangles, bool treatInconsistentWindingAsNonWatertight = true);
};
}
}
#endif

View File

@@ -0,0 +1,237 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxVec3.h"
#include "foundation/PxMemory.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxBitUtils.h"
#include "GuMeshCleaner.h"
using namespace physx;
using namespace Gu;
struct Indices
{
PxU32 mRef[3];
PX_FORCE_INLINE bool operator!=(const Indices&v) const { return mRef[0] != v.mRef[0] || mRef[1] != v.mRef[1] || mRef[2] != v.mRef[2]; }
};
static PX_FORCE_INLINE PxU32 getHashValue(const PxVec3& v)
{
const PxU32* h = reinterpret_cast<const PxU32*>(&v.x);
const PxU32 f = (h[0]+h[1]*11-(h[2]*17)) & 0x7fffffff; // avoid problems with +-0
return (f>>22)^(f>>12)^(f);
}
static PX_FORCE_INLINE PxU32 getHashValue(const Indices& v)
{
// const PxU32* h = v.mRef;
// const PxU32 f = (h[0]+h[1]*11-(h[2]*17)) & 0x7fffffff; // avoid problems with +-0
// return (f>>22)^(f>>12)^(f);
PxU32 a = v.mRef[0];
PxU32 b = v.mRef[1];
PxU32 c = v.mRef[2];
a=a-b; a=a-c; a=a^(c >> 13);
b=b-c; b=b-a; b=b^(a << 8);
c=c-a; c=c-b; c=c^(b >> 13);
a=a-b; a=a-c; a=a^(c >> 12);
b=b-c; b=b-a; b=b^(a << 16);
c=c-a; c=c-b; c=c^(b >> 5);
a=a-b; a=a-c; a=a^(c >> 3);
b=b-c; b=b-a; b=b^(a << 10);
c=c-a; c=c-b; c=c^(b >> 15);
return c;
}
MeshCleaner::MeshCleaner(PxU32 nbVerts, const PxVec3* srcVerts, PxU32 nbTris, const PxU32* srcIndices, PxF32 meshWeldTolerance, PxF32 areaLimit)
{
PxVec3* cleanVerts = PX_ALLOCATE(PxVec3, nbVerts, "MeshCleaner");
PX_ASSERT(cleanVerts);
PxU32* indices = PX_ALLOCATE(PxU32, (nbTris*3), "MeshCleaner");
PxU32* remapTriangles = PX_ALLOCATE(PxU32, nbTris, "MeshCleaner");
PxU32* vertexIndices = NULL;
if(meshWeldTolerance!=0.0f)
{
vertexIndices = PX_ALLOCATE(PxU32, nbVerts, "MeshCleaner");
const PxF32 weldTolerance = 1.0f / meshWeldTolerance;
// snap to grid
for(PxU32 i=0; i<nbVerts; i++)
{
vertexIndices[i] = i;
cleanVerts[i] = PxVec3( PxFloor(srcVerts[i].x*weldTolerance + 0.5f),
PxFloor(srcVerts[i].y*weldTolerance + 0.5f),
PxFloor(srcVerts[i].z*weldTolerance + 0.5f));
}
}
else
{
PxMemCopy(cleanVerts, srcVerts, nbVerts*sizeof(PxVec3));
}
const PxU32 maxNbElems = PxMax(nbTris, nbVerts);
const PxU32 hashSize = PxNextPowerOfTwo(maxNbElems);
const PxU32 hashMask = hashSize-1;
PxU32* hashTable = PX_ALLOCATE(PxU32, (hashSize + maxNbElems), "MeshCleaner");
PX_ASSERT(hashTable);
PxMemSet(hashTable, 0xff, hashSize * sizeof(PxU32));
PxU32* const next = hashTable + hashSize;
PxU32* remapVerts = PX_ALLOCATE(PxU32, nbVerts, "MeshCleaner");
PxMemSet(remapVerts, 0xff, nbVerts * sizeof(PxU32));
for(PxU32 i=0;i<nbTris*3;i++)
{
const PxU32 vref = srcIndices[i];
if(vref<nbVerts)
remapVerts[vref] = 0;
}
PxU32 nbCleanedVerts = 0;
for(PxU32 i=0;i<nbVerts;i++)
{
if(remapVerts[i]==0xffffffff)
continue;
const PxVec3& v = cleanVerts[i];
const PxU32 hashValue = getHashValue(v) & hashMask;
PxU32 offset = hashTable[hashValue];
while(offset!=0xffffffff && cleanVerts[offset]!=v)
offset = next[offset];
if(offset==0xffffffff)
{
remapVerts[i] = nbCleanedVerts;
cleanVerts[nbCleanedVerts] = v;
if(vertexIndices)
vertexIndices[nbCleanedVerts] = i;
next[nbCleanedVerts] = hashTable[hashValue];
hashTable[hashValue] = nbCleanedVerts++;
}
else remapVerts[i] = offset;
}
// PT: area = ((p0 - p1).cross(p0 - p2)).magnitude() * 0.5
// area < areaLimit
// <=> ((p0 - p1).cross(p0 - p2)).magnitude() < areaLimit * 2.0
// <=> ((p0 - p1).cross(p0 - p2)).magnitudeSquared() < (areaLimit * 2.0)^2
const PxF32 limit = areaLimit * areaLimit * 4.0f;
PxU32 nbCleanedTris = 0;
for(PxU32 i=0;i<nbTris;i++)
{
PxU32 vref0 = *srcIndices++;
PxU32 vref1 = *srcIndices++;
PxU32 vref2 = *srcIndices++;
if(vref0>=nbVerts || vref1>=nbVerts || vref2>=nbVerts)
continue;
// PT: you can still get zero-area faces when the 3 vertices are perfectly aligned
const PxVec3& p0 = srcVerts[vref0];
const PxVec3& p1 = srcVerts[vref1];
const PxVec3& p2 = srcVerts[vref2];
const float area2 = ((p0 - p1).cross(p0 - p2)).magnitudeSquared();
if(area2<=limit)
continue;
vref0 = remapVerts[vref0];
vref1 = remapVerts[vref1];
vref2 = remapVerts[vref2];
if(vref0==vref1 || vref1==vref2 || vref2==vref0)
continue;
indices[nbCleanedTris*3+0] = vref0;
indices[nbCleanedTris*3+1] = vref1;
indices[nbCleanedTris*3+2] = vref2;
remapTriangles[nbCleanedTris] = i;
nbCleanedTris++;
}
PX_FREE(remapVerts);
PxU32 nbToGo = nbCleanedTris;
nbCleanedTris = 0;
PxMemSet(hashTable, 0xff, hashSize * sizeof(PxU32));
Indices* const I = reinterpret_cast<Indices*>(indices);
bool idtRemap = true;
for(PxU32 i=0;i<nbToGo;i++)
{
const Indices& v = I[i];
const PxU32 hashValue = getHashValue(v) & hashMask;
PxU32 offset = hashTable[hashValue];
while(offset!=0xffffffff && I[offset]!=v)
offset = next[offset];
if(offset==0xffffffff)
{
const PxU32 originalIndex = remapTriangles[i];
PX_ASSERT(nbCleanedTris<=i);
remapTriangles[nbCleanedTris] = originalIndex;
if(originalIndex!=nbCleanedTris)
idtRemap = false;
I[nbCleanedTris] = v;
next[nbCleanedTris] = hashTable[hashValue];
hashTable[hashValue] = nbCleanedTris++;
}
}
PX_FREE(hashTable);
if(vertexIndices)
{
for(PxU32 i=0;i<nbCleanedVerts;i++)
cleanVerts[i] = srcVerts[vertexIndices[i]];
PX_FREE(vertexIndices);
}
mNbVerts = nbCleanedVerts;
mNbTris = nbCleanedTris;
mVerts = cleanVerts;
mIndices = indices;
if(idtRemap)
{
PX_FREE(remapTriangles);
mRemap = NULL;
}
else
{
mRemap = remapTriangles;
}
}
MeshCleaner::~MeshCleaner()
{
PX_FREE(mRemap);
PX_FREE(mIndices);
PX_FREE(mVerts);
}

View File

@@ -0,0 +1,54 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_CLEANER_H
#define GU_MESH_CLEANER_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
class MeshCleaner
{
public:
MeshCleaner(PxU32 nbVerts, const PxVec3* verts, PxU32 nbTris, const PxU32* indices, PxF32 meshWeldTolerance, PxF32 areaLimit);
~MeshCleaner();
PxU32 mNbVerts;
PxU32 mNbTris;
PxVec3* mVerts;
PxU32* mIndices;
PxU32* mRemap;
};
}
}
#endif

View File

@@ -0,0 +1,332 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuQuantizer.h"
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxArray.h"
using namespace physx;
using namespace Gu;
PxU32 kmeans_cluster3d(const PxVec3* input, // an array of input 3d data points.
PxU32 inputSize, // the number of input data points.
PxU32 clumpCount, // the number of clumps you wish to product.
PxVec3* outputClusters, // The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
PxU32* outputIndices, // A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
float errorThreshold=0.01f, // The error threshold to converge towards before giving up.
float collapseDistance=0.01f); // distance so small it is not worth bothering to create a new clump.
template <class Vec,class Type >
PxU32 kmeans_cluster(const Vec* input,
PxU32 inputCount,
PxU32 clumpCount,
Vec* clusters,
PxU32* outputIndices,
Type threshold, // controls how long it works to converge towards a least errors solution.
Type collapseDistance) // distance between clumps to consider them to be essentially equal.
{
PxU32 convergeCount = 64; // maximum number of iterations attempting to converge to a solution..
PxU32* counts = PX_ALLOCATE(PxU32, clumpCount, "PxU32");
Type error=0;
if ( inputCount <= clumpCount ) // if the number of input points is less than our clumping size, just return the input points.
{
clumpCount = inputCount;
for (PxU32 i=0; i<inputCount; i++)
{
if ( outputIndices )
{
outputIndices[i] = i;
}
clusters[i] = input[i];
counts[i] = 1;
}
}
else
{
PxVec3* centroids = PX_ALLOCATE(PxVec3, clumpCount, "PxVec3");
// Take a sampling of the input points as initial centroid estimates.
for (PxU32 i=0; i<clumpCount; i++)
{
PxU32 index = (i*inputCount)/clumpCount;
PX_ASSERT( index < inputCount );
clusters[i] = input[index];
}
// Here is the main convergence loop
Type old_error = FLT_MAX; // old and initial error estimates are max Type
error = FLT_MAX;
do
{
old_error = error; // preserve the old error
// reset the counts and centroids to current cluster location
for (PxU32 i=0; i<clumpCount; i++)
{
counts[i] = 0;
centroids[i] = PxVec3(PxZero);
}
error = 0;
// For each input data point, figure out which cluster it is closest too and add it to that cluster.
for (PxU32 i=0; i<inputCount; i++)
{
Type min_distance = FLT_MAX;
// find the nearest clump to this point.
for (PxU32 j=0; j<clumpCount; j++)
{
const Type distance = (input[i] - clusters[j]).magnitudeSquared();
if ( distance < min_distance )
{
min_distance = distance;
outputIndices[i] = j; // save which clump this point indexes
}
}
const PxU32 index = outputIndices[i]; // which clump was nearest to this point.
centroids[index]+=input[i];
counts[index]++; // increment the counter indicating how many points are in this clump.
error+=min_distance; // save the error accumulation
}
// Now, for each clump, compute the mean and store the result.
for (PxU32 i=0; i<clumpCount; i++)
{
if ( counts[i] ) // if this clump got any points added to it...
{
const Type recip = 1.0f / Type(counts[i]); // compute the average (center of those points)
centroids[i]*=recip; // compute the average center of the points in this clump.
clusters[i] = centroids[i]; // store it as the new cluster.
}
}
// decrement the convergence counter and bail if it is taking too long to converge to a solution.
convergeCount--;
if (convergeCount == 0 )
{
break;
}
if ( error < threshold ) // early exit if our first guess is already good enough (if all input points are the same)
break;
} while ( PxAbs(error - old_error) > threshold ); // keep going until the error is reduced by this threshold amount.
PX_FREE(centroids);
}
// ok..now we prune the clumps if necessary.
// The rules are; first, if a clump has no 'counts' then we prune it as it's unused.
// The second, is if the centroid of this clump is essentially the same (based on the distance tolerance)
// as an existing clump, then it is pruned and all indices which used to point to it, now point to the one
// it is closest too.
PxU32 outCount = 0; // number of clumps output after pruning performed.
Type d2 = collapseDistance*collapseDistance; // squared collapse distance.
for (PxU32 i=0; i<clumpCount; i++)
{
if ( counts[i] == 0 ) // if no points ended up in this clump, eliminate it.
continue;
// see if this clump is too close to any already accepted clump.
bool add = true;
PxU32 remapIndex = outCount; // by default this clump will be remapped to its current index.
for (PxU32 j=0; j<outCount; j++)
{
Type distance = (clusters[i] - clusters[j]).magnitudeSquared();
if ( distance < d2 )
{
remapIndex = j;
add = false; // we do not add this clump
break;
}
}
// If we have fewer output clumps than input clumps so far, then we need to remap the old indices to the new ones.
if ( outputIndices )
{
if ( outCount != i || !add ) // we need to remap indices! everything that was index 'i' now needs to be remapped to 'outCount'
{
for (PxU32 j=0; j<inputCount; j++)
{
if ( outputIndices[j] == i )
{
outputIndices[j] = remapIndex; //
}
}
}
}
if ( add )
{
clusters[outCount] = clusters[i];
outCount++;
}
}
PX_FREE(counts);
clumpCount = outCount;
return clumpCount;
}
PxU32 kmeans_cluster3d( const PxVec3* input, // an array of input 3d data points.
PxU32 inputSize, // the number of input data points.
PxU32 clumpCount, // the number of clumps you wish to produce
PxVec3* outputClusters, // The output array of clumps 3d vectors, should be at least 'clumpCount' in size.
PxU32* outputIndices, // A set of indices which remaps the input vertices to clumps; should be at least 'inputSize'
float errorThreshold, // The error threshold to converge towards before giving up.
float collapseDistance) // distance so small it is not worth bothering to create a new clump.
{
return kmeans_cluster< PxVec3, float >(input, inputSize, clumpCount, outputClusters, outputIndices, errorThreshold, collapseDistance);
}
class QuantizerImpl : public Quantizer, public PxUserAllocated
{
public:
QuantizerImpl()
{
mScale = PxVec3(1.0f, 1.0f, 1.0f);
mCenter = PxVec3(0.0f, 0.0f, 0.0f);
}
// Use the k-means quantizer, similar results, but much slower.
virtual const PxVec3* kmeansQuantize3D(PxU32 vcount,
const PxVec3* vertices,
PxU32 stride,
bool denormalizeResults,
PxU32 maxVertices,
PxU32& outVertsCount)
{
const PxVec3* ret = NULL;
outVertsCount = 0;
mNormalizedInput.clear();
mQuantizedOutput.clear();
if ( vcount > 0 )
{
normalizeInput(vcount,vertices, stride);
PxVec3* quantizedOutput = PX_ALLOCATE(PxVec3, vcount, "PxVec3");
PxU32* quantizedIndices = PX_ALLOCATE(PxU32, vcount, "PxU32");
outVertsCount = kmeans_cluster3d(&mNormalizedInput[0], vcount, maxVertices, quantizedOutput, quantizedIndices, 0.01f, 0.0001f );
if ( outVertsCount > 0 )
{
if ( denormalizeResults )
{
for (PxU32 i=0; i<outVertsCount; i++)
{
PxVec3 v( quantizedOutput[i] );
v = v.multiply(mScale) + mCenter;
mQuantizedOutput.pushBack(v);
}
}
else
{
for (PxU32 i=0; i<outVertsCount; i++)
{
const PxVec3& v( quantizedOutput[i] );
mQuantizedOutput.pushBack(v);
}
}
ret = &mQuantizedOutput[0];
}
PX_FREE(quantizedOutput);
PX_FREE(quantizedIndices);
}
return ret;
}
virtual void release()
{
PX_DELETE_THIS;
}
virtual const PxVec3& getDenormalizeScale() const
{
return mScale;
}
virtual const PxVec3& getDenormalizeCenter() const
{
return mCenter;
}
private:
void normalizeInput(PxU32 vcount, const PxVec3* vertices, PxU32 stride)
{
const char* vtx = reinterpret_cast<const char *> (vertices);
mNormalizedInput.clear();
mQuantizedOutput.clear();
PxBounds3 bounds;
bounds.setEmpty();
for (PxU32 i=0; i<vcount; i++)
{
const PxVec3& v = *reinterpret_cast<const PxVec3 *> (vtx);
vtx += stride;
bounds.include(v);
}
mCenter = bounds.getCenter();
PxVec3 dim = bounds.getDimensions();
dim *= 1.001f;
mScale = dim*0.5f;
for (PxU32 i = 0; i < 3; i++)
{
if(dim[i] == 0)
mScale[i] = 1.0f;
}
PxVec3 recip;
recip.x = 1.0f / mScale.x;
recip.y = 1.0f / mScale.y;
recip.z = 1.0f / mScale.z;
vtx = reinterpret_cast<const char *> (vertices);
for (PxU32 i=0; i<vcount; i++)
{
PxVec3 v = *reinterpret_cast<const PxVec3 *> (vtx);
vtx += stride;
v = (v - mCenter).multiply(recip);
mNormalizedInput.pushBack(v);
}
}
virtual ~QuantizerImpl()
{
}
private:
PxVec3 mScale;
PxVec3 mCenter;
PxArray<PxVec3> mNormalizedInput;
PxArray<PxVec3> mQuantizedOutput;
};
Quantizer* physx::Gu::createQuantizer()
{
return PX_NEW(QuantizerImpl);
}

View File

@@ -0,0 +1,75 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_QUANTIZER_H
#define GU_QUANTIZER_H
#include "foundation/PxVec3.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
//////////////////////////////////////////////////////////////////////////
// K-means quantization class
// see http://en.wikipedia.org/wiki/K-means_clustering
// implementation from John Ratcliff http://codesuppository.blogspot.ch/2010/12/k-means-clustering-algorithm.html
class Quantizer
{
public:
// quantize the input vertices
virtual const PxVec3* kmeansQuantize3D( PxU32 vcount,
const PxVec3* vertices,
PxU32 stride,
bool denormalizeResults,
PxU32 maxVertices,
PxU32& outVertsCount) = 0;
// returns the denormalized scale
virtual const PxVec3& getDenormalizeScale() const = 0;
// returns the denormalized center
virtual const PxVec3& getDenormalizeCenter() const = 0;
// release internal data
virtual void release() = 0;
protected:
virtual ~Quantizer()
{
}
};
// creates the quantizer class
Quantizer * createQuantizer();
}
}
#endif

View File

@@ -0,0 +1,57 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSeparatingAxes.h"
using namespace physx;
bool Gu::SeparatingAxes::addAxis(const PxVec3& axis)
{
PxU32 numAxes = getNumAxes();
const PxVec3* PX_RESTRICT axes = getAxes();
const PxVec3* PX_RESTRICT axes_end = axes + numAxes;
while(axes<axes_end)
{
if(PxAbs(axis.dot(*axes))>0.9999f)
return false;
axes++;
}
#ifdef SEP_AXIS_FIXED_MEMORY
if(mNbAxes<SEP_AXIS_FIXED_MEMORY)
{
mAxes[mNbAxes++] = axis;
return true;
}
return false;
#else
mAxes.pushBack(axis);
return true;
#endif
}

View File

@@ -0,0 +1,90 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SEPARATINGAXES_H
#define GU_SEPARATINGAXES_H
#include "foundation/PxVec3.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
// PT: this is a number of axes. Multiply by sizeof(PxVec3) for size in bytes.
#define SEP_AXIS_FIXED_MEMORY 256
// This class holds a list of potential separating axes.
// - the orientation is irrelevant so V and -V should be the same vector
// - the scale is irrelevant so V and n*V should be the same vector
// - a given separating axis should appear only once in the class
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
class SeparatingAxes
{
public:
PX_INLINE SeparatingAxes() : mNbAxes(0) {}
bool addAxis(const PxVec3& axis);
PX_FORCE_INLINE const PxVec3* getAxes() const
{
return mAxes;
}
PX_FORCE_INLINE PxU32 getNumAxes() const
{
return mNbAxes;
}
PX_FORCE_INLINE void reset()
{
mNbAxes = 0;
}
private:
PxU32 mNbAxes;
PxVec3 mAxes[SEP_AXIS_FIXED_MEMORY];
};
#if PX_VC
#pragma warning(pop)
#endif
enum PxcSepAxisType
{
SA_NORMAL0, // Normal of object 0
SA_NORMAL1, // Normal of object 1
SA_EE // Cross product of edges
};
}
}
#endif

View File

@@ -0,0 +1,112 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuVertexReducer.h"
#include "foundation/PxAllocator.h"
#include "CmRadixSort.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
// PT: code archeology: this initially came from ICE (IceVertexCloud.h/cpp). Consider dropping it.
ReducedVertexCloud::ReducedVertexCloud(const PxVec3* verts, PxU32 nb_verts) : mNbRVerts(0), mRVerts(NULL), mXRef(NULL)
{
mVerts = verts;
mNbVerts = nb_verts;
}
ReducedVertexCloud::~ReducedVertexCloud()
{
clean();
}
ReducedVertexCloud& ReducedVertexCloud::clean()
{
PX_FREE(mXRef);
PX_FREE(mRVerts);
return *this;
}
/**
* Reduction method. Use this to create a minimal vertex cloud.
* \param rc [out] result structure
* \return true if success
* \warning This is not about welding nearby vertices, here we look for real redundant ones.
*/
bool ReducedVertexCloud::reduce(REDUCEDCLOUD* rc)
{
clean();
mXRef = PX_ALLOCATE(PxU32, mNbVerts, "mXRef");
float* f = PX_ALLOCATE(float, mNbVerts, "tmp");
for(PxU32 i=0;i<mNbVerts;i++)
f[i] = mVerts[i].x;
RadixSortBuffered Radix;
Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED);
for(PxU32 i=0;i<mNbVerts;i++)
f[i] = mVerts[i].y;
Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED);
for(PxU32 i=0;i<mNbVerts;i++)
f[i] = mVerts[i].z;
const PxU32* Sorted = Radix.Sort(reinterpret_cast<const PxU32*>(f), mNbVerts, RADIX_UNSIGNED).GetRanks();
PX_FREE(f);
mNbRVerts = 0;
const PxU32 Junk[] = {PX_INVALID_U32, PX_INVALID_U32, PX_INVALID_U32};
const PxU32* Previous = Junk;
mRVerts = PX_ALLOCATE(PxVec3, mNbVerts, "PxVec3");
PxU32 Nb = mNbVerts;
while(Nb--)
{
const PxU32 Vertex = *Sorted++; // Vertex number
const PxU32* current = reinterpret_cast<const PxU32*>(&mVerts[Vertex]);
if(current[0]!=Previous[0] || current[1]!=Previous[1] || current[2]!=Previous[2])
mRVerts[mNbRVerts++] = mVerts[Vertex];
Previous = current;
mXRef[Vertex] = mNbRVerts-1;
}
if(rc)
{
rc->CrossRef = mXRef;
rc->NbRVerts = mNbRVerts;
rc->RVerts = mRVerts;
}
return true;
}

View File

@@ -0,0 +1,77 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_VERTEX_REDUCER_H
#define GU_VERTEX_REDUCER_H
#include "foundation/PxVec3.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
//! Vertex cloud reduction result structure
struct REDUCEDCLOUD
{
// Out
PxVec3* RVerts; //!< Reduced list
PxU32 NbRVerts; //!< Reduced number of vertices
PxU32* CrossRef; //!< nb_verts remapped indices
};
class ReducedVertexCloud
{
public:
ReducedVertexCloud(const PxVec3* verts, PxU32 nb_verts);
~ReducedVertexCloud();
ReducedVertexCloud& clean();
bool reduce(REDUCEDCLOUD* rc=NULL);
PX_FORCE_INLINE PxU32 getNbVerts() const { return mNbVerts; }
PX_FORCE_INLINE PxU32 getNbReducedVerts() const { return mNbRVerts; }
PX_FORCE_INLINE const PxVec3* getReducedVerts() const { return mRVerts; }
PX_FORCE_INLINE const PxVec3& getReducedVertex(PxU32 i) const { return mRVerts[i]; }
PX_FORCE_INLINE const PxU32* getCrossRefTable() const { return mXRef; }
private:
// Original vertex cloud
PxU32 mNbVerts; //!< Number of vertices
const PxVec3* mVerts; //!< List of vertices (pointer copy)
// Reduced vertex cloud
PxU32 mNbRVerts; //!< Reduced number of vertices
PxVec3* mRVerts; //!< Reduced list of vertices
PxU32* mXRef; //!< Cross-reference table (used to remap topologies)
};
}
}
#endif

View File

@@ -0,0 +1,371 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COLLISION_SDF_H
#define GU_COLLISION_SDF_H
#include "GuSDF.h"
#include "foundation/PxPreprocessor.h"
namespace physx
{
namespace Gu
{
// SDF wrapper for collision computations
// may be shared by CPU/GPU code in the future
//
// \detailed CollisionSDF wraps an `SDF` object, providing useful methods for SDF collision.
//
// Conventions
// * The coarse or background SDF is always referred to as _coarse_. Its associated coordinates are called `cPos` and
// are in units of `mSpacing*mSubgridSize`. The origin is normally at the first grid point.
// * The fine SDF is always referred to as _fine_. Its associated coordinates are called `fPos` and are in units of
// `mSpacing`. If the sdf is dense, `cPos` is equivalent to `fPos`. The origin is normally at the first grid point.
// * Coordinates in the native space of the SDF are denoted `sPos` and are in native units.
template <int BytesPerSparsePixelT>
PX_FORCE_INLINE PxReal decodeSample(PxReal subgridScalingFactor, PxReal subgridMinSdfValue, const PxU8* data, PxU32 index);
template <>
PX_FORCE_INLINE PxReal decodeSample<1>(PxReal subgridScalingFactor, PxReal subgridMinSdfValue, const PxU8* data, PxU32 index)
{
return static_cast<PxReal>(data[index]) * subgridScalingFactor + subgridMinSdfValue;
}
template <>
PX_FORCE_INLINE PxReal decodeSample<2>(PxReal subgridScalingFactor, PxReal subgridMinSdfValue, const PxU8* data, PxU32 index)
{
const PxU16* ptr = reinterpret_cast<const PxU16*>(data);
return static_cast<PxReal>(ptr[index]) * subgridScalingFactor + subgridMinSdfValue;
}
template <>
PX_FORCE_INLINE PxReal decodeSample<4>(PxReal /*unused*/, PxReal /*unused*/, const PxU8* data, PxU32 index)
{
//If 4 bytes per subgrid pixel are available, then normal floats are used. No need to
//de-normalize integer values since the floats already contain real distance values
const PxReal* ptr = reinterpret_cast<const PxReal*>(data);
return ptr[index];
}
struct CollisionSDF
{
CollisionSDF(const SDF& sdf): mSdf(sdf), mSdfBoxLower(sdf.mMeshLower), mFDims(sdf.mDims), mInvGridDx(1.0f / sdf.mSpacing),
mInvSubgridSize(sdf.mSubgridSize ? 1.0f / sdf.mSubgridSize : 0), mIsDense(sdf.mSubgridSize == 0)
{
// assume that `mMeshLower` is also the location of the lowest grid point
if (mIsDense)
{
mCDims = mFDims;
mCSamples = mCDims;
mSdfBoxUpper = sdf.mMeshLower + sdf.mSpacing * PxVec3(static_cast<PxReal>(mFDims.x-1), static_cast<PxReal>(mFDims.y-1), static_cast<PxReal>(mFDims.z-1));
}
else
{
mCDims = Dim3(mFDims.x / sdf.mSubgridSize, mFDims.y / sdf.mSubgridSize, mFDims.z / sdf.mSubgridSize);
mCSamples = Dim3(mCDims.x + 1, mCDims.y + 1, mCDims.z + 1);
mSdfBoxUpper = sdf.mMeshLower + sdf.mSpacing * PxVec3(mFDims);
}
if (mSdf.mBytesPerSparsePixel == 1)
mSubgridScalingFactor = (1.0f / 255.0f) * (mSdf.mSubgridsMaxSdfValue - mSdf.mSubgridsMinSdfValue);
else if (mSdf.mBytesPerSparsePixel == 2)
mSubgridScalingFactor = (1.0f / 65535.0f) * (mSdf.mSubgridsMaxSdfValue - mSdf.mSubgridsMinSdfValue);
const PxU32 fW = mSdf.mSdfSubgrids3DTexBlockDim.x * (mSdf.mSubgridSize + 1),
fH = mSdf.mSdfSubgrids3DTexBlockDim.y * (mSdf.mSubgridSize + 1);
mFStrideY = fW;
mFStrideZ = fW*fH;
}
// clamp `fPos` to the grid on which `sdf` is defined
PX_INLINE PxVec3 clampToFine(const PxVec3& fPos) const
{
if (!mIsDense)
return fPos.maximum(PxVec3(0.0f)).minimum(PxVec3(mFDims));
return fPos.maximum(PxVec3(0.5f)).minimum(PxVec3(mFDims) + PxVec3(0.5f));
}
// clamp `sPos` to the grid on which `sdf` is defined
PX_INLINE PxVec3 clampToBox(const PxVec3& sPos) const
{
if (mIsDense)
return sPos.maximum(mSdfBoxLower+PxVec3(0.5f*mSdf.mSpacing)).minimum(mSdfBoxUpper+PxVec3(0.5f*mSdf.mSpacing));
return sPos.maximum(mSdfBoxLower).minimum(mSdfBoxUpper);
}
// Utility to convert from x/y/z indices to a linear index given the grid size (only width and height required)
PX_FORCE_INLINE PX_CUDA_CALLABLE static PxU32 idx3D(PxU32 x, PxU32 y, PxU32 z, PxU32 width, PxU32 height)
{
return z * width * height + y * width + x;
}
static PX_INLINE PxReal TriLerpWithGradient(
const PxReal f000,
const PxReal f100,
const PxReal f010,
const PxReal f110,
const PxReal f001,
const PxReal f101,
const PxReal f011,
const PxReal f111,
const PxReal tx,
const PxReal ty,
const PxReal tz,
PxVec3* grad = NULL)
{
if (grad)
{
const PxReal a = f100 - f000;
const PxReal b = f110 - f010;
const PxReal c = f101 - f001;
const PxReal d = f111 - f011;
grad->x = a + (b - (a)) * ty + (c + (d - (c)) * ty - (a + (b - (a)) * ty)) * tz;
grad->y = f010 + tx * (b) - (f000 + tx * (a)) + (f011 + tx * (d) - (f001 + tx * (c)) - (f010 + tx * (b) - (f000 + tx * (a)))) * tz;
grad->z = f001 + tx * (c) + ty * (f011 + tx * (d) - (f001 + tx * (c))) - (f000 + tx * (a) + ty * (f010 + tx * (b) - (f000 + tx * (a))));
}
return PxTriLerp(
f000,
f100,
f010,
f110,
f001,
f101,
f011,
f111,
tx,
ty,
tz);
}
template <int BytesPerSparsePixelT>
PX_INLINE PxReal interpolateSubgrid (const PxU8* subgridBase, PxU32 baseIdx, PxReal x, PxReal y, PxReal z, PxVec3* gradient = NULL) const
{
PX_COMPILE_TIME_ASSERT(
BytesPerSparsePixelT == 1 || BytesPerSparsePixelT == 2 || BytesPerSparsePixelT == 4);
return TriLerpWithGradient(
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+1 ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideY ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideY+1 ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideZ ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideZ+1 ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideZ+mFStrideY ),
decodeSample<BytesPerSparsePixelT>(mSubgridScalingFactor, mSdf.mSubgridsMinSdfValue, subgridBase, baseIdx+mFStrideZ+mFStrideY+1),
x, y, z, gradient);
}
// interpolate the values of `array` at position `fPos`
// input vector `fPos` is in units of subgrid cells, with 0 corresponding to the subgrid origin
PX_INLINE PxReal evaluateSubgrid(const PxU32 subgridInfo, const PxVec3& fPos, PxVec3* gradient = NULL) const
{
const PxU32 sgSamples = mSdf.mSubgridSize + 1;
PX_ASSERT(fPos.x >= 0 && fPos.y >= 0 && fPos.z >= 0);
PX_ASSERT(fPos.x < sgSamples && fPos.y < sgSamples && fPos.z < sgSamples);
// find the subgrid offset in memory
PxU32 xSubgrid, ySubgrid, zSubgrid;
SDF::decodeTriple(subgridInfo, xSubgrid, ySubgrid, zSubgrid);
xSubgrid *= (mSdf.mSubgridSize + 1);
ySubgrid *= (mSdf.mSubgridSize + 1);
zSubgrid *= (mSdf.mSubgridSize + 1);
// reference subgrid point
const PxU32 x = PxMin(static_cast<PxU32>(fPos.x), sgSamples - 2),
y = PxMin(static_cast<PxU32>(fPos.y), sgSamples - 2),
z = PxMin(static_cast<PxU32>(fPos.z), sgSamples - 2);
// offset values by the subgrid memory offset
const PxU32 xM = xSubgrid + x, yM = ySubgrid + y, zM = zSubgrid + z;
const PxU32 base = mFStrideZ * zM + mFStrideY * yM + xM;
switch (mSdf.mBytesPerSparsePixel)
{
case 1:
return interpolateSubgrid<1>(mSdf.mSubgridSdf, base, fPos.x - x, fPos.y - y, fPos.z - z, gradient);
case 2:
return interpolateSubgrid<2>(mSdf.mSubgridSdf, base, fPos.x - x, fPos.y - y, fPos.z - z, gradient);
case 4:
return interpolateSubgrid<4>(mSdf.mSubgridSdf, base, fPos.x - x, fPos.y - y, fPos.z - z, gradient);
default: // never reached
PX_ASSERT(0);
return 0;
}
}
// interpolate the values of `array` at position `cPos`.
// `cPos` must be >= 0 and < `cDims`
PX_INLINE PxReal evaluateCoarse(const PxVec3& cPos, PxVec3* gradient = NULL) const
{
PX_ASSERT(cPos.x >= 0 && cPos.y >= 0 && cPos.z >= 0);
PX_ASSERT(cPos.x < mCSamples.x && cPos.y < mCSamples.y && cPos.z < mCSamples.z);
// reference grid point
const PxU32 x = PxMin(static_cast<PxU32>(cPos.x), mCSamples.x - 2),
y = PxMin(static_cast<PxU32>(cPos.y), mCSamples.y - 2),
z = PxMin(static_cast<PxU32>(cPos.z), mCSamples.z - 2);
const PxU32 w = mCSamples.x, h = mCSamples.y;
const PxU32 cStrideY = w, cStrideZ = w*h; // Note that this is sample, not cell, stride
const PxU32 base = cStrideZ * z + cStrideY * y + x;
return TriLerpWithGradient(
mSdf.mSdf[base],
mSdf.mSdf[base+1],
mSdf.mSdf[base+cStrideY],
mSdf.mSdf[base+cStrideY+1],
mSdf.mSdf[base+cStrideZ],
mSdf.mSdf[base+cStrideZ+1],
mSdf.mSdf[base+cStrideZ+cStrideY],
mSdf.mSdf[base+cStrideZ+cStrideY+1],
cPos.x - x, cPos.y - y, cPos.z - z, gradient);
}
// sample the SDF at `fPos`
// input vector `fPos` is in units of (sub-) grid cells, with integer values representing nodes
PX_INLINE PxReal sample(PxVec3 fPos, PxVec3* gradient = NULL) const
{
if (mIsDense)
fPos -= PxVec3(0.5);
PX_ASSERT(fPos.x >= 0 && fPos.y >= 0 && fPos.z >= 0);
PX_ASSERT(fPos.x <= mFDims.x && fPos.y <= mFDims.y && fPos.z <= mFDims.z);
if (mIsDense) // fPos = cPos
return evaluateCoarse(fPos, gradient);
// coarse reference gridpoint index
const Dim3 cBase(
PxMin(static_cast<PxU32>(fPos.x * mInvSubgridSize), mCSamples.x - 2),
PxMin(static_cast<PxU32>(fPos.y * mInvSubgridSize), mCSamples.y - 2),
PxMin(static_cast<PxU32>(fPos.z * mInvSubgridSize), mCSamples.z - 2)
);
const PxU32 i = idx3D(cBase.x, cBase.y, cBase.z, mCDims.x, mCDims.y);
const PxU32 subgridInfo = mSdf.mSubgridStartSlots[i];
if (subgridInfo == 0xFFFFFFFF) // Evaluate (coarse) background of sparse SDF
return evaluateCoarse((fPos * mInvSubgridSize).minimum(PxVec3(PxReal(mCSamples.x), PxReal(mCSamples.y), PxReal(mCSamples.z))), gradient);
// offset to subgrid origin
PxVec3 fPosInSubgrid;
fPosInSubgrid.x = PxMax(0.f, fPos.x - cBase.x * mSdf.mSubgridSize);
fPosInSubgrid.y = PxMax(0.f, fPos.y - cBase.y * mSdf.mSubgridSize);
fPosInSubgrid.z = PxMax(0.f, fPos.z - cBase.z * mSdf.mSubgridSize);
return evaluateSubgrid(subgridInfo, fPosInSubgrid, gradient);
}
// evaluate & interpolate `sdf` (in `sdf`'s "vertex" space) at `sPos`
// when outside the sdf grid, this should be considered an upper bound
// TODO(CA): add more clamps or prove they're unnecessary
inline PxReal dist(const PxVec3& sPos, PxVec3* gradient = NULL) const
{
// clamped to SDF support
const PxVec3 boxPos = clampToBox(sPos);
const PxVec3 diff = sPos - boxPos;
const PxReal diffMag = diff.magnitude();
const PxVec3 fPos = (boxPos - mSdfBoxLower) * mInvGridDx;
const PxReal distance = sample(clampToFine(fPos), gradient) + diffMag; // division inaccuracy necessitates clamp
if (gradient && diffMag > 0.0f)
*gradient = diff; //A quite coarse approximation but it's only used if the sample point is outside of the sdf's bounding box
return distance;
}
// evaluate & interpolate `sdf` at `sPos` (in `sdf`'s "vertex" space), and compute its gradient
inline PxVec3 grad(const PxVec3& sPos) const
{
// clamped to SDF support
const PxVec3 boxPos = clampToBox(sPos);
const PxVec3 fPos = (boxPos - mSdfBoxLower) * mInvGridDx;
PxVec3 gradient;
if ( fPos.x >= 1.0f && fPos.x <= mFDims.x - 2.0f &&
fPos.y >= 1.0f && fPos.y <= mFDims.y - 2.0f &&
fPos.z >= 1.0f && fPos.z <= mFDims.z - 2.0f)
{
gradient.x = sample(PxVec3(fPos.x+1, fPos.y, fPos.z)) - sample(PxVec3(fPos.x-1, fPos.y, fPos.z));
gradient.y = sample(PxVec3(fPos.x, fPos.y+1, fPos.z)) - sample(PxVec3(fPos.x, fPos.y -1, fPos.z));
gradient.z = sample(PxVec3(fPos.x, fPos.y, fPos.z+1)) - sample(PxVec3(fPos.x, fPos.y, fPos.z -1));
}
else
{
const PxReal h = mSdf.mSpacing;
gradient.x = dist(PxVec3(sPos.x+h, sPos.y, sPos.z)) - dist(PxVec3(sPos.x-h, sPos.y, sPos.z));
gradient.y = dist(PxVec3(sPos.x, sPos.y+h, sPos.z)) - dist(PxVec3(sPos.x, sPos.y-h, sPos.z));
gradient.z = dist(PxVec3(sPos.x, sPos.y, sPos.z+h)) - dist(PxVec3(sPos.x, sPos.y, sPos.z-h));
}
gradient *= 0.5f / mSdf.mSpacing;
return gradient;
}
// Estimate the value and gradient of `sdf` at `sPos`, using gradient information when `sPos` is
// outside the SDF grid. Return `PX_MAX_F32` when the distance exceeds `cutoffDistance`
PX_INLINE PxReal distUsingGradient(const PxVec3& sPos, PxVec3& gradient, const PxReal& cutoffDistance) const
{
// clamped to SDF support
const PxVec3 boxPos = clampToBox(sPos);
const PxVec3 diff = sPos - boxPos;
const PxVec3 fPos = (boxPos - mSdfBoxLower) * mInvGridDx;
const PxReal dist = sample(clampToFine(fPos));
if (dist > cutoffDistance)
return PX_MAX_F32;
gradient = grad(sPos);
gradient = (gradient.getNormalized() * PxAbs(dist) + diff).getNormalized();
return dist + gradient.dot(diff);
}
// data members
const SDF& mSdf;
PxVec3 mSdfBoxLower, mSdfBoxUpper; // Positions of the first and last grid points
Dim3 mCDims, mFDims; // background and high-resolution SDF dimensions in cells. Equal if dense.
// fDims is equally divisible by subgridSize, which is also in cells, for sparse SDFs
// the coarse grid has cDims+1 (cDims) samples per dimension for sparse (dense) SDFs
// subgrids have subgridSize+1 samples per dimension
Dim3 mCSamples; // Number of samples in each dimension. Equal to cDims for dense, and cDims + 1 for spares SDFs
PxReal mInvGridDx; // invSdfDx
PxReal mInvSubgridSize; // fineToCoarse
bool mIsDense;
PxReal mSubgridScalingFactor; // purely for optimization
PxU32 mFStrideY, mFStrideZ;
};
}
}
#endif

View File

@@ -0,0 +1,695 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuContactMethodImpl.h"
#include "CmMatrix34.h"
#include "foundation/PxUtilities.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
#define MAX_NB_CTCS 8 + 12*5 + 6*4
#define ABS_GREATER(x, y) (PxAbs(x) > (y))
#define ABS_SMALLER_EQUAL(x, y) (PxAbs(x) <= (y))
//#define AIR(x) ((PxU32&)(x)&SIGN_BITMASK)
//#define ABS_GREATER(x, y) (AIR(x) > IR(y))
//#define ABS_SMALLER_EQUAL(x, y) (AIR(x) <= IR(y))
#if PX_X86 && !PX_OSX
// Some float optimizations ported over from novodex.
//returns non zero if the value is negative.
#define PXC_IS_NEGATIVE(x) (((PxU32&)(x)) & 0x80000000)
#else
//On most platforms using the integer rep is worse(produces LHSs) since the CPU has more registers.
//returns non zero if the value is negative.
#define PXC_IS_NEGATIVE(x) ((x) < 0.0f)
#endif
enum
{
AXIS_A0, AXIS_A1, AXIS_A2,
AXIS_B0, AXIS_B1, AXIS_B2
};
struct VertexInfo
{
PxVec3 pos;
bool penetrate;
bool area;
};
/*static PxI32 doBoxBoxContactGeneration(PxVec3 ctcPts[MAX_NB_CTCS], PxReal depths[MAX_NB_CTCS], PxVec3* ctcNrm,
const PxVec3& extents0, const PxVec3& extents1,
PxU32& collisionData,
const PxMat34& transform0, const PxMat34& transform1, PxReal contactDistance);*/
static PxI32 doBoxBoxContactGeneration(PxContactBuffer& contactBuffer,
const PxVec3& extents0, const PxVec3& extents1,
PxU32& collisionData,
const PxMat34& transform0, const PxMat34& transform1, PxReal contactDistance);
bool Gu::contactBoxBox(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
// Get actual shape data
const PxBoxGeometry& shapeBox0 = checkedCast<PxBoxGeometry>(shape0);
const PxBoxGeometry& shapeBox1 = checkedCast<PxBoxGeometry>(shape1);
PxU32 pd = PxU32(cache.mPairData);
PxI32 Nb = doBoxBoxContactGeneration(contactBuffer,
shapeBox0.halfExtents, shapeBox1.halfExtents,
pd,
Matrix34FromTransform(transform0), Matrix34FromTransform(transform1),
params.mContactDistance);
cache.mPairData = PxTo8(pd);
if(!Nb)
{
cache.mPairData = 0; // Mark as separated for temporal coherence
return false; // WARNING: the contact stream code below used to output stuff even for 0 contacts (!). Now we just return here.
}
return true;
}
// face => 4 vertices of a face of the cube (i.e. a quad)
static PX_FORCE_INLINE PxReal IsInYZ(const PxReal y, const PxReal z, const VertexInfo** PX_RESTRICT face)
{
// Warning, indices have been remapped. We're now actually like this:
//
// 3+------+2
// | | |
// | *--|
// | (y,z)|
// 0+------+1
PxReal PreviousY = face[3]->pos.y;
PxReal PreviousZ = face[3]->pos.z;
// Loop through quad vertices
for(PxI32 i=0; i<4; i++)
{
const PxReal CurrentY = face[i]->pos.y;
const PxReal CurrentZ = face[i]->pos.z;
// |CurrentY - PreviousY y - PreviousY|
// |CurrentZ - PreviousZ z - PreviousZ|
// => similar to backface culling, check each one of the 4 triangles are consistent, in which case
// the point is within the parallelogram.
if((CurrentY - PreviousY)*(z - PreviousZ) - (CurrentZ - PreviousZ)*(y - PreviousY) >= 0.0f) return -1.0f;
PreviousY = CurrentY;
PreviousZ = CurrentZ;
}
PxReal x = face[0]->pos.x;
{
const PxReal ay = y - face[0]->pos.y;
const PxReal az = z - face[0]->pos.z;
PxVec3 b = face[1]->pos - face[0]->pos; // ### could be precomputed ?
x += b.x * (ay*b.y + az*b.z) / b.magnitudeSquared(); // ### could be precomputed ?
b = face[3]->pos - face[0]->pos; // ### could be precomputed ?
x += b.x * (ay*b.y + az*b.z) / b.magnitudeSquared(); // ### could be precomputed ?
}
return x;
}
// Test with respect to the quad defined by (0,-y1,-z1) and (0,y1,z1)
// +------+ y1 y
// | | |
// | * | |
// | | |
// +------+ -y1 *-----z
static PxI32 generateContacts(//PxVec3 ctcPts[], PxReal depths[],
PxContactBuffer& contactBuffer, const PxVec3& contactNormal,
PxReal y1, PxReal z1, const PxVec3& box2,
const PxMat34& transform0, const PxMat34& transform1, PxReal contactDistance)
{
// PxI32 NbContacts=0;
contactBuffer.reset();
y1 += contactDistance;
z1 += contactDistance;
const PxMat34 trans1to0 = transform0.getInverseRT() * transform1;
VertexInfo vtx[8]; // The 8 cube vertices
// PxI32 i;
// 6+------+7
// /| /|
// / | / |
// / 4+---/--+5
// 2+------+3 / y z
// | / | / | /
// |/ |/ |/
// 0+------+1 *---x
{
const PxVec3 ex = trans1to0.m.column0 * box2.x;
const PxVec3 ey = trans1to0.m.column1 * box2.y;
const PxVec3 ez = trans1to0.m.column2 * box2.z;
/*
vtx[0].pos = mat.pos - ex - ey - ez;
vtx[1].pos = mat.pos + ex - ey - ez;
vtx[2].pos = mat.pos - ex + ey - ez;
vtx[3].pos = mat.pos + ex + ey - ez;
vtx[4].pos = mat.pos - ex - ey + ez;
vtx[5].pos = mat.pos + ex - ey + ez;
vtx[6].pos = mat.pos - ex + ey + ez;
vtx[7].pos = mat.pos + ex + ey + ez;
*/
// 12 vector ops = 12*3 = 36 FPU ops
vtx[0].pos = vtx[2].pos = vtx[4].pos = vtx[6].pos = trans1to0.p - ex;
vtx[1].pos = vtx[3].pos = vtx[5].pos = vtx[7].pos = trans1to0.p + ex;
PxVec3 e = ey+ez;
vtx[0].pos -= e;
vtx[1].pos -= e;
vtx[6].pos += e;
vtx[7].pos += e;
e = ey-ez;
vtx[2].pos += e;
vtx[3].pos += e;
vtx[4].pos -= e;
vtx[5].pos -= e;
}
// Create vertex info for 8 vertices
for(PxU32 i=0; i<8; i++)
{
// Vertex suivant
VertexInfo& p = vtx[i];
// test the point with respect to the x = 0 plane
// if(p.pos.x < 0)
if(p.pos.x < -contactDistance) //if(PXC_IS_NEGATIVE(p.pos.x))
{
p.area = false;
p.penetrate = false;
continue;
}
{
// we penetrated the quad plane
p.penetrate = true;
// test to see if we are in the quad
// PxAbs => thus we test Y with respect to -Y1 and +Y1 (same for Z)
// if(PxAbs(p->pos.y) <= y1 && PxAbs(p->pos.z) <= z1)
if(ABS_SMALLER_EQUAL(p.pos.y, y1) && ABS_SMALLER_EQUAL(p.pos.z, z1))
{
// the point is inside the quad
p.area=true;
// Since we are testing with respect to x = 0, the penetration is directly the x coordinate.
// depths[NbContacts] = p.pos.x;
// We take the vertex as the impact point
// ctcPts[NbContacts++] = p.pos;
contactBuffer.contact(p.pos, contactNormal, -p.pos.x);
}
else
{
p.area=false;
}
}
}
// Teste 12 edges on the quad
static const PxI32 indices[]={ 0,1, 1,3, 3,2, 2,0, 4,5, 5,7, 7,6, 6,4, 0,4, 1,5, 2,6, 3,7, };
const PxI32* runningLine = indices;
const PxI32* endLine = runningLine+24;
while(runningLine!=endLine)
{
// The two vertices of the current edge
const VertexInfo* p1 = &vtx[*runningLine++];
const VertexInfo* p2 = &vtx[*runningLine++];
// Penetrate|Area|Penetrate|Area => 16 cases
// We only take the edges that at least penetrated the quad's plane into account.
if(p1->penetrate || p2->penetrate)
// if(p1->penetrate + p2->penetrate) // One branch only
{
// If at least one of the two vertices is not in the quad...
if(!p1->area || !p2->area)
// if(!p1->area + !p2->area) // One branch only
{
// Test y
if(p1->pos.y > p2->pos.y) { const VertexInfo* tmp=p1; p1=p2; p2=tmp; }
// Impact on the +Y1 edge of the quad
if(p1->pos.y < +y1 && p2->pos.y >= +y1)
// => a point under Y1, the other above
{
// Case 1
PxReal a = (+y1 - p1->pos.y)/(p2->pos.y - p1->pos.y);
PxReal z = p1->pos.z + (p2->pos.z - p1->pos.z)*a;
if(PxAbs(z) <= z1)
{
PxReal x = p1->pos.x + (p2->pos.x - p1->pos.x)*a;
if(x+contactDistance>=0.0f)
{
// depths[NbContacts] = x;
// ctcPts[NbContacts++] = PxVec3(x, y1, z);
contactBuffer.contact(PxVec3(x, y1, z), contactNormal, -x);
}
}
}
// Impact on the edge -Y1 of the quad
if(p1->pos.y < -y1 && p2->pos.y >= -y1)
{
// Case 2
PxReal a = (-y1 - p1->pos.y)/(p2->pos.y - p1->pos.y);
PxReal z = p1->pos.z + (p2->pos.z - p1->pos.z)*a;
if(PxAbs(z) <= z1)
{
PxReal x = p1->pos.x + (p2->pos.x - p1->pos.x)*a;
if(x+contactDistance>=0.0f)
{
// depths[NbContacts] = x;
// ctcPts[NbContacts++] = PxVec3(x, -y1, z);
contactBuffer.contact(PxVec3(x, -y1, z), contactNormal, -x);
}
}
}
// Test z
if(p1->pos.z > p2->pos.z) { const VertexInfo* tmp=p1; p1=p2; p2=tmp; }
// Impact on the edge +Z1 of the quad
if(p1->pos.z < +z1 && p2->pos.z >= +z1)
{
// Case 3
PxReal a = (+z1 - p1->pos.z)/(p2->pos.z - p1->pos.z);
PxReal y = p1->pos.y + (p2->pos.y - p1->pos.y)*a;
if(PxAbs(y) <= y1)
{
PxReal x = p1->pos.x + (p2->pos.x - p1->pos.x)*a;
if(x+contactDistance>=0.0f)
{
// depths[NbContacts] = x;
// ctcPts[NbContacts++] = PxVec3(x, y, z1);
contactBuffer.contact(PxVec3(x, y, z1), contactNormal, -x);
}
}
}
// Impact on the edge -Z1 of the quad
if(p1->pos.z < -z1 && p2->pos.z >= -z1)
{
// Case 4
PxReal a = (-z1 - p1->pos.z)/(p2->pos.z - p1->pos.z);
PxReal y = p1->pos.y + (p2->pos.y - p1->pos.y)*a;
if(PxAbs(y) <= y1)
{
PxReal x = p1->pos.x + (p2->pos.x - p1->pos.x)*a;
if(x+contactDistance>=0.0f)
{
// depths[NbContacts] = x;
// ctcPts[NbContacts++] = PxVec3(x, y, -z1);
contactBuffer.contact(PxVec3(x, y, -z1), contactNormal, -x);
}
}
}
}
// The case where one point penetrates the plane, and the other is not in the quad.
if((!p1->penetrate && !p2->area) || (!p2->penetrate && !p1->area))
{
// Case 5
PxReal a = (-p1->pos.x)/(p2->pos.x - p1->pos.x);
PxReal y = p1->pos.y + (p2->pos.y - p1->pos.y)*a;
if(PxAbs(y) <= y1)
{
PxReal z = p1->pos.z + (p2->pos.z - p1->pos.z)*a;
if(PxAbs(z) <= z1)
{
// depths[NbContacts] = 0;
// ctcPts[NbContacts++] = PxVec3(0, y, z);
contactBuffer.contact(PxVec3(0, y, z), contactNormal, 0);
}
}
}
}
}
{
// 6 quads => 6 faces of the cube
static const PxI32 face[][4]={ {0,1,3,2}, {1,5,7,3}, {5,4,6,7}, {4,0,2,6}, {2,3,7,6}, {0,4,5,1} };
PxI32 addflg=0;
for(PxU32 i=0; i<6 && addflg!=0x0f; i++)
{
const PxI32* p = face[i];
const VertexInfo* q[4];
if((q[0]=&vtx[p[0]])->penetrate && (q[1]=&vtx[p[1]])->penetrate && (q[2]=&vtx[p[2]])->penetrate && (q[3]=&vtx[p[3]])->penetrate)
{
if(!q[0]->area || !q[1]->area || !q[2]->area || !q[3]->area)
{
if(!(addflg&1)) { PxReal x = IsInYZ(-y1, -z1, q); if(x>=0.0f) { addflg|=1; contactBuffer.contact(PxVec3(x, -y1, -z1), contactNormal, -x); /*depths[NbContacts]=x; ctcPts[NbContacts++] = PxVec3(x, -y1, -z1);*/ } }
if(!(addflg&2)) { PxReal x = IsInYZ(+y1, -z1, q); if(x>=0.0f) { addflg|=2; contactBuffer.contact(PxVec3(x, +y1, -z1), contactNormal, -x); /*depths[NbContacts]=x; ctcPts[NbContacts++] = PxVec3(x, +y1, -z1);*/ } }
if(!(addflg&4)) { PxReal x = IsInYZ(-y1, +z1, q); if(x>=0.0f) { addflg|=4; contactBuffer.contact(PxVec3(x, -y1, +z1), contactNormal, -x); /*depths[NbContacts]=x; ctcPts[NbContacts++] = PxVec3(x, -y1, +z1);*/ } }
if(!(addflg&8)) { PxReal x = IsInYZ(+y1, +z1, q); if(x>=0.0f) { addflg|=8; contactBuffer.contact(PxVec3(x, +y1, +z1), contactNormal, -x); /*depths[NbContacts]=x; ctcPts[NbContacts++] = PxVec3(x, +y1, +z1);*/ } }
}
}
}
}
// for(i=0; i<NbContacts; i++)
for(PxU32 i=0; i<contactBuffer.count; i++)
// ctcPts[i] = transform0.transform(ctcPts[i]); // local to world
contactBuffer.contacts[i].point = transform0.transform(contactBuffer.contacts[i].point); // local to world
//PX_ASSERT(NbContacts); //if this did not make contacts then something went wrong in theory, but even the old code without distances had this flaw!
// return NbContacts;
return PxI32(contactBuffer.count);
}
//static PxI32 doBoxBoxContactGeneration(PxVec3 ctcPts[MAX_NB_CTCS], PxReal depths[MAX_NB_CTCS], PxVec3* ctcNrm,
static PxI32 doBoxBoxContactGeneration(PxContactBuffer& contactBuffer,
const PxVec3& extents0, const PxVec3& extents1,
PxU32& collisionData,
const PxMat34& transform0, const PxMat34& transform1, PxReal contactDistance)
{
PxReal aafC[3][3]; // matrix C = A^T B, c_{ij} = Dot(A_i,B_j)
PxReal aafAbsC[3][3]; // |c_{ij}|
PxReal afAD[3]; // Dot(A_i,D)
PxReal d1[6];
PxReal overlap[6];
PxVec3 kD = transform1.p - transform0.p;
const PxVec3& axis00 = transform0.m.column0;
const PxVec3& axis01 = transform0.m.column1;
const PxVec3& axis02 = transform0.m.column2;
const PxVec3& axis10 = transform1.m.column0;
const PxVec3& axis11 = transform1.m.column1;
const PxVec3& axis12 = transform1.m.column2;
// Perform Class I tests
aafC[0][0] = axis00.dot(axis10);
aafC[0][1] = axis00.dot(axis11);
aafC[0][2] = axis00.dot(axis12);
afAD[0] = axis00.dot(kD);
aafAbsC[0][0] = 1e-6f + PxAbs(aafC[0][0]);
aafAbsC[0][1] = 1e-6f + PxAbs(aafC[0][1]);
aafAbsC[0][2] = 1e-6f + PxAbs(aafC[0][2]);
d1[AXIS_A0] = afAD[0];
PxReal d0 = extents0.x + extents1.x*aafAbsC[0][0] + extents1.y*aafAbsC[0][1] + extents1.z*aafAbsC[0][2];
overlap[AXIS_A0] = d0 - PxAbs(d1[AXIS_A0]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_A0])) return 0;
aafC[1][0] = axis01.dot(axis10);
aafC[1][1] = axis01.dot(axis11);
aafC[1][2] = axis01.dot(axis12);
afAD[1] = axis01.dot(kD);
aafAbsC[1][0] = 1e-6f + PxAbs(aafC[1][0]);
aafAbsC[1][1] = 1e-6f + PxAbs(aafC[1][1]);
aafAbsC[1][2] = 1e-6f + PxAbs(aafC[1][2]);
d1[AXIS_A1] = afAD[1];
d0 = extents0.y + extents1.x*aafAbsC[1][0] + extents1.y*aafAbsC[1][1] + extents1.z*aafAbsC[1][2];
overlap[AXIS_A1] = d0 - PxAbs(d1[AXIS_A1]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_A1])) return 0;
aafC[2][0] = axis02.dot(axis10);
aafC[2][1] = axis02.dot(axis11);
aafC[2][2] = axis02.dot(axis12);
afAD[2] = axis02.dot(kD);
aafAbsC[2][0] = 1e-6f + PxAbs(aafC[2][0]);
aafAbsC[2][1] = 1e-6f + PxAbs(aafC[2][1]);
aafAbsC[2][2] = 1e-6f + PxAbs(aafC[2][2]);
d1[AXIS_A2] = afAD[2];
d0 = extents0.z + extents1.x*aafAbsC[2][0] + extents1.y*aafAbsC[2][1] + extents1.z*aafAbsC[2][2];
overlap[AXIS_A2] = d0 - PxAbs(d1[AXIS_A2]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_A2])) return 0;
// Perform Class II tests
d1[AXIS_B0] = axis10.dot(kD);
d0 = extents1.x + extents0.x*aafAbsC[0][0] + extents0.y*aafAbsC[1][0] + extents0.z*aafAbsC[2][0];
overlap[AXIS_B0] = d0 - PxAbs(d1[AXIS_B0]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_B0])) return 0;
d1[AXIS_B1] = axis11.dot(kD);
d0 = extents1.y + extents0.x*aafAbsC[0][1] + extents0.y*aafAbsC[1][1] + extents0.z*aafAbsC[2][1];
overlap[AXIS_B1] = d0 - PxAbs(d1[AXIS_B1]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_B1])) return 0;
d1[AXIS_B2] = axis12.dot(kD);
d0 = extents1.z + extents0.x*aafAbsC[0][2] + extents0.y*aafAbsC[1][2] + extents0.z*aafAbsC[2][2];
overlap[AXIS_B2] = d0 - PxAbs(d1[AXIS_B2]) + contactDistance;
if(PXC_IS_NEGATIVE(overlap[AXIS_B2])) return 0;
// Perform Class III tests - we don't need to store distances for those ones.
// We only test those axes when objects are likely to be separated, i.e. when they where previously non-colliding. For stacks, we'll have
// to do full contact generation anyway, and those tests are useless - so we skip them. This is similar to what I did in Opcode.
if(!collisionData) // separated or first run
{
PxReal d = afAD[2]*aafC[1][0] - afAD[1]*aafC[2][0];
d0 = contactDistance + extents0.y*aafAbsC[2][0] + extents0.z*aafAbsC[1][0] + extents1.y*aafAbsC[0][2] + extents1.z*aafAbsC[0][1];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[2]*aafC[1][1] - afAD[1]*aafC[2][1];
d0 = contactDistance + extents0.y*aafAbsC[2][1] + extents0.z*aafAbsC[1][1] + extents1.x*aafAbsC[0][2] + extents1.z*aafAbsC[0][0];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[2]*aafC[1][2] - afAD[1]*aafC[2][2];
d0 = contactDistance + extents0.y*aafAbsC[2][2] + extents0.z*aafAbsC[1][2] + extents1.x*aafAbsC[0][1] + extents1.y*aafAbsC[0][0];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[0]*aafC[2][0] - afAD[2]*aafC[0][0];
d0 = contactDistance + extents0.x*aafAbsC[2][0] + extents0.z*aafAbsC[0][0] + extents1.y*aafAbsC[1][2] + extents1.z*aafAbsC[1][1];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[0]*aafC[2][1] - afAD[2]*aafC[0][1];
d0 = contactDistance + extents0.x*aafAbsC[2][1] + extents0.z*aafAbsC[0][1] + extents1.x*aafAbsC[1][2] + extents1.z*aafAbsC[1][0];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[0]*aafC[2][2] - afAD[2]*aafC[0][2];
d0 = contactDistance + extents0.x*aafAbsC[2][2] + extents0.z*aafAbsC[0][2] + extents1.x*aafAbsC[1][1] + extents1.y*aafAbsC[1][0];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[1]*aafC[0][0] - afAD[0]*aafC[1][0];
d0 = contactDistance + extents0.x*aafAbsC[1][0] + extents0.y*aafAbsC[0][0] + extents1.y*aafAbsC[2][2] + extents1.z*aafAbsC[2][1];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[1]*aafC[0][1] - afAD[0]*aafC[1][1];
d0 = contactDistance + extents0.x*aafAbsC[1][1] + extents0.y*aafAbsC[0][1] + extents1.x*aafAbsC[2][2] + extents1.z*aafAbsC[2][0];
if(ABS_GREATER(d, d0)) return 0;
d = afAD[1]*aafC[0][2] - afAD[0]*aafC[1][2];
d0 = contactDistance + extents0.x*aafAbsC[1][2] + extents0.y*aafAbsC[0][2] + extents1.x*aafAbsC[2][1] + extents1.y*aafAbsC[2][0];
if(ABS_GREATER(d, d0)) return 0;
}
/* djs - tempUserData can be zero when it gets here
- maybe if there was no previous axis?
- which causes stack corruption, and thence a crash, in .NET
PT: right! At first tempUserData wasn't ever supposed to be zero, but then I used that
value to mark separation of boxes, and forgot to update the code below. Now I think
the test is redundant with the one performed above, and the line could eventually
be merged in the previous block. I'll do that later when removing all the #defines.
*/
// NB: the "16" here has nothing to do with MAX_NB_CTCS. Don't touch.
if(collisionData) // if initialized & not previously separated
overlap[collisionData-1] *= 0.999f; // Favorise previous axis .999 is too little.
PxReal minimum = PX_MAX_REAL;
PxI32 minIndex = 0;
for(PxU32 i=AXIS_A0; i<6; i++)
{
PxReal d = overlap[i];
if(d>=0.0f && d<minimum) { minimum=d; minIndex=PxI32(i); } // >=0 !! otherwise bug at sep = 0
}
collisionData = PxU32(minIndex + 1); // Leave "0" for separation
#if PX_X86
const PxU32 sign = PXC_IS_NEGATIVE(d1[minIndex]);
#else
const PxU32 sign = PxU32(PXC_IS_NEGATIVE(d1[minIndex]));
#endif
PxMat34 trs;
PxVec3 ctcNrm;
switch(minIndex)
{
default:
return 0;
case AXIS_A0:
// *ctcNrm = axis00;
if(sign)
{
ctcNrm = axis00;
trs.m = transform0.m;
trs.p = transform0.p - extents0.x*axis00;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis00;
trs.m.column0 = -axis00;
trs.m.column1 = -axis01;
trs.m.column2 = axis02;
trs.p = transform0.p + extents0.x*axis00;
}
// return generateContacts(ctcPts, depths, extents0.y, extents0.z, extents1, trs, transform1, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents0.y, extents0.z, extents1, trs, transform1, contactDistance);
case AXIS_A1:
// *ctcNrm = axis01;
trs.m.column2 = axis00; // Factored out
if(sign)
{
ctcNrm = axis01;
trs.m.column0 = axis01;
trs.m.column1 = axis02;
trs.p = transform0.p - extents0.y*axis01;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis01;
trs.m.column0 = -axis01;
trs.m.column1 = -axis02;
trs.p = transform0.p + extents0.y*axis01;
}
// return generateContacts(ctcPts, depths, extents0.z, extents0.x, extents1, trs, transform1, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents0.z, extents0.x, extents1, trs, transform1, contactDistance);
case AXIS_A2:
// *ctcNrm = axis02;
trs.m.column2 = axis01; // Factored out
if(sign)
{
ctcNrm = axis02;
trs.m.column0 = axis02;
trs.m.column1 = axis00;
trs.p = transform0.p - extents0.z*axis02;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis02;
trs.m.column0 = -axis02;
trs.m.column1 = -axis00;
trs.p = transform0.p + extents0.z*axis02;
}
// return generateContacts(ctcPts, depths, extents0.x, extents0.y, extents1, trs, transform1, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents0.x, extents0.y, extents1, trs, transform1, contactDistance);
case AXIS_B0:
// *ctcNrm = axis10;
if(sign)
{
ctcNrm = axis10;
trs.m.column0 = -axis10;
trs.m.column1 = -axis11;
trs.m.column2 = axis12;
trs.p = transform1.p + extents1.x*axis10;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis10;
trs.m = transform1.m;
trs.p = transform1.p - extents1.x*axis10;
}
// return generateContacts(ctcPts, depths, extents1.y, extents1.z, extents0, trs, transform0, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents1.y, extents1.z, extents0, trs, transform0, contactDistance);
case AXIS_B1:
// *ctcNrm = axis11;
trs.m.column2 = axis10; // Factored out
if(sign)
{
ctcNrm = axis11;
trs.m.column0 = -axis11;
trs.m.column1 = -axis12;
trs.p = transform1.p + extents1.y*axis11;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis11;
trs.m.column0 = axis11;
trs.m.column1 = axis12;
trs.m.column2 = axis10;
trs.p = transform1.p - extents1.y*axis11;
}
// return generateContacts(ctcPts, depths, extents1.z, extents1.x, extents0, trs, transform0, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents1.z, extents1.x, extents0, trs, transform0, contactDistance);
case AXIS_B2:
// *ctcNrm = axis12;
trs.m.column2 = axis11; // Factored out
if(sign)
{
ctcNrm = axis12;
trs.m.column0 = -axis12;
trs.m.column1 = -axis10;
trs.p = transform1.p + extents1.z*axis12;
}
else
{
// *ctcNrm = -*ctcNrm;
ctcNrm = -axis12;
trs.m.column0 = axis12;
trs.m.column1 = axis10;
trs.p = transform1.p - extents1.z*axis12;
}
// return generateContacts(ctcPts, depths, extents1.x, extents1.y, extents0, trs, transform0, contactDistance);
return generateContacts(contactBuffer, ctcNrm, extents1.x, extents1.y, extents0, trs, transform0, contactDistance);
}
}

View File

@@ -0,0 +1,442 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuIntersectionRayBox.h"
#include "GuDistanceSegmentBox.h"
#include "GuInternal.h"
#include "GuContactMethodImpl.h"
#include "GuBoxConversion.h"
#include "foundation/PxUtilities.h"
using namespace physx;
using namespace Gu;
/*namespace Gu
{
const PxU8* getBoxEdges();
}*/
/////////
/*#include "common/PxRenderOutput.h"
#include "PxsContext.h"
static void gVisualizeBox(const Box& box, PxcNpThreadContext& context, PxU32 color=0xffffff)
{
PxMat33 rot(box.base.column0, box.base.column1, box.base.column2);
PxMat44 m(rot, box.origin);
DebugBox db(box.extent);
PxRenderOutput& out = context.mRenderOutput;
out << color << m;
out << db;
}
static void gVisualizeLine(const PxVec3& a, const PxVec3& b, PxcNpThreadContext& context, PxU32 color=0xffffff)
{
PxMat44 m = PxMat44::identity();
RenderOutput& out = context.mRenderOutput;
out << color << m << RenderOutput::LINES << a << b;
}*/
/////////
static const PxReal fatBoxEdgeCoeff = 0.01f;
static bool intersectEdgeEdgePreca(const PxVec3& p1, const PxVec3& p2, const PxVec3& v1, const PxPlane& plane, PxU32 i, PxU32 j, float coeff, const PxVec3& dir, const PxVec3& p3, const PxVec3& p4, PxReal& dist, PxVec3& ip)
{
// if colliding edge (p3,p4) does not cross plane return no collision
// same as if p3 and p4 on same side of plane return 0
//
// Derivation:
// d3 = d(p3, P) = (p3 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// d4 = d(p4, P) = (p4 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// if d3 and d4 have the same sign, they're on the same side of the plane => no collision
// We test both sides at the same time by only testing Sign(d3 * d4).
// ### put that in the Plane class
// ### also check that code in the triangle class that might be similar
const PxReal d3 = plane.distance(p3);
PxReal temp = d3 * plane.distance(p4);
if(temp>0.0f) return false;
// if colliding edge (p3,p4) and plane are parallel return no collision
PxVec3 v2 = p4 - p3;
temp = plane.n.dot(v2);
if(temp==0.0f) return false; // ### epsilon would be better
// compute intersection point of plane and colliding edge (p3,p4)
ip = p3-v2*(d3/temp);
// compute distance of intersection from line (ip, -dir) to line (p1,p2)
dist = (v1[i]*(ip[j]-p1[j])-v1[j]*(ip[i]-p1[i]))*coeff;
if(dist<0.0f) return false;
// compute intersection point on edge (p1,p2) line
ip -= dist*dir;
// check if intersection point (ip) is between edge (p1,p2) vertices
temp = (p1.x-ip.x)*(p2.x-ip.x)+(p1.y-ip.y)*(p2.y-ip.y)+(p1.z-ip.z)*(p2.z-ip.z);
if(temp<0.0f) return true; // collision found
return false; // no collision
}
static bool GuTestAxis(const PxVec3& axis, const Segment& segment, PxReal radius, const Box& box, PxReal& depth)
{
// Project capsule
PxReal min0 = segment.p0.dot(axis);
PxReal max0 = segment.p1.dot(axis);
if(min0>max0) PxSwap(min0, max0);
min0 -= radius;
max0 += radius;
// Project box
PxReal Min1, Max1;
{
const PxReal BoxCen = box.center.dot(axis);
const PxReal BoxExt =
PxAbs(box.rot.column0.dot(axis)) * box.extents.x
+ PxAbs(box.rot.column1.dot(axis)) * box.extents.y
+ PxAbs(box.rot.column2.dot(axis)) * box.extents.z;
Min1 = BoxCen - BoxExt;
Max1 = BoxCen + BoxExt;
}
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
static bool GuCapsuleOBBOverlap3(const Segment& segment, PxReal radius, const Box& box, PxReal* t=NULL, PxVec3* pp=NULL)
{
PxVec3 Sep(PxReal(0));
PxReal PenDepth = PX_MAX_REAL;
// Test normals
for(PxU32 i=0;i<3;i++)
{
PxReal d;
if(!GuTestAxis(box.rot[i], segment, radius, box, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = box.rot[i];
}
}
// Test edges
PxVec3 CapsuleAxis(segment.p1 - segment.p0);
CapsuleAxis = CapsuleAxis.getNormalized();
for(PxU32 i=0;i<3;i++)
{
PxVec3 Cross = CapsuleAxis.cross(box.rot[i]);
if(!isAlmostZero(Cross))
{
Cross = Cross.getNormalized();
PxReal d;
if(!GuTestAxis(Cross, segment, radius, box, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = Cross;
}
}
}
const PxVec3 Witness = segment.computeCenter() - box.center;
if(Sep.dot(Witness) < 0.0f)
Sep = -Sep;
if(t)
*t = PenDepth;
if(pp)
*pp = Sep;
return true;
}
static void GuGenerateVFContacts( PxContactBuffer& contactBuffer,
//
const Segment& segment,
PxReal radius,
//
const Box& worldBox,
//
const PxVec3& normal,
PxReal contactDistance)
{
const PxVec3 Max = worldBox.extents;
const PxVec3 Min = -worldBox.extents;
const PxVec3 tmp2 = - worldBox.rot.transformTranspose(normal);
const PxVec3* PX_RESTRICT Ptr = &segment.p0;
for(PxU32 i=0;i<2;i++)
{
const PxVec3& Pos = Ptr[i];
const PxVec3 tmp = worldBox.rot.transformTranspose(Pos - worldBox.center);
PxReal tnear, tfar;
int Res = intersectRayAABB(Min, Max, tmp, tmp2, tnear, tfar);
if(Res!=-1 && tnear < radius + contactDistance)
{
contactBuffer.contact(Pos - tnear * normal, normal, tnear - radius);
}
}
}
// PT: this looks similar to PxcGenerateEEContacts2 but it is mandatory to properly handle thin capsules.
static void GuGenerateEEContacts( PxContactBuffer& contactBuffer,
//
const Segment& segment,
const PxReal radius,
//
const Box& worldBox,
//
const PxVec3& normal)
{
const PxU8* PX_RESTRICT Indices = getBoxEdges();
PxVec3 Pts[8];
worldBox.computeBoxPoints(Pts);
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatBoxEdgeCoeff);
// PT: precomputed part of edge-edge intersection test
// const PxVec3 v1 = segment.p1 - segment.p0;
const PxVec3 v1 = s1 - s0;
PxPlane plane;
plane.n = v1.cross(normal);
// plane.d = -(plane.normal|segment.p0);
plane.d = -(plane.n.dot(s0));
PxU32 ii,jj;
closestAxis(plane.n, ii, jj);
const float coeff = 1.0f /(v1[ii]*normal[jj]-v1[jj]*normal[ii]);
for(PxU32 i=0;i<12;i++)
{
// PxVec3 p1 = Pts[*Indices++];
// PxVec3 p2 = Pts[*Indices++];
// makeFatEdge(p1, p2, fatBoxEdgeCoeff); // PT: TODO: make fat segment instead
const PxVec3& p1 = Pts[*Indices++];
const PxVec3& p2 = Pts[*Indices++];
// PT: keep original code in case something goes wrong
// PxReal dist;
// PxVec3 ip;
// if(intersectEdgeEdge(p1, p2, -normal, segment.p0, segment.p1, dist, ip))
// contactBuffer.contact(ip, normal, - (radius + dist));
PxReal dist;
PxVec3 ip;
if(intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip))
// if(intersectEdgeEdgePreca(segment.p0, segment.p1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip))
{
contactBuffer.contact(ip-normal*dist, normal, - (radius + dist));
// if(contactBuffer.count==2) // PT: we only need 2 contacts to be stable
// return;
}
}
}
static void GuGenerateEEContacts2( PxContactBuffer& contactBuffer,
//
const Segment& segment,
PxReal radius,
//
const Box& worldBox,
//
const PxVec3& normal,
PxReal contactDistance)
{
const PxU8* PX_RESTRICT Indices = getBoxEdges();
PxVec3 Pts[8];
worldBox.computeBoxPoints(Pts);
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatBoxEdgeCoeff);
// PT: precomputed part of edge-edge intersection test
// const PxVec3 v1 = segment.p1 - segment.p0;
const PxVec3 v1 = s1 - s0;
PxPlane plane;
plane.n = -(v1.cross(normal));
// plane.d = -(plane.normal|segment.p0);
plane.d = -(plane.n.dot(s0));
PxU32 ii,jj;
closestAxis(plane.n, ii, jj);
const float coeff = 1.0f /(v1[jj]*normal[ii]-v1[ii]*normal[jj]);
for(PxU32 i=0;i<12;i++)
{
// PxVec3 p1 = Pts[*Indices++];
// PxVec3 p2 = Pts[*Indices++];
// makeFatEdge(p1, p2, fatBoxEdgeCoeff); // PT: TODO: make fat segment instead
const PxVec3& p1 = Pts[*Indices++];
const PxVec3& p2 = Pts[*Indices++];
// PT: keep original code in case something goes wrong
// PxReal dist;
// PxVec3 ip;
// bool contact = intersectEdgeEdge(p1, p2, normal, segment.p0, segment.p1, dist, ip);
// if(contact && dist < radius + contactDistance)
// contactBuffer.contact(ip, normal, dist - radius);
PxReal dist;
PxVec3 ip;
// bool contact = intersectEdgeEdgePreca(segment.p0, segment.p1, v1, plane, ii, jj, coeff, -normal, p1, p2, dist, ip);
bool contact = intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, -normal, p1, p2, dist, ip);
if(contact && dist < radius + contactDistance)
{
contactBuffer.contact(ip-normal*dist, normal, dist - radius);
// if(contactBuffer.count==2) // PT: we only need 2 contacts to be stable
// return;
}
}
}
bool Gu::contactCapsuleBox(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
PX_UNUSED(cache);
// Get actual shape data
const PxCapsuleGeometry& shapeCapsule = checkedCast<PxCapsuleGeometry>(shape0);
const PxBoxGeometry& shapeBox = checkedCast<PxBoxGeometry>(shape1);
// PT: TODO: move computations to local space
// Capsule data
Segment worldSegment;
getCapsuleSegment(transform0, shapeCapsule, worldSegment);
const PxReal inflatedRadius = shapeCapsule.radius + params.mContactDistance;
// Box data
Box worldBox;
buildFrom(worldBox, transform1.p, shapeBox.halfExtents, transform1.q);
// Collision detection
PxReal t;
PxVec3 onBox;
const PxReal squareDist = distanceSegmentBoxSquared(worldSegment.p0, worldSegment.p1, worldBox.center, worldBox.extents, worldBox.rot, &t, &onBox);
if(squareDist >= inflatedRadius*inflatedRadius)
return false;
PX_ASSERT(contactBuffer.count==0);
if(squareDist != 0.0f)
{
// PT: the capsule segment doesn't intersect the box => distance-based version
const PxVec3 onSegment = worldSegment.getPointAt(t);
onBox = worldBox.center + worldBox.rot.transform(onBox);
PxVec3 normal = onSegment - onBox;
PxReal normalLen = normal.magnitude();
if(normalLen > 0.0f)
{
normal *= 1.0f/normalLen;
// PT: generate VF contacts for segment's vertices vs box
GuGenerateVFContacts(contactBuffer, worldSegment, shapeCapsule.radius, worldBox, normal, params.mContactDistance);
// PT: early exit if we already have 2 stable contacts
if(contactBuffer.count==2)
return true;
// PT: else generate slower EE contacts
GuGenerateEEContacts2(contactBuffer, worldSegment, shapeCapsule.radius, worldBox, normal, params.mContactDistance);
// PT: run VF case for box-vertex-vs-capsule only if we don't have any contact yet
if(!contactBuffer.count)
contactBuffer.contact(onBox, normal, sqrtf(squareDist) - shapeCapsule.radius);
}
else
{
// On linux we encountered the following:
// For a case where a segment endpoint lies on the surface of a box, the squared distance between segment and box was tiny but still larger than 0.
// However, the computation of the normal length was exactly 0. In that case we should have switched to the penetration based version so we do it now
// instead.
goto PenetrationBasedCode;
}
}
else
{
PenetrationBasedCode:
// PT: the capsule segment intersects the box => penetration-based version
// PT: compute penetration vector (MTD)
PxVec3 sepAxis;
PxReal depth;
if(!GuCapsuleOBBOverlap3(worldSegment, shapeCapsule.radius, worldBox, &depth, &sepAxis)) return false;
// PT: generate VF contacts for segment's vertices vs box
GuGenerateVFContacts(contactBuffer, worldSegment, shapeCapsule.radius, worldBox, sepAxis, params.mContactDistance);
// PT: early exit if we already have 2 stable contacts
if(contactBuffer.count==2)
return true;
// PT: else generate slower EE contacts
GuGenerateEEContacts(contactBuffer, worldSegment, shapeCapsule.radius, worldBox, sepAxis);
if(!contactBuffer.count)
{
contactBuffer.contact(worldSegment.computeCenter(), sepAxis, -(shapeCapsule.radius + depth));
return true;
}
}
return true;
}

View File

@@ -0,0 +1,148 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuDistanceSegmentSegment.h"
#include "GuContactMethodImpl.h"
#include "GuInternal.h"
using namespace physx;
bool Gu::contactCapsuleCapsule(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
PX_UNUSED(cache);
const PxCapsuleGeometry& capsuleGeom0 = checkedCast<PxCapsuleGeometry>(shape0);
const PxCapsuleGeometry& capsuleGeom1 = checkedCast<PxCapsuleGeometry>(shape1);
// PT: get capsules in local space
PxVec3 dir[2];
Segment segment[2];
{
const PxVec3 capsuleLocalSegment0 = getCapsuleHalfHeightVector(transform0, capsuleGeom0);
const PxVec3 capsuleLocalSegment1 = getCapsuleHalfHeightVector(transform1, capsuleGeom1);
const PxVec3 delta = transform1.p - transform0.p;
segment[0].p0 = capsuleLocalSegment0;
segment[0].p1 = -capsuleLocalSegment0;
dir[0] = -capsuleLocalSegment0*2.0f;
segment[1].p0 = capsuleLocalSegment1 + delta;
segment[1].p1 = -capsuleLocalSegment1 + delta;
dir[1] = -capsuleLocalSegment1*2.0f;
}
// PT: compute distance between capsules' segments
PxReal s,t;
const PxReal squareDist = distanceSegmentSegmentSquared(segment[0], segment[1], &s, &t);
const PxReal radiusSum = capsuleGeom0.radius + capsuleGeom1.radius;
const PxReal inflatedSum = radiusSum + params.mContactDistance;
const PxReal inflatedSumSquared = inflatedSum*inflatedSum;
if(squareDist >= inflatedSumSquared)
return false;
// PT: TODO: optimize this away
PxReal segLen[2];
segLen[0] = dir[0].magnitude();
segLen[1] = dir[1].magnitude();
if (segLen[0]) dir[0] *= 1.0f / segLen[0];
if (segLen[1]) dir[1] *= 1.0f / segLen[1];
if (PxAbs(dir[0].dot(dir[1])) > 0.9998f) //almost parallel, ca. 1 degree difference --> generate two contact points at ends
{
PxU32 numCons = 0;
PxReal segLenEps[2];
segLenEps[0] = segLen[0] * 0.001f;//0.1% error is ok.
segLenEps[1] = segLen[1] * 0.001f;
//project the two end points of each onto the axis of the other and take those 4 points.
//we could also generate a single normal at the single closest point, but this would be 'unstable'.
for (PxU32 destShapeIndex = 0; destShapeIndex < 2; destShapeIndex ++)
{
for (PxU32 startEnd = 0; startEnd < 2; startEnd ++)
{
const PxU32 srcShapeIndex = 1-destShapeIndex;
//project start/end of srcShapeIndex onto destShapeIndex.
PxVec3 pos[2];
pos[destShapeIndex] = startEnd ? segment[srcShapeIndex].p1 : segment[srcShapeIndex].p0;
const PxReal p = dir[destShapeIndex].dot(pos[destShapeIndex] - segment[destShapeIndex].p0);
if (p >= -segLenEps[destShapeIndex] && p <= (segLen[destShapeIndex] + segLenEps[destShapeIndex]))
{
pos[srcShapeIndex] = p * dir[destShapeIndex] + segment[destShapeIndex].p0;
PxVec3 normal = pos[1] - pos[0];
const PxReal normalLenSq = normal.magnitudeSquared();
if (normalLenSq > 1e-6f && normalLenSq < inflatedSumSquared)
{
const PxReal distance = PxSqrt(normalLenSq);
normal *= 1.0f/distance;
PxVec3 point = pos[1] - normal * (srcShapeIndex ? capsuleGeom1 : capsuleGeom0).radius;
point += transform0.p;
contactBuffer.contact(point, normal, distance - radiusSum);
numCons++;
}
}
}
}
if (numCons) //if we did not have contacts, then we may have the case where they are parallel, but are stacked end to end, in which case the old code will generate good contacts.
return true;
}
// Collision response
PxVec3 pos1 = segment[0].getPointAt(s);
PxVec3 pos2 = segment[1].getPointAt(t);
PxVec3 normal = pos1 - pos2;
const PxReal normalLenSq = normal.magnitudeSquared();
if (normalLenSq < 1e-6f)
{
// PT: TODO: revisit this. "FW" sounds old.
// Zero normal -> pick the direction of segment 0.
// Not always accurate but consistent with FW.
if (segLen[0] > 1e-6f)
normal = dir[0];
else
normal = PxVec3(1.0f, 0.0f, 0.0f);
}
else
{
normal *= PxRecipSqrt(normalLenSq);
}
pos1 += transform0.p;
contactBuffer.contact(pos1 - normal * capsuleGeom0.radius, normal, PxSqrt(squareDist) - radiusSum);
return true;
}

View File

@@ -0,0 +1,577 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuConvexMesh.h"
#include "GuConvexHelper.h"
#include "GuContactMethodImpl.h"
#include "GuVecConvexHull.h"
#include "GuVecCapsule.h"
#include "GuInternal.h"
#include "GuGJK.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////
// #include "PxRenderOutput.h"
// #include "PxsContext.h"
// static void gVisualizeLine(const PxVec3& a, const PxVec3& b, PxcNpThreadContext& context, PxU32 color=0xffffff)
// {
// PxMat44 m = PxMat44::identity();
//
// PxRenderOutput& out = context.mRenderOutput;
// out << color << m << RenderOutput::LINES << a << b;
// }
///////////
static const PxReal fatConvexEdgeCoeff = 0.01f;
static bool intersectEdgeEdgePreca(const PxVec3& p1, const PxVec3& p2, const PxVec3& v1, const PxPlane& plane, PxU32 i, PxU32 j, float coeff, const PxVec3& dir, const PxVec3& p3, const PxVec3& p4, PxReal& dist, PxVec3& ip, float limit)
{
// if colliding edge (p3,p4) does not cross plane return no collision
// same as if p3 and p4 on same side of plane return 0
//
// Derivation:
// d3 = d(p3, P) = (p3 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// d4 = d(p4, P) = (p4 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// if d3 and d4 have the same sign, they're on the same side of the plane => no collision
// We test both sides at the same time by only testing Sign(d3 * d4).
// ### put that in the Plane class
// ### also check that code in the triangle class that might be similar
const PxReal d3 = plane.distance(p3);
PxReal temp = d3 * plane.distance(p4);
if(temp>0.0f)
return false;
// if colliding edge (p3,p4) and plane are parallel return no collision
PxVec3 v2 = p4 - p3;
temp = plane.n.dot(v2);
if(temp==0.0f)
return false; // ### epsilon would be better
// compute intersection point of plane and colliding edge (p3,p4)
ip = p3-v2*(d3/temp);
// compute distance of intersection from line (ip, -dir) to line (p1,p2)
dist = (v1[i]*(ip[j]-p1[j])-v1[j]*(ip[i]-p1[i]))*coeff;
if(dist<limit)
return false;
// compute intersection point on edge (p1,p2) line
ip -= dist*dir;
// check if intersection point (ip) is between edge (p1,p2) vertices
temp = (p1.x-ip.x)*(p2.x-ip.x)+(p1.y-ip.y)*(p2.y-ip.y)+(p1.z-ip.z)*(p2.z-ip.z);
if(temp<0.0f)
return true; // collision found
return false; // no collision
}
static bool GuTestAxis(const PxVec3& axis, const Segment& segment, PxReal radius,
const PolygonalData& polyData, const FastVertex2ShapeScaling& scaling,
const PxMat34& worldTM,
PxReal& depth)
{
// Project capsule
PxReal min0 = segment.p0.dot(axis);
PxReal max0 = segment.p1.dot(axis);
if(min0>max0) PxSwap(min0, max0);
min0 -= radius;
max0 += radius;
// Project convex
PxReal Min1, Max1;
(polyData.mProjectHull)(polyData, axis, worldTM, scaling, Min1, Max1);
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
static bool GuCapsuleConvexOverlap(const Segment& segment, PxReal radius,
const PolygonalData& polyData,
const FastVertex2ShapeScaling& scaling,
const PxTransform& transform,
PxReal* t, PxVec3* pp, bool isSphere)
{
// TODO:
// - test normal & edge in same loop
// - local space
// - use precomputed face value
// - optimize projection
PxVec3 Sep(0,0,0);
PxReal PenDepth = PX_MAX_REAL;
PxU32 nbPolys = polyData.mNbPolygons;
const HullPolygonData* polys = polyData.mPolygons;
const Matrix34FromTransform worldTM(transform);
// Test normals
for(PxU32 i=0;i<nbPolys;i++)
{
const HullPolygonData& poly = polys[i];
const PxPlane& vertSpacePlane = poly.mPlane;
const PxVec3 worldNormal = worldTM.rotate(vertSpacePlane.n);
PxReal d;
if(!GuTestAxis(worldNormal, segment, radius, polyData, scaling, worldTM, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = worldNormal;
}
}
// Test edges
if(!isSphere)
{
PxVec3 CapsuleAxis(segment.p1 - segment.p0);
CapsuleAxis = CapsuleAxis.getNormalized();
for(PxU32 i=0;i<nbPolys;i++)
{
const HullPolygonData& poly = polys[i];
const PxPlane& vertSpacePlane = poly.mPlane;
const PxVec3 worldNormal = worldTM.rotate(vertSpacePlane.n);
PxVec3 Cross = CapsuleAxis.cross(worldNormal);
if(!isAlmostZero(Cross))
{
Cross = Cross.getNormalized();
PxReal d;
if(!GuTestAxis(Cross, segment, radius, polyData, scaling, worldTM, d))
return false;
if(d<PenDepth)
{
PenDepth = d;
Sep = Cross;
}
}
}
}
const PxVec3 Witness = segment.computeCenter() - transform.transform(polyData.mCenter);
if(Sep.dot(Witness) < 0.0f)
Sep = -Sep;
if(t) *t = PenDepth;
if(pp) *pp = Sep;
return true;
}
static bool raycast_convexMesh2( const PolygonalData& polyData,
const PxVec3& vrayOrig, const PxVec3& vrayDir,
PxReal maxDist, PxF32& t)
{
PxU32 nPolys = polyData.mNbPolygons;
const HullPolygonData* PX_RESTRICT polys = polyData.mPolygons;
/*
Purely convex planes based algorithm
Iterate all planes of convex, with following rules:
* determine of ray origin is inside them all or not.
* planes parallel to ray direction are immediate early out if we're on the outside side (plane normal is sep axis)
* else
- for all planes the ray direction "enters" from the front side, track the one furthest along the ray direction (A)
- for all planes the ray direction "exits" from the back side, track the one furthest along the negative ray direction (B)
if the ray origin is outside the convex and if along the ray, A comes before B, the directed line stabs the convex at A
*/
PxReal latestEntry = -FLT_MAX;
PxReal earliestExit = FLT_MAX;
while(nPolys--)
{
const HullPolygonData& poly = *polys++;
const PxPlane& vertSpacePlane = poly.mPlane;
const PxReal distToPlane = vertSpacePlane.distance(vrayOrig);
const PxReal dn = vertSpacePlane.n.dot(vrayDir);
const PxReal distAlongRay = -distToPlane/dn;
if (dn > 1E-7f) //the ray direction "exits" from the back side
{
earliestExit = physx::intrinsics::selectMin(earliestExit, distAlongRay);
}
else if (dn < -1E-7f) //the ray direction "enters" from the front side
{
/* if (distAlongRay > latestEntry)
{
latestEntry = distAlongRay;
}*/
latestEntry = physx::intrinsics::selectMax(latestEntry, distAlongRay);
}
else
{
//plane normal and ray dir are orthogonal
if(distToPlane > 0.0f)
return false; //a plane is parallel with ray -- and we're outside the ray -- we definitely miss the entire convex!
}
}
if(latestEntry < earliestExit && latestEntry != -FLT_MAX && latestEntry < maxDist-1e-5f)
{
t = latestEntry;
return true;
}
return false;
}
// PT: version based on Gu::raycast_convexMesh to handle scaling, but modified to make sure it works when ray starts inside the convex
static void GuGenerateVFContacts2(PxContactBuffer& contactBuffer,
//
const PxTransform& convexPose,
const PolygonalData& polyData, // Convex data
const PxMeshScale& scale,
//
PxU32 nbPts,
const PxVec3* PX_RESTRICT points,
PxReal radius, // Capsule's radius
//
const PxVec3& normal,
PxReal contactDistance)
{
PX_ASSERT(PxAbs(normal.magnitudeSquared()-1)<1e-4f);
//scaling: transform the ray to vertex space
const PxMat34 world2vertexSkew = scale.getInverse() * convexPose.getInverse();
const PxVec3 vrayDir = world2vertexSkew.rotate( -normal );
const PxReal maxDist = contactDistance + radius;
for(PxU32 i=0;i<nbPts;i++)
{
const PxVec3& rayOrigin = points[i];
const PxVec3 vrayOrig = world2vertexSkew.transform(rayOrigin);
PxF32 t;
if(raycast_convexMesh2(polyData, vrayOrig, vrayDir, maxDist, t))
{
contactBuffer.contact(rayOrigin - t * normal, normal, t - radius);
}
}
}
static void GuGenerateEEContacts( PxContactBuffer& contactBuffer,
//
const Segment& segment,
PxReal radius,
PxReal contactDistance,
//
const PolygonalData& polyData,
const PxTransform& transform,
const FastVertex2ShapeScaling& scaling,
//
const PxVec3& normal)
{
PxU32 numPolygons = polyData.mNbPolygons;
const HullPolygonData* PX_RESTRICT polygons = polyData.mPolygons;
const PxU8* PX_RESTRICT vertexData = polyData.mPolygonVertexRefs;
ConvexEdge edges[512];
PxU32 nbEdges = findUniqueConvexEdges(512, edges, numPolygons, polygons, vertexData);
//
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatConvexEdgeCoeff);
// PT: precomputed part of edge-edge intersection test
// const PxVec3 v1 = segment.p1 - segment.p0;
const PxVec3 v1 = s1 - s0;
PxPlane plane;
plane.n = v1.cross(normal);
// plane.d = -(plane.normal|segment.p0);
plane.d = -(plane.n.dot(s0));
PxU32 ii,jj;
closestAxis(plane.n, ii, jj);
const float coeff = 1.0f /(v1[ii]*normal[jj]-v1[jj]*normal[ii]);
//
const PxVec3* PX_RESTRICT verts = polyData.mVerts;
for(PxU32 i=0;i<nbEdges;i++)
{
const PxU8 vi0 = edges[i].vref0;
const PxU8 vi1 = edges[i].vref1;
// PxVec3 p1 = transform.transform(verts[vi0]);
// PxVec3 p2 = transform.transform(verts[vi1]);
// makeFatEdge(p1, p2, fatConvexEdgeCoeff); // PT: TODO: make fat segment instead
const PxVec3 p1 = transform.transform(scaling * verts[vi0]);
const PxVec3 p2 = transform.transform(scaling * verts[vi1]);
PxReal dist;
PxVec3 ip;
// if(intersectEdgeEdgePreca(segment.p0, segment.p1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip))
// if(intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip, -FLT_MAX))
if(intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip, -radius-contactDistance))
// if(intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, normal, p1, p2, dist, ip, 0))
{
contactBuffer.contact(ip-normal*dist, normal, - (radius + dist));
// if(contactBuffer.count==2) // PT: we only need 2 contacts to be stable
// return;
}
}
}
static void GuGenerateEEContacts2b(PxContactBuffer& contactBuffer,
//
const Segment& segment,
PxReal radius,
//
const PxMat34& transform,
const PolygonalData& polyData,
const FastVertex2ShapeScaling& scaling,
//
const PxVec3& normal,
PxReal contactDistance)
{
// TODO:
// - local space
const PxVec3 localDir = transform.rotateTranspose(normal);
PxU32 polyIndex = (polyData.mSelectClosestEdgeCB)(polyData, scaling, localDir);
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatConvexEdgeCoeff);
// PT: precomputed part of edge-edge intersection test
// const PxVec3 v1 = segment.p1 - segment.p0;
const PxVec3 v1 = s1 - s0;
PxPlane plane;
plane.n = -(v1.cross(normal));
// plane.d = -(plane.normal|segment.p0);
plane.d = -(plane.n.dot(s0));
PxU32 ii,jj;
closestAxis(plane.n, ii, jj);
const float coeff = 1.0f /(v1[jj]*normal[ii]-v1[ii]*normal[jj]);
//
const PxVec3* PX_RESTRICT verts = polyData.mVerts;
const HullPolygonData& polygon = polyData.mPolygons[polyIndex];
const PxU8* PX_RESTRICT vRefBase = polyData.mPolygonVertexRefs + polygon.mVRef8;
PxU32 numEdges = polygon.mNbVerts;
PxU32 a = numEdges - 1;
PxU32 b = 0;
while(numEdges--)
{
// const PxVec3 p1 = transform.transform(verts[vRefBase[a]]);
// const PxVec3 p2 = transform.transform(verts[vRefBase[b]]);
const PxVec3 p1 = transform.transform(scaling * verts[vRefBase[a]]);
const PxVec3 p2 = transform.transform(scaling * verts[vRefBase[b]]);
PxReal dist;
PxVec3 ip;
// bool contact = intersectEdgeEdgePreca(segment.p0, segment.p1, v1, plane, ii, jj, coeff, -normal, p1, p2, dist, ip);
bool contact = intersectEdgeEdgePreca(s0, s1, v1, plane, ii, jj, coeff, -normal, p1, p2, dist, ip, 0.0f);
if(contact && dist < radius + contactDistance)
{
contactBuffer.contact(ip-normal*dist, normal, dist - radius);
// if(contactBuffer.count==2) // PT: we only need 2 contacts to be stable
// return;
}
a = b;
b++;
}
}
bool Gu::contactCapsuleConvex(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
PX_UNUSED(cache);
// Get actual shape data
// PT: the capsule can be a sphere in this case so we do this special piece of code:
PxCapsuleGeometry shapeCapsule = static_cast<const PxCapsuleGeometry&>(shape0);
if(shape0.getType()==PxGeometryType::eSPHERE)
shapeCapsule.halfHeight = 0.0f;
const PxConvexMeshGeometry& shapeConvex = checkedCast<PxConvexMeshGeometry>(shape1);
PxVec3 onSegment, onConvex;
PxReal distance;
PxVec3 normal_;
{
const ConvexMesh* cm = static_cast<const ConvexMesh*>(shapeConvex.convexMesh);
using namespace aos;
Vec3V closA, closB, normalV;
GjkStatus status;
FloatV dist;
{
const Vec3V zeroV = V3Zero();
const ConvexHullData* hullData = &cm->getHull();
const FloatV capsuleHalfHeight = FLoad(shapeCapsule.halfHeight);
const Vec3V vScale = V3LoadU_SafeReadW(shapeConvex.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&shapeConvex.scale.rotation.x);
const PxMatTransformV aToB(transform1.transformInv(transform0));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, shapeConvex.scale.isIdentity());
//transform capsule(a) into the local space of convexHull(b), treat capsule as segment
const CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), FZero());
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(convexA.getCenter(), convexB.getCenter());
status = gjk<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, FMax(),closA, closB, normalV, dist);
}
if(status == GJK_CONTACT)
distance = 0.f;
else
{
//const FloatV sqDist = FMul(dist, dist);
V3StoreU(closB, onConvex);
FStore(dist, &distance);
V3StoreU(normalV, normal_);
onConvex = transform1.transform(onConvex);
normal_ = transform1.rotate(normal_);
}
}
const PxReal inflatedRadius = shapeCapsule.radius + params.mContactDistance;
if(distance >= inflatedRadius)
return false;
Segment worldSegment;
getCapsuleSegment(transform0, shapeCapsule, worldSegment);
const bool isSphere = worldSegment.p0 == worldSegment.p1;
const PxU32 nbPts = PxU32(isSphere ? 1 : 2);
PX_ASSERT(contactBuffer.count==0);
FastVertex2ShapeScaling convexScaling;
const bool idtConvexScale = shapeConvex.scale.isIdentity();
if(!idtConvexScale)
convexScaling.init(shapeConvex.scale);
PolygonalData polyData;
getPolygonalData_Convex(&polyData, _getHullData(shapeConvex), convexScaling);
// if(0)
if(distance > 0.f)
{
// PT: the capsule segment doesn't intersect the convex => distance-based version
PxVec3 normal = -normal_;
// PT: generate VF contacts for segment's vertices vs convex
GuGenerateVFContacts2( contactBuffer,
transform1, polyData, shapeConvex.scale,
nbPts, &worldSegment.p0, shapeCapsule.radius,
normal, params.mContactDistance);
// PT: early exit if we already have 2 stable contacts
if(contactBuffer.count==2)
return true;
// PT: else generate slower EE contacts
if(!isSphere)
{
const Matrix34FromTransform worldTM(transform1);
GuGenerateEEContacts2b(contactBuffer, worldSegment, shapeCapsule.radius,
worldTM, polyData, convexScaling,
normal, params.mContactDistance);
}
// PT: run VF case for convex-vertex-vs-capsule only if we don't have any contact yet
if(!contactBuffer.count)
{
// gVisualizeLine(onConvex, onConvex + normal, context, PxDebugColor::eARGB_RED);
//PxReal distance = PxSqrt(sqDistance);
contactBuffer.contact(onConvex, normal, distance - shapeCapsule.radius);
}
}
else
{
// PT: the capsule segment intersects the convex => penetration-based version
//printf("Penetration-based:\n");
// PT: compute penetration vector (MTD)
PxVec3 SepAxis;
if(!GuCapsuleConvexOverlap(worldSegment, shapeCapsule.radius, polyData, convexScaling, transform1, NULL, &SepAxis, isSphere))
{
//printf("- no overlap\n");
return false;
}
// PT: generate VF contacts for segment's vertices vs convex
GuGenerateVFContacts2( contactBuffer,
transform1, polyData, shapeConvex.scale,
nbPts, &worldSegment.p0, shapeCapsule.radius,
SepAxis, params.mContactDistance);
// PT: early exit if we already have 2 stable contacts
//printf("- %d VF contacts\n", contactBuffer.count);
if(contactBuffer.count==2)
return true;
// PT: else generate slower EE contacts
if(!isSphere)
{
GuGenerateEEContacts(contactBuffer, worldSegment, shapeCapsule.radius, params.mContactDistance, polyData, transform1, convexScaling, SepAxis);
//printf("- %d total contacts\n", contactBuffer.count);
}
}
return true;
}

View File

@@ -0,0 +1,637 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuIntersectionEdgeEdge.h"
#include "GuDistanceSegmentTriangle.h"
#include "GuIntersectionRayTriangle.h"
#include "GuIntersectionTriangleBox.h"
#include "GuInternal.h"
#include "GuContactMethodImpl.h"
#include "GuFeatureCode.h"
#include "GuMidphaseInterface.h"
#include "GuEntityReport.h"
#include "GuHeightFieldUtil.h"
#include "GuConvexEdgeFlags.h"
#include "GuBox.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
#define DEBUG_RENDER_MESHCONTACTS 0
#if DEBUG_RENDER_MESHCONTACTS
#include "PxPhysics.h"
#include "PxScene.h"
#endif
#define USE_AABB_TRI_CULLING
//#define USE_CAPSULE_TRI_PROJ_CULLING
//#define USE_CAPSULE_TRI_SAT_CULLING
#define VISUALIZE_TOUCHED_TRIS 0
#define VISUALIZE_CULLING_BOX 0
#if VISUALIZE_TOUCHED_TRIS
#include "PxRenderOutput.h"
#include "PxsContactManager.h"
#include "PxsContext.h"
static void gVisualizeLine(const PxVec3& a, const PxVec3& b, PxcNpThreadContext& context, PxU32 color=0xffffff)
{
PxMat44 m = PxMat44::identity();
PxRenderOutput& out = context.mRenderOutput;
out << color << m << PxRenderOutput::LINES << a << b;
}
static void gVisualizeTri(const PxVec3& a, const PxVec3& b, const PxVec3& c, PxcNpThreadContext& context, PxU32 color=0xffffff)
{
PxMat44 m = PxMat44::identity();
PxRenderOutput& out = context.mRenderOutput;
out << color << m << PxRenderOutput::TRIANGLES << a << b << c;
}
static PxU32 gColors[8] = { 0xff0000ff, 0xff00ff00, 0xffff0000,
0xff00ffff, 0xffff00ff, 0xffffff00,
0xff000080, 0xff008000};
#endif
static const float fatBoxEdgeCoeff = 0.01f;
static bool PxcTestAxis(const PxVec3& axis, const Segment& segment, PxReal radius,
const PxVec3* PX_RESTRICT triVerts, PxReal& depth)
{
// Project capsule
PxReal min0 = segment.p0.dot(axis);
PxReal max0 = segment.p1.dot(axis);
if(min0>max0) PxSwap(min0, max0);
min0 -= radius;
max0 += radius;
// Project triangle
float Min1, Max1;
{
Min1 = Max1 = triVerts[0].dot(axis);
const PxReal dp1 = triVerts[1].dot(axis);
Min1 = physx::intrinsics::selectMin(Min1, dp1);
Max1 = physx::intrinsics::selectMax(Max1, dp1);
const PxReal dp2 = triVerts[2].dot(axis);
Min1 = physx::intrinsics::selectMin(Min1, dp2);
Max1 = physx::intrinsics::selectMax(Max1, dp2);
}
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
PX_FORCE_INLINE static PxVec3 PxcComputeTriangleNormal(const PxVec3* PX_RESTRICT triVerts)
{
return ((triVerts[0]-triVerts[1]).cross(triVerts[0]-triVerts[2])).getNormalized();
}
PX_FORCE_INLINE static PxVec3 PxcComputeTriangleCenter(const PxVec3* PX_RESTRICT triVerts)
{
static const PxReal inv3 = 1.0f / 3.0f;
return (triVerts[0] + triVerts[1] + triVerts[2]) * inv3;
}
static bool PxcCapsuleTriOverlap3(PxU8 edgeFlags, const Segment& segment, PxReal radius, const PxVec3* PX_RESTRICT triVerts,
PxReal* PX_RESTRICT t=NULL, PxVec3* PX_RESTRICT pp=NULL)
{
PxReal penDepth = PX_MAX_REAL;
// Test normal
PxVec3 sep = PxcComputeTriangleNormal(triVerts);
if(!PxcTestAxis(sep, segment, radius, triVerts, penDepth))
return false;
// Test edges
// ML:: use the active edge flag instead of the concave flag
const PxU32 activeEdgeFlag[] = {ETD_CONVEX_EDGE_01, ETD_CONVEX_EDGE_12, ETD_CONVEX_EDGE_20};
const PxVec3 capsuleAxis = (segment.p1 - segment.p0).getNormalized();
for(PxU32 i=0;i<3;i++)
{
//bool active =((edgeFlags & ignoreEdgeFlag[i]) == 0);
if(edgeFlags & activeEdgeFlag[i])
{
const PxVec3 e0 = triVerts[i];
// const PxVec3 e1 = triVerts[(i+1)%3];
const PxVec3 e1 = triVerts[PxGetNextIndex3(i)];
const PxVec3 edge = e0 - e1;
PxVec3 cross = capsuleAxis.cross(edge);
if(!isAlmostZero(cross))
{
cross = cross.getNormalized();
PxReal d;
if(!PxcTestAxis(cross, segment, radius, triVerts, d))
return false;
if(d<penDepth)
{
penDepth = d;
sep = cross;
}
}
}
}
const PxVec3 capsuleCenter = segment.computeCenter();
const PxVec3 triCenter = PxcComputeTriangleCenter(triVerts);
const PxVec3 witness = capsuleCenter - triCenter;
if(sep.dot(witness) < 0.0f)
sep = -sep;
if(t) *t = penDepth;
if(pp) *pp = sep;
return true;
}
static void PxcGenerateVFContacts( const PxMat34& meshAbsPose, PxContactBuffer& contactBuffer, const Segment& segment,
const PxReal radius, const PxVec3* PX_RESTRICT triVerts, const PxVec3& normal,
PxU32 triangleIndex, PxReal contactDistance)
{
const PxVec3* PX_RESTRICT Ptr = &segment.p0;
for(PxU32 i=0;i<2;i++)
{
const PxVec3& Pos = Ptr[i];
PxReal t,u,v;
if(intersectRayTriangleCulling(Pos, -normal, triVerts[0], triVerts[1], triVerts[2], t, u, v, 1e-3f) && t < radius + contactDistance)
{
const PxVec3 Hit = meshAbsPose.transform(Pos - t * normal);
const PxVec3 wn = meshAbsPose.rotate(normal);
contactBuffer.contact(Hit, wn, t-radius, triangleIndex);
#if DEBUG_RENDER_MESHCONTACTS
PxScene *s; PxGetPhysics().getScenes(&s, 1, 0);
PxRenderOutput((PxRenderBufferImpl&)s->getRenderBuffer()) << PxRenderOutput::LINES << PxDebugColor::eARGB_BLUE // red
<< Hit << (Hit + wn * 10.0f);
#endif
}
}
}
// PT: PxcGenerateEEContacts2 uses a segment-triangle distance function, which breaks when the segment
// intersects the triangle, in which case you need to switch to a penetration-depth computation.
// If you don't do this thin capsules don't work.
static void PxcGenerateEEContacts( const PxMat34& meshAbsPose, PxContactBuffer& contactBuffer, const Segment& segment, const PxReal radius,
const PxVec3* PX_RESTRICT triVerts, const PxVec3& normal, PxU32 triangleIndex)
{
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatBoxEdgeCoeff);
for(PxU32 i=0;i<3;i++)
{
PxReal dist;
PxVec3 ip;
if(intersectEdgeEdge(triVerts[i], triVerts[PxGetNextIndex3(i)], -normal, s0, s1, dist, ip))
{
ip = meshAbsPose.transform(ip);
const PxVec3 wn = meshAbsPose.rotate(normal);
contactBuffer.contact(ip, wn, - (radius + dist), triangleIndex);
#if DEBUG_RENDER_MESHCONTACTS
PxScene *s; PxGetPhysics().getScenes(&s, 1, 0);
PxRenderOutput((PxRenderBufferImpl&)s->getRenderBuffer()) << PxRenderOutput::LINES << PxDebugColor::eARGB_BLUE // red
<< ip << (ip + wn * 10.0f);
#endif
}
}
}
static void PxcGenerateEEContacts2( const PxMat34& meshAbsPose, PxContactBuffer& contactBuffer, const Segment& segment, const PxReal radius,
const PxVec3* PX_RESTRICT triVerts, const PxVec3& normal, PxU32 triangleIndex, PxReal contactDistance)
{
PxVec3 s0 = segment.p0;
PxVec3 s1 = segment.p1;
makeFatEdge(s0, s1, fatBoxEdgeCoeff);
for(PxU32 i=0;i<3;i++)
{
PxReal dist;
PxVec3 ip;
if(intersectEdgeEdge(triVerts[i], triVerts[PxGetNextIndex3(i)], normal, s0, s1, dist, ip) && dist < radius+contactDistance)
{
ip = meshAbsPose.transform(ip);
const PxVec3 wn = meshAbsPose.rotate(normal);
contactBuffer.contact(ip, wn, dist - radius, triangleIndex);
#if DEBUG_RENDER_MESHCONTACTS
PxScene *s; PxGetPhysics().getScenes(&s, 1, 0);
PxRenderOutput((PxRenderBufferImpl&)s->getRenderBuffer()) << PxRenderOutput::LINES << PxDebugColor::eARGB_BLUE // red
<< ip << (ip + wn * 10.0f);
#endif
}
}
}
namespace
{
struct CapsuleMeshContactGeneration
{
PxContactBuffer& mContactBuffer;
const PxMat34 mMeshAbsPose;
const Segment& mMeshCapsule;
#ifdef USE_AABB_TRI_CULLING
PxVec3p mBC;
PxVec3p mBE;
#endif
PxReal mInflatedRadius;
PxReal mContactDistance;
PxReal mShapeCapsuleRadius;
CapsuleMeshContactGeneration(PxContactBuffer& contactBuffer, const PxTransform& transform1, const Segment& meshCapsule, PxReal inflatedRadius, PxReal contactDistance, PxReal shapeCapsuleRadius) :
mContactBuffer (contactBuffer),
mMeshAbsPose (Matrix34FromTransform(transform1)),
mMeshCapsule (meshCapsule),
mInflatedRadius (inflatedRadius),
mContactDistance (contactDistance),
mShapeCapsuleRadius (shapeCapsuleRadius)
{
PX_ASSERT(contactBuffer.count==0);
#ifdef USE_AABB_TRI_CULLING
mBC = (meshCapsule.p0 + meshCapsule.p1)*0.5f;
const PxVec3p be = (meshCapsule.p0 - meshCapsule.p1)*0.5f;
mBE.x = fabsf(be.x) + inflatedRadius;
mBE.y = fabsf(be.y) + inflatedRadius;
mBE.z = fabsf(be.z) + inflatedRadius;
#endif
}
void processTriangle(PxU32 triangleIndex, const PxTrianglePadded& tri, PxU8 extraData/*, const PxU32* vertInds*/)
{
#ifdef USE_AABB_TRI_CULLING
#if VISUALIZE_CULLING_BOX
{
PxRenderOutput& out = context.mRenderOutput;
PxTransform idt = PxTransform(PxIdentity);
out << idt;
out << 0xffffffff;
out << PxDebugBox(mBC, mBE, true);
}
#endif
#endif
const PxVec3& p0 = tri.verts[0];
const PxVec3& p1 = tri.verts[1];
const PxVec3& p2 = tri.verts[2];
#ifdef USE_AABB_TRI_CULLING
// PT: this one is safe because triangle class is padded
// PT: TODO: is this test really needed? Not done in midphase already?
if(!intersectTriangleBox_Unsafe(mBC, mBE, p0, p1, p2))
return;
#endif
#ifdef USE_CAPSULE_TRI_PROJ_CULLING
PxVec3 triCenter = (p0 + p1 + p2)*0.33333333f;
PxVec3 delta = mBC - triCenter;
PxReal depth;
if(!PxcTestAxis(delta, mMeshCapsule, mInflatedRadius, tri.verts, depth))
return;
#endif
#if VISUALIZE_TOUCHED_TRIS
gVisualizeTri(p0, p1, p2, context, PxDebugColor::eARGB_RED);
#endif
#ifdef USE_CAPSULE_TRI_SAT_CULLING
PxVec3 SepAxis;
if(!PxcCapsuleTriOverlap3(extraData, mMeshCapsule, mInflatedRadius, tri.verts, NULL, &SepAxis))
return;
#endif
PxReal t,u,v;
const PxVec3 p1_p0 = p1 - p0;
const PxVec3 p2_p0 = p2 - p0;
const PxReal squareDist = distanceSegmentTriangleSquared(mMeshCapsule, p0, p1_p0, p2_p0, &t, &u, &v);
// PT: do cheaper test first!
if(squareDist >= mInflatedRadius*mInflatedRadius)
return;
// PT: backface culling without the normalize
// PT: TODO: consider doing before the segment-triangle distance test if it's cheaper
const PxVec3 planeNormal = p1_p0.cross(p2_p0);
const PxF32 planeD = planeNormal.dot(p0); // PT: actually -d compared to PxcPlane
if(planeNormal.dot(mBC) < planeD)
return;
if(squareDist > 0.001f*0.001f)
{
// Contact information
PxVec3 normal;
if(selectNormal(extraData, u, v))
{
normal = planeNormal.getNormalized();
}
else
{
const PxVec3 pointOnTriangle = computeBarycentricPoint(p0, p1, p2, u, v);
const PxVec3 pointOnSegment = mMeshCapsule.getPointAt(t);
normal = pointOnSegment - pointOnTriangle;
const PxReal l = normal.magnitude();
if(l == 0.0f)
return;
normal = normal / l;
}
PxcGenerateEEContacts2(mMeshAbsPose, mContactBuffer, mMeshCapsule, mShapeCapsuleRadius, tri.verts, normal, triangleIndex, mContactDistance);
PxcGenerateVFContacts(mMeshAbsPose, mContactBuffer, mMeshCapsule, mShapeCapsuleRadius, tri.verts, normal, triangleIndex, mContactDistance);
}
else
{
PxVec3 SepAxis;
if(!PxcCapsuleTriOverlap3(extraData, mMeshCapsule, mInflatedRadius, tri.verts, NULL, &SepAxis))
return;
PxcGenerateEEContacts(mMeshAbsPose, mContactBuffer, mMeshCapsule, mShapeCapsuleRadius, tri.verts, SepAxis, triangleIndex);
PxcGenerateVFContacts(mMeshAbsPose, mContactBuffer, mMeshCapsule, mShapeCapsuleRadius, tri.verts, SepAxis, triangleIndex, mContactDistance);
}
}
private:
CapsuleMeshContactGeneration& operator=(const CapsuleMeshContactGeneration&);
};
struct CapsuleMeshContactGenerationCallback_NoScale : MeshHitCallback<PxGeomRaycastHit>
{
CapsuleMeshContactGeneration mGeneration;
const TriangleMesh* mMeshData;
CapsuleMeshContactGenerationCallback_NoScale(
PxContactBuffer& contactBuffer,
const PxTransform& transform1, const Segment& meshCapsule,
PxReal inflatedRadius, PxReal contactDistance,
PxReal shapeCapsuleRadius, const TriangleMesh* meshData
) :
MeshHitCallback<PxGeomRaycastHit> (CallbackMode::eMULTIPLE),
mGeneration (contactBuffer, transform1, meshCapsule, inflatedRadius, contactDistance, shapeCapsuleRadius),
mMeshData (meshData)
{
PX_ASSERT(contactBuffer.count==0);
}
virtual PxAgain processHit(
const PxGeomRaycastHit& hit, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2, PxReal&, const PxU32* /*vInds*/)
{
PxTrianglePadded tri;
// PT: TODO: revisit this, avoid the copy
tri.verts[0] = v0;
tri.verts[1] = v1;
tri.verts[2] = v2;
const PxU32 triangleIndex = hit.faceIndex;
//ML::set all the edges to be active, if the mExtraTrigData exist, we overwrite this flag
const PxU8 extraData = getConvexEdgeFlags(mMeshData->getExtraTrigData(), triangleIndex);
mGeneration.processTriangle(triangleIndex, tri, extraData);
return true;
}
private:
CapsuleMeshContactGenerationCallback_NoScale& operator=(const CapsuleMeshContactGenerationCallback_NoScale&);
};
struct CapsuleMeshContactGenerationCallback_Scale : CapsuleMeshContactGenerationCallback_NoScale
{
const FastVertex2ShapeScaling& mScaling;
CapsuleMeshContactGenerationCallback_Scale(
PxContactBuffer& contactBuffer,
const PxTransform& transform1, const Segment& meshCapsule,
PxReal inflatedRadius, const FastVertex2ShapeScaling& scaling, PxReal contactDistance,
PxReal shapeCapsuleRadius, const TriangleMesh* meshData
) :
CapsuleMeshContactGenerationCallback_NoScale(contactBuffer, transform1, meshCapsule, inflatedRadius, contactDistance, shapeCapsuleRadius, meshData),
mScaling (scaling)
{
}
virtual PxAgain processHit(
const PxGeomRaycastHit& hit, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2, PxReal&, const PxU32* /*vInds*/)
{
PxTrianglePadded tri;
getScaledVertices(tri.verts, v0, v1, v2, false, mScaling);
const PxU32 triangleIndex = hit.faceIndex;
//ML::set all the edges to be active, if the mExtraTrigData exist, we overwrite this flag
PxU8 extraData = getConvexEdgeFlags(mMeshData->getExtraTrigData(), triangleIndex);
if(mScaling.flipsNormal())
flipConvexEdgeFlags(extraData);
mGeneration.processTriangle(triangleIndex, tri, extraData);
return true;
}
private:
CapsuleMeshContactGenerationCallback_Scale& operator=(const CapsuleMeshContactGenerationCallback_Scale&);
};
}
// PT: computes local capsule without going to world-space
static PX_FORCE_INLINE Segment computeLocalCapsule(const PxTransform& transform0, const PxTransform& transform1, const PxCapsuleGeometry& shapeCapsule)
{
const PxVec3 halfHeight = getCapsuleHalfHeightVector(transform0, shapeCapsule);
const PxVec3 delta = transform1.p - transform0.p;
return Segment(
transform1.rotateInv(halfHeight - delta),
transform1.rotateInv(-halfHeight - delta));
}
bool Gu::contactCapsuleMesh(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(cache);
PX_UNUSED(renderOutput);
const PxCapsuleGeometry& shapeCapsule = checkedCast<PxCapsuleGeometry>(shape0);
const PxTriangleMeshGeometry& shapeMesh = checkedCast<PxTriangleMeshGeometry>(shape1);
const PxReal inflatedRadius = shapeCapsule.radius + params.mContactDistance; //AM: inflate!
const Segment meshCapsule = computeLocalCapsule(transform0, transform1, shapeCapsule);
const TriangleMesh* meshData = _getMeshData(shapeMesh);
//bound the capsule in shape space by an OBB:
Box queryBox;
{
const Capsule queryCapsule(meshCapsule, inflatedRadius);
queryBox.create(queryCapsule);
}
if(shapeMesh.scale.isIdentity())
{
CapsuleMeshContactGenerationCallback_NoScale callback(contactBuffer, transform1, meshCapsule,
inflatedRadius, params.mContactDistance, shapeCapsule.radius, meshData);
// PT: TODO: switch to capsule query here
Midphase::intersectOBB(meshData, queryBox, callback, true);
}
else
{
const FastVertex2ShapeScaling meshScaling(shapeMesh.scale);
CapsuleMeshContactGenerationCallback_Scale callback(contactBuffer, transform1, meshCapsule,
inflatedRadius, meshScaling, params.mContactDistance, shapeCapsule.radius, meshData);
//switched from capsuleCollider to boxCollider so we can support nonuniformly scaled meshes by scaling the query region:
//apply the skew transform to the box:
meshScaling.transformQueryBounds(queryBox.center, queryBox.extents, queryBox.rot);
Midphase::intersectOBB(meshData, queryBox, callback, true);
}
return contactBuffer.count > 0;
}
namespace
{
struct CapsuleHeightfieldContactGenerationCallback : OverlapReport
{
CapsuleMeshContactGeneration mGeneration;
const HeightFieldUtil& mHfUtil;
const PxTransform& mTransform1;
CapsuleHeightfieldContactGenerationCallback(
PxContactBuffer& contactBuffer,
const PxTransform& transform1, const HeightFieldUtil& hfUtil, const Segment& meshCapsule,
PxReal inflatedRadius, PxReal contactDistance, PxReal shapeCapsuleRadius
) :
mGeneration (contactBuffer, transform1, meshCapsule, inflatedRadius, contactDistance, shapeCapsuleRadius),
mHfUtil (hfUtil),
mTransform1 (transform1)
{
PX_ASSERT(contactBuffer.count==0);
}
// PT: TODO: refactor/unify with similar code in other places
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
const PxU8 nextInd[] = {2,0,1};
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxU32 vertIndices[3];
PxTrianglePadded currentTriangle; // in world space
PxU32 adjInds[3];
mHfUtil.getTriangle(mTransform1, currentTriangle, vertIndices, adjInds, triangleIndex, false, false);
PxVec3 normal;
currentTriangle.normal(normal);
PxU8 triFlags = 0; //KS - temporary until we can calculate triFlags for HF
for(PxU32 a = 0; a < 3; ++a)
{
if(adjInds[a] != 0xFFFFFFFF)
{
PxTriangle adjTri;
mHfUtil.getTriangle(mTransform1, adjTri, NULL, NULL, adjInds[a], false, false);
//We now compare the triangles to see if this edge is active
PxVec3 adjNormal;
adjTri.denormalizedNormal(adjNormal);
PxU32 otherIndex = nextInd[a];
PxF32 projD = adjNormal.dot(currentTriangle.verts[otherIndex] - adjTri.verts[0]);
if(projD < 0.f)
{
adjNormal.normalize();
PxF32 proj = adjNormal.dot(normal);
if(proj < 0.999f)
{
triFlags |= 1 << (a+3);
}
}
}
else
{
triFlags |= 1 << (a+3);
}
}
mGeneration.processTriangle(triangleIndex, currentTriangle, triFlags);
}
return true;
}
private:
CapsuleHeightfieldContactGenerationCallback& operator=(const CapsuleHeightfieldContactGenerationCallback&);
};
}
bool Gu::contactCapsuleHeightfield(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(cache);
PX_UNUSED(renderOutput);
const PxCapsuleGeometry& shapeCapsule = checkedCast<PxCapsuleGeometry>(shape0);
const PxHeightFieldGeometry& shapeMesh = checkedCast<PxHeightFieldGeometry>(shape1);
const PxReal inflatedRadius = shapeCapsule.radius + params.mContactDistance; //AM: inflate!
const Segment meshCapsule = computeLocalCapsule(transform0, transform1, shapeCapsule);
// We must be in local space to use the cache
const HeightFieldUtil hfUtil(shapeMesh);
CapsuleHeightfieldContactGenerationCallback callback(
contactBuffer, transform1, hfUtil, meshCapsule, inflatedRadius, params.mContactDistance, shapeCapsule.radius);
//switched from capsuleCollider to boxCollider so we can support nonuniformly scaled meshes by scaling the query region:
//bound the capsule in shape space by an AABB:
// PT: TODO: improve these bounds (see computeCapsuleBounds)
hfUtil.overlapAABBTriangles(transform0, transform1, getLocalCapsuleBounds(inflatedRadius, shapeCapsule.halfHeight), callback);
return contactBuffer.count > 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,66 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuGJKPenetration.h"
#include "GuEPA.h"
#include "GuVecConvexHull.h"
#include "GuVecConvexHullNoScale.h"
#include "GuContactMethodImpl.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactGen.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
#include "GuRefGjkEpa.h"
using namespace physx;
using namespace Gu;
using namespace aos;
bool Gu::contactConvexCoreConvex(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(cache);
PX_UNUSED(renderOutput);
const PxVec3 shift = (transform0.p + transform1.p) * 0.5f;
const PxTransform pose0(transform0.p - shift, transform0.q);
const PxTransform pose1(transform1.p - shift, transform1.q);
const PxReal contactDist = params.mContactDistance;
ConvexShape convex0; Gu::makeConvexShape(shape0, pose0, convex0);
ConvexShape convex1; Gu::makeConvexShape(shape1, pose1, convex1);
PX_ASSERT(convex0.isValid() && convex1.isValid());
PxVec3 normal, points[Gu::MAX_CONVEX_CONTACTS];
PxReal dists[Gu::MAX_CONVEX_CONTACTS];
if (PxU32 count = Gu::generateContacts(convex0, convex1, contactDist, normal, points, dists))
for (PxU32 i = 0; i < count; ++i)
contactBuffer.contact(points[i] + shift, normal, dists[i]);
return contactBuffer.count > 0;
}

View File

@@ -0,0 +1,435 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuContactPolygonPolygon.h"
#include "GuContactMethodImpl.h"
#include "GuMidphaseInterface.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuBounds.h"
#include "GuConvexGeometry.h"
#include "GuConvexSupport.h"
#include "GuContactReduction.h"
#include <GuTriangleMesh.h>
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
using namespace intrinsics;
namespace
{
struct TriangleMeshTriangles
{
const TriangleMesh* data;
const PxMeshScale& scale;
TriangleMeshTriangles(const TriangleMesh* _data, const PxMeshScale& _scale)
:
data(_data), scale(_scale)
{}
void getVertexIndices(PxU32 triIndex, PxU32& i0, PxU32& i1, PxU32& i2) const
{
const void* tris = data->getTriangles();
const bool ints16bit = data->has16BitIndices();
getVertexRefs(triIndex, i0, i1, i2, tris, ints16bit);
}
PxVec3 getVertex(PxU32 vertIndex) const
{
const PxVec3* verts = data->getVertices();
return scale.transform(verts[vertIndex]);
}
bool hasAdjacency() const
{
return data->getAdjacencies() != NULL;
}
PxU32 getAdjacentTriIndex(PxU32 triIndex, PxU32 edgIndex) const
{
const PxU32* adjucent = data->getAdjacencies();
return adjucent[triIndex * 3 + edgIndex];
}
};
struct HeightFieldTriangles
{
const HeightFieldUtil& hfUtil;
HeightFieldTriangles(const HeightFieldUtil& _hfUtil)
:
hfUtil(_hfUtil)
{}
void getVertexIndices(PxU32 triIndex, PxU32& i0, PxU32& i1, PxU32& i2) const
{
hfUtil.mHeightField->getTriangleVertexIndices(triIndex, i0, i1, i2);
}
PxVec3 getVertex(PxU32 vertIndex) const
{
PxVec3 v = hfUtil.mHeightField->getVertex(vertIndex);
PxVec3 s(hfUtil.mHfGeom->rowScale, hfUtil.mHfGeom->heightScale, hfUtil.mHfGeom->columnScale);
return PxVec3(v.x * s.x, v.y * s.y, v.z * s.z);
}
bool hasAdjacency() const
{
return true;
}
PxU32 getAdjacentTriIndex(PxU32 triIndex, PxU32 edgIndex) const
{
PxU32 adjucent[3];
hfUtil.mHeightField->getTriangleAdjacencyIndices(triIndex, 0, 0, 0, adjucent[0], adjucent[1], adjucent[2]);
return adjucent[edgIndex];
}
};
PxVec3 computeBarycentric(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& p)
{
PxVec4 bary;
PxComputeBarycentric(a, b, c, p, bary);
//PxReal u = bary.x, v = bary.y, w = bary.z;
//PX_ASSERT((a * u + b * v + c * w - p).magnitude() < 1e-3f); // VR: find out why this asserts sometimes
return bary.getXYZ();
}
template <typename TriangleSource>
bool validateContact(const PxVec3& normal, const PxVec3& pointB, PxU32 triIndex, const TriangleSource& tris)
{
const PxReal eps = 1e-5f;
PxU32 i0, i1, i2;
tris.getVertexIndices(triIndex, i0, i1, i2);
const PxVec3 v0 = tris.getVertex(i0),
v1 = tris.getVertex(i1),
v2 = tris.getVertex(i2);
const PxVec3 tn = (v1 - v0).cross(v2 - v0).getNormalized();
// close enough to a face contact
if (tn.dot(normal) > 0.99f)
// better to accept
return true;
const PxVec3 bc = computeBarycentric(v0, v1, v2, pointB);
// face contact
if (bc.x > eps && bc.x < 1.0f - eps &&
bc.y > eps && bc.y < 1.0f - eps &&
bc.z > eps && bc.z < 1.0f - eps)
// always accept
return true;
// vertex contact
if (bc.x > 1.0f - eps ||
bc.y > 1.0f - eps ||
bc.z > 1.0f - eps)
{
PxU32 vrtIndex = 0xffffffff;
if (tris.hasAdjacency())
{
if (bc.x > 1.0f - eps)
vrtIndex = 0;
else if (bc.y > 1.0f - eps)
vrtIndex = 1;
else if (bc.z > 1.0f - eps)
vrtIndex = 2;
}
if (vrtIndex != 0xffffffff)
{
PxU32 ai[] = { i0, i1, i2 };
PxU32 ai0 = ai[vrtIndex];
PxU32 adjIndex = tris.getAdjacentTriIndex(triIndex, (vrtIndex + 2) % 3);
while (adjIndex != triIndex && adjIndex != 0xffffffff)
{
// walking through the adjucent triangles surrounding the vertex and checking
// if any other end of the edges sharing the vertex projects onto the contact
// normal higher than the vertex itself.it'd meand that the contact normal is
// out of the vertex's voronoi region.
PxU32 bi[3]; tris.getVertexIndices(adjIndex, bi[0], bi[1], bi[2]);
for (PxU32 i = 0; i < 3; ++i)
{
PxU32 bi0 = bi[i], bi1 = bi[(i + 1) % 3], bi2 = bi[(i + 2) % 3];
if (bi1 == ai0)
{
const PxVec3 bv0 = tris.getVertex(bi0),
bv1 = tris.getVertex(bi1),
bv2 = tris.getVertex(bi2);
const PxReal bd10 = normal.dot((bv0 - bv1).getNormalized()),
bd12 = normal.dot((bv2 - bv1).getNormalized());
if (bd10 > eps || bd12 > eps)
// the vertex is hidden by one of the adjacent
// edges we can't collide with this vertex
return false;
// next triangle to check
adjIndex = tris.getAdjacentTriIndex(adjIndex, i);
break;
}
}
}
}
return true;
}
// edge contact
PxU32 edgIndex = 0xffffffff;
if (tris.hasAdjacency())
{
if (bc.x < eps)
edgIndex = 1;
else if (bc.y < eps)
edgIndex = 2;
else if (bc.z < eps)
edgIndex = 0;
}
if (edgIndex != 0xffffffff)
{
PxU32 ai[] = { i0, i1, i2 };
PxU32 ai0 = ai[edgIndex], ai1 = ai[(edgIndex + 1) % 3];
PxU32 adjIndex = tris.getAdjacentTriIndex(triIndex, edgIndex);
if (adjIndex != 0xffffffff)
{
// testing if the adjacent triangle's vertex opposite to this edge
// projects onto the contact normal higher than the edge itself. it'd
// mean that the normal is out of the edge's voronoi region.
PxU32 bi[3]; tris.getVertexIndices(adjIndex, bi[0], bi[1], bi[2]);
for (PxU32 i = 0; i < 3; ++i)
{
PxU32 bi0 = bi[i], bi1 = bi[(i + 1) % 3], bi2 = bi[(i + 2) % 3];
if (bi0 == ai1 && bi1 == ai0)
{
const PxVec3 bv1 = tris.getVertex(bi1),
bv2 = tris.getVertex(bi2);
const PxReal bd12 = normal.dot((bv2 - bv1).getNormalized());
if (bd12 > eps)
// the edge is hidden by the adjacent triangle
// we can't collide with this edge
return false;
}
}
}
}
return true;
}
}
bool Gu::contactConvexCoreTrimesh(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(cache);
PX_UNUSED(renderOutput);
struct Callback : MeshHitCallback<PxGeomRaycastHit>
{
const Gu::ConvexShape& mConvex;
const PxMeshScale& mScale;
const TriangleMesh* mData;
const PxReal mContactDist;
const PxReal mTriMargin;
const PxTransform& mTransform;
Gu::Contact& mContact;
PxRenderOutput* mRenderOutput;
Callback(const Gu::ConvexShape& convex, const PxMeshScale& scale, const TriangleMesh* data, PxReal contactDist,
PxReal triMargin, const PxTransform& transform, Gu::Contact& contact, PxRenderOutput* renderOutput)
:
MeshHitCallback<PxGeomRaycastHit>(CallbackMode::eMULTIPLE),
mConvex(convex), mScale(scale), mData(data), mContactDist(contactDist),
mTriMargin(triMargin), mTransform(transform), mContact(contact), mRenderOutput(renderOutput)
{}
virtual PxAgain processHit(const PxGeomRaycastHit& hit, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2,
PxReal&, const PxU32*)
{
const PxVec3 verts[] = { v0, v1, v2 };
Gu::ConvexShape tri;
tri.coreType = Gu::ConvexCore::Type::ePOINTS;
tri.pose = PxTransform(PxIdentity);
Gu::ConvexCore::PointsCore& core = *reinterpret_cast<Gu::ConvexCore::PointsCore*>(tri.coreData);
core.points = verts;
core.numPoints = 3;
core.stride = sizeof(PxVec3);
core.S = mScale.scale;
core.R = mScale.rotation;
tri.margin = mTriMargin;
const PxVec3 triNormal = (v1 - v0).cross(v2 - v0).getNormalized();
TriangleMeshTriangles triSource(mData, mScale);
PxVec3 normal, points[Gu::MAX_CONVEX_CONTACTS];
PxReal dists[Gu::MAX_CONVEX_CONTACTS];
if (PxU32 count = Gu::generateContacts(mConvex, tri, mContactDist, triNormal, normal, points, dists))
{
const PxVec3 worldNormal = mTransform.rotate(normal);
for (PxU32 i = 0; i < count; ++i)
{
PxVec3 pointB = points[i] - normal * dists[i];
if (validateContact(normal, pointB, hit.faceIndex, triSource))
{
const PxVec3 worldPoint = mTransform.transform(points[i]);
mContact.addPoint(worldPoint, worldNormal, dists[i]);
}
}
}
return true;
}
};
const PxConvexCoreGeometry& shapeConvex = checkedCast<PxConvexCoreGeometry>(shape0);
const PxTriangleMeshGeometry& shapeMesh = checkedCast<PxTriangleMeshGeometry>(shape1);
const TriangleMesh* meshData = _getMeshData(shapeMesh);
const PxTransform transform0in1 = transform1.transformInv(transform0);
const PxBounds3 bounds = Gu::computeBounds(shapeConvex, PxTransform(PxIdentity));
Box queryBox;
queryBox.extents = bounds.getExtents() + PxVec3(params.mContactDistance);
queryBox.center = transform0in1.transform(bounds.getCenter());
queryBox.rot = PxMat33(transform0in1.q);
PxReal triMargin = queryBox.extents.minElement() * 0.0001f;
const FastVertex2ShapeScaling meshScaling(shapeMesh.scale);
meshScaling.transformQueryBounds(queryBox.center, queryBox.extents, queryBox.rot);
Gu::Contact contact;
Gu::ConvexShape convex; Gu::makeConvexShape(shapeConvex, transform0in1, convex);
Callback callback(convex, shapeMesh.scale, meshData, params.mContactDistance, triMargin, transform1, contact, renderOutput);
Midphase::intersectOBB(meshData, queryBox, callback, false);
for (PxU32 i = 0; i < contact.numPatches(); ++i)
for (PxU32 j = 0; j < contact.numPatchPoints(i); ++j)
contactBuffer.contact(contact.patchPoint(i, j).p, contact.patchNormal(i), contact.patchPoint(i, j).d);
return contactBuffer.count > 0;
}
bool Gu::contactConvexCoreHeightfield(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(cache);
PX_UNUSED(renderOutput);
struct Callback : Gu::OverlapReport
{
const Gu::ConvexShape& mConvex;
const HeightFieldUtil& mHfUtil;
const PxReal mContactDist;
const PxTransform& mTransform;
Gu::Contact& mContact;
Callback(const Gu::ConvexShape& convex, const HeightFieldUtil& hfUtil, const PxReal contactDist,
const PxTransform& transform, Gu::Contact& contact)
:
mConvex(convex), mHfUtil(hfUtil), mContactDist(contactDist), mTransform(transform), mContact(contact)
{}
virtual bool reportTouchedTris(PxU32 numTris, const PxU32* triInds)
{
HeightFieldTriangles triSource(mHfUtil);
for (PxU32 t = 0; t < numTris; ++t)
{
PxU32 triIndex = triInds[t];
PxU32 vertInds[3];
triSource.getVertexIndices(triIndex, vertInds[0], vertInds[1], vertInds[2]);
PxVec3 verts[] = { triSource.getVertex(vertInds[0]),
triSource.getVertex(vertInds[1]),
triSource.getVertex(vertInds[2]) };
Gu::ConvexShape tri;
tri.coreType = Gu::ConvexCore::Type::ePOINTS;
tri.pose = PxTransform(PxIdentity);
Gu::ConvexCore::PointsCore& core = *reinterpret_cast<Gu::ConvexCore::PointsCore*>(tri.coreData);
core.points = verts;
core.numPoints = 3;
core.stride = sizeof(PxVec3);
core.S = PxVec3(1);
core.R = PxQuat(PxIdentity);
tri.margin = 0.0f;
PxVec3 normal, points[Gu::MAX_CONVEX_CONTACTS];
PxReal dists[Gu::MAX_CONVEX_CONTACTS];
if (PxU32 count = Gu::generateContacts(mConvex, tri, mContactDist, normal, points, dists))
{
const PxVec3 worldNormal = mTransform.rotate(normal);
for (PxU32 i = 0; i < count; ++i)
{
// VR: disabled for now - find out why it skips the tris it shouldn't
//PxVec3 pointB = points[i] - normal * (dists[i] * 0.5f);
//if (validateContact(normal, pointB, triIndex, triSource))
{
const PxVec3 worldPoint = mTransform.transform(points[i]);
mContact.addPoint(worldPoint, worldNormal, dists[i]);
}
}
}
}
return true;
}
};
const PxConvexCoreGeometry& shapeConvex = checkedCast<PxConvexCoreGeometry>(shape0);
const PxHeightFieldGeometry& shapeHeightfield = checkedCast<PxHeightFieldGeometry>(shape1);
const HeightFieldUtil hfUtil(shapeHeightfield);
const PxTransform transform0in1 = transform1.transformInv(transform0);
PxBounds3 bounds = Gu::computeBounds(shapeConvex, PxTransform(PxIdentity));
bounds.fattenFast(params.mContactDistance);
Gu::Contact contact;
Gu::ConvexShape convex; Gu::makeConvexShape(shapeConvex, transform0in1, convex);
Callback callback(convex, hfUtil, params.mContactDistance, transform1, contact);
hfUtil.overlapAABBTriangles0to1(transform0in1, bounds, callback);
for (PxU32 i = 0; i < contact.numPatches(); ++i)
for (PxU32 j = 0; j < contact.numPatchPoints(i); ++j)
contactBuffer.contact(contact.patchPoint(i, j).p, contact.patchNormal(i), contact.patchPoint(i, j).d);
return contactBuffer.count > 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,56 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geomutils/PxContactBuffer.h"
#include "GuContactMethodImpl.h"
using namespace physx;
bool Gu::contactCustomGeometryGeometry(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
PX_UNUSED(cache);
const PxCustomGeometry& customGeom = checkedCast<PxCustomGeometry>(shape0);
const PxGeometry& otherGeom = shape1;
customGeom.callbacks->generateContacts(customGeom, otherGeom, transform0, transform1,
params.mContactDistance, params.mMeshContactMargin, params.mToleranceLength,
contactBuffer);
return true;
}
bool Gu::contactGeometryCustomGeometry(GU_CONTACT_METHOD_ARGS)
{
bool res = contactCustomGeometryGeometry(shape1, shape0, transform1, transform0, params, cache, contactBuffer, renderOutput);
for (PxU32 i = 0; i < contactBuffer.count; ++i)
contactBuffer.contacts[i].normal = -contactBuffer.contacts[i].normal;
return res;
}

View File

@@ -0,0 +1,285 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxArray.h"
#include "foundation/PxAssert.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxMath.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxTransform.h"
#include "foundation/PxVec3.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxMeshQuery.h"
#include "geometry/PxTriangleMesh.h"
#include "GuCollisionSDF.h"
#include "GuTriangleMesh.h"
#include "GuContactMeshMesh.h"
#include "GuContactMethodImpl.h"
#include "GuContactReduction.h"
#include "GuMidphaseInterface.h"
#include "GuTriangle.h"
#include "GuTriangleRefinement.h"
using namespace physx;
using namespace Gu;
using ContactReduction = SDFContactReduction<5, 10000, 32>;
const PxI32 maxRefinementLevel = 8;
struct TransformedTriangle
{
PxVec3 v0, v1, v2;
PxI16 refinementLevel;
PxU8 boundary; // information about boundaries; currently unused
};
PX_FORCE_INLINE bool needsRefinement(const PxReal triRefThreshold, const TransformedTriangle& tri)
{
return (
(tri.v0-tri.v1).magnitudeSquared() > triRefThreshold ||
(tri.v1-tri.v2).magnitudeSquared() > triRefThreshold ||
(tri.v2-tri.v0).magnitudeSquared() > triRefThreshold
) && tri.refinementLevel < maxRefinementLevel;
}
// Find contacts between an SDF and a triangle mesh and return the number of contacts generated
PxU32 sdfMeshCollision (
const PxTransform32& PX_RESTRICT tfSdf, const PxTriangleMeshGeometry& PX_RESTRICT sdfGeom,
const PxTransform32& PX_RESTRICT tfMesh, const PxTriangleMeshGeometry& PX_RESTRICT meshGeom,
ContactReduction& contactReducer, const PxReal totalContactDistance, bool flipContactNormals
)
{
float min_separation = PX_MAX_REAL;
const TriangleMesh& mesh = static_cast<const TriangleMesh&>(*meshGeom.triangleMesh);
const TriangleMesh& sdfMesh = static_cast<const TriangleMesh&>(*sdfGeom.triangleMesh);
const PxMeshScale& sdfScale = sdfGeom.scale, & meshScale = meshGeom.scale;
const CollisionSDF& PX_RESTRICT sdf(sdfMesh.mSdfData);
const PxTransform meshToSdf = tfSdf.transformInv(tfMesh);
const PxMat33 sdfScaleMat = sdfScale.toMat33();
PxBounds3 sdfBoundsAtWorldScale(sdfScaleMat.transform(sdf.mSdfBoxLower), sdfScaleMat.transform(sdf.mSdfBoxUpper));
sdfBoundsAtWorldScale.fattenSafe(totalContactDistance);
const PxTransform poseT(sdfBoundsAtWorldScale.getCenter());
const PxBoxGeometry boxGeom(sdfBoundsAtWorldScale.getExtents());
const PxReal sdfDiagSq = (sdf.mSdfBoxUpper - sdf.mSdfBoxLower).magnitudeSquared();
const PxReal div = 1.0f / 256.0f;
const PxReal triRefThreshold = sdfDiagSq * div;
const bool singleSdf = meshGeom.triangleMesh->getSDF() == NULL; // triangle subdivision if single SDF
const PxU32 MAX_INTERSECTIONS = 1024 * 32;
PxArray<PxU32> overlappingTriangles;
overlappingTriangles.resize(MAX_INTERSECTIONS); //TODO: Not ideal, dynamic allocation for every function call
//PxU32 overlappingTriangles[MAX_INTERSECTIONS]; //TODO: Is this too much memory to allocate on the stack?
bool overflow = false;
const PxU32 overlapCount = PxMeshQuery::findOverlapTriangleMesh(boxGeom, poseT, meshGeom, meshToSdf, overlappingTriangles.begin(), MAX_INTERSECTIONS, 0, overflow);
PX_ASSERT(!overflow);
// we use cullScale to account for SDF scaling whenever distances are
const PxReal cullScale = totalContactDistance / sdfScale.scale.minElement();
const PxVec3* PX_RESTRICT vertices = mesh.getVertices();
const void* PX_RESTRICT tris = mesh.getTriangles();
const bool has16BitIndices = mesh.getTriangleMeshFlags() & physx::PxTriangleMeshFlag::e16_BIT_INDICES;
const PxU32 nbTris = overlapCount; // mesh.getNbTriangles();
/* Transforms fused; unoptimized version:
v0 = shape2Vertex(
meshToSdf.transform(vertex2Shape(vertices[triIndices.mRef[0]], meshScale.scale, meshScale.rotation)),
sdfScale.scale, sdfScale.rotation); */
const PxMat33 sdfScaleIMat = sdfScale.getInverse().toMat33();
const PxMat33 fusedRotScale = sdfScaleIMat * PxMat33Padded(meshToSdf.q) * meshScale.toMat33();
const PxVec3 fusedTranslate = sdfScaleIMat * meshToSdf.p;
const PxMat33Padded tfSdfRotationMatrix(tfSdf.q);
const PxMat33 pointToWorldR = tfSdfRotationMatrix * sdfScale.toMat33();
const PxMat33 normalToWorld = tfSdfRotationMatrix * sdfScaleIMat;
const PxU32 COLLISION_BUF_SIZE = 512;
const PxU32 sudivBufSize = singleSdf ? maxRefinementLevel * 3 : 0; // Overhead for subdivision (pop one, push four)
PX_ASSERT(sudivBufSize < COLLISION_BUF_SIZE/4); // ensure reasonable buffer size
TransformedTriangle goodTriangles[COLLISION_BUF_SIZE];
PxU32 nbContacts = 0;
for (PxU32 i = 0, allTrisProcessed = 0; !allTrisProcessed;)
{
// try to find `COLLISION_BUF_SIZE` triangles that cannot be culled immediately
PxU32 nbGoodTris = 0;
// Every triangle that overlaps with the sdf's axis aligned bounding box is checked against the sdf to see if an intersection
// can be ruled out. If an intersection can be ruled out, the triangle is not further processed. Since SDF data is accessed,
// the check is more accurate (but still very fast) than a simple bounding box overlap test.
// Performance measurements confirm that this pre-pruning loop actually increases performance significantly on some scenes
for ( ; nbGoodTris < COLLISION_BUF_SIZE - sudivBufSize; ++i)
{
if (i == nbTris)
{
allTrisProcessed = true;
break;
}
const PxU32 triIdx = overlappingTriangles[i];
TransformedTriangle niceTri;
const Gu::IndexedTriangle32 triIndices = has16BitIndices ?
getTriangleVertexIndices<PxU16>(tris, triIdx) :
getTriangleVertexIndices<PxU32>(tris, triIdx);
niceTri.v0 = fusedTranslate + fusedRotScale * vertices[triIndices.mRef[0]];
niceTri.v1 = fusedTranslate + fusedRotScale * vertices[triIndices.mRef[1]];
niceTri.v2 = fusedTranslate + fusedRotScale * vertices[triIndices.mRef[2]];
if (singleSdf)
niceTri.refinementLevel = 0;
// - triangles that are not culled are added to goodTriangles
if (sdfTriangleSphericalCull(sdf, niceTri.v0, niceTri.v1, niceTri.v2, cullScale))
goodTriangles[nbGoodTris++] = niceTri;
}
// in promising triangles
// - triangles are popped from goodTriangles and a contact generated or,
// if subdivision is indicated, their children are pushed on top
for (PxU32 goodTriEnd = nbGoodTris; goodTriEnd > 0;)
{
const TransformedTriangle tri = goodTriangles[--goodTriEnd]; // pop
// decide on need for subdivision
if (singleSdf && needsRefinement(triRefThreshold, tri))
{
for (int childIdx = 0; childIdx < 4; ++childIdx)
{
TransformedTriangle child = tri;
Gu::getSubTriangle4(childIdx, child.v0, child.v1, child.v2);
++child.refinementLevel;
if (sdfTriangleSphericalCull(sdf, child.v0, child.v1, child.v2, cullScale))
goodTriangles[goodTriEnd++] = child;
}
continue;
}
// generate contacts
PxVec3 sdfPoint, contactDir;
PxReal separation = sdfTriangleCollision(sdf, tri.v0, tri.v1, tri.v2, sdfPoint, contactDir, cullScale);
min_separation = PxMin(min_separation, separation);
if (separation < cullScale)
{
const PxVec3 worldPoint = tfSdf.p + pointToWorldR * sdfPoint;
contactDir = normalToWorld * contactDir;
const PxReal magSq = contactDir.magnitudeSquared();
// TODO(CA): handle case where only one mesh has an SDF and update heuristic once this is done
if (magSq < 1e-6f) // ignore contacts with a bad/missing normal
continue;
const PxReal mag = PxRecipSqrt(magSq);
if (singleSdf && tri.refinementLevel)
{
const PxVec3 n = (tri.v1 - tri.v0).getNormalized().cross(tri.v2 - tri.v0).getNormalized();
const PxVec3 sdfBoxCenter = 0.5f * (sdf.mSdfBoxUpper + sdf.mSdfBoxLower);
const PxReal triangleNormalSign = -PxSign((sdfBoxCenter - tri.v0).dot(n));
contactDir = normalToWorld * triangleNormalSign * n;
contactDir.normalize();
contactDir /= mag ;
}
separation *= mag;
contactDir *= mag;
const TinyContact contact{flipContactNormals ? -contactDir : contactDir, separation, worldPoint};
contactReducer.addContact(contact);
++nbContacts;
}
}
}
return nbContacts;
};
bool Gu::contactMeshMesh(GU_CONTACT_METHOD_ARGS)
{
PX_UNUSED(renderOutput);
PX_UNUSED(cache);
// Get meshes
const PxTriangleMeshGeometry& geom0 = checkedCast<PxTriangleMeshGeometry>(shape0),
& geom1 = checkedCast<PxTriangleMeshGeometry>(shape1);
PX_ASSERT(geom0.triangleMesh != NULL && geom1.triangleMesh != NULL);
PxU32 nbContacts = 0;
const PxReal contactDistance = params.mContactDistance; // computed in `checkContactsMustBeGenerated`
const bool mesh0PreferProj = static_cast<const TriangleMesh&>(*geom0.triangleMesh).getPreferSDFProjection(),
mesh1PreferProj = static_cast<const TriangleMesh&>(*geom1.triangleMesh).getPreferSDFProjection();
const PxU32 n0 = geom0.triangleMesh->getNbVertices(), n1 = geom1.triangleMesh->getNbVertices();
// sdf0first: in first pass, treat mesh0 as an sdf and mesh1 as a mesh
const bool sdf0first = ((!mesh0PreferProj && mesh1PreferProj) || (mesh0PreferProj == mesh1PreferProj && n1 < n0));
ContactReduction contactReducer;
const bool geom0HasSdf = geom0.triangleMesh->getSDF() != NULL,
geom1HasSdf = geom1.triangleMesh->getSDF() != NULL;
PX_ASSERT(geom0HasSdf || geom1HasSdf); // require at least one SDF
if (!geom0HasSdf && !geom1HasSdf)
{
return false;
}
if (!(geom0HasSdf && geom1HasSdf))
{
if (geom0HasSdf)
nbContacts += sdfMeshCollision(transform0, geom0, transform1, geom1, contactReducer, contactDistance, true);
else
nbContacts += sdfMeshCollision(transform1, geom1, transform0, geom0, contactReducer, contactDistance, false);
}
else if (sdf0first)
{
nbContacts += sdfMeshCollision(transform0, geom0, transform1, geom1, contactReducer, contactDistance, true);
nbContacts += sdfMeshCollision(transform1, geom1, transform0, geom0, contactReducer, contactDistance, false);
}
else
{
nbContacts += sdfMeshCollision(transform1, geom1, transform0, geom0, contactReducer, contactDistance, false);
nbContacts += sdfMeshCollision(transform0, geom0, transform1, geom1, contactReducer, contactDistance, true);
}
contactReducer.flushToContactBuffer(contactBuffer);
return nbContacts != 0;
}

View File

@@ -0,0 +1,196 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_MESH_COLLISION_H
#define GU_MESH_MESH_COLLISION_H
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxVec3.h"
#include "GuCollisionSDF.h"
#include "GuDistancePointTriangle.h"
#include "GuTriangle.h"
namespace physx
{
namespace Gu
{
template <typename T>
int argmin(const T& v0, const T& v1)
{
if (v0 < v1)
return 0;
return 1;
}
template <typename T>
int argmin(const T& v0, const T& v1, const T& v2, const T& v3)
{
const int ma = argmin(v0, v1), mb = argmin(v2, v3);
if (!argmin((!ma ? v0 : v1),(!mb ? v2: v3)))
return ma;
return mb+2;
}
// Based on one SDF evaluation at the centroid, can the circumsphere of the triangle
// `v0`, `v1`, `v2` get closer to the surface given by `sdf` than `cutoffDistance`?
static PX_INLINE bool sdfTriangleSphericalCull(
const CollisionSDF& PX_RESTRICT sdf,
const PxVec3& PX_RESTRICT v0, const PxVec3& PX_RESTRICT v1, const PxVec3& PX_RESTRICT v2,
PxReal cutoffDistance)
{
const PxReal third = 1.0f / 3.0f;
const PxVec3 centroid = (v0 + v1 + v2) * third;
const PxReal sphereRadiusSq = PxMax(
(v0 - centroid).magnitudeSquared(),
PxMax((v1 - centroid).magnitudeSquared(), (v2 - centroid).magnitudeSquared()));
const PxVec3 boxPos = sdf.clampToBox(centroid);
const PxReal centroidToBoxSq = (centroid - boxPos).magnitudeSquared();
// TODO(CA): consider minimum of SDF on box boundary, making this check tighter
if (PxSqrt(centroidToBoxSq) > PxSqrt(sphereRadiusSq) + cutoffDistance)
return false; //Early out without touching SDF data
const PxReal centroidSdf = sdf.dist(centroid);
return centroidSdf < PxSqrt(sphereRadiusSq) + cutoffDistance;
}
// Find maximum separation of an sdf and a triangle and find the contact point and normal separation is below `cutoffDistance`
// Return the separation, or`PX\_MAX\_F32` if it exceeds `cutoffDistance`
template <PxU32 TMaxLineSearchIters = 0, PxU32 TMaxPGDIterations = 32, bool TFastGrad = true>
PX_INLINE PxReal sdfTriangleCollision(
const CollisionSDF& PX_RESTRICT sdf,
const PxVec3& PX_RESTRICT v0, const PxVec3& PX_RESTRICT v1, const PxVec3& PX_RESTRICT v2,
PxVec3& point, PxVec3& dir, PxReal cutoffDistance)
{
const PxReal third = 1.0f / 3.0f;
const PxVec3 centroid = (v0 + v1 + v2) * third;
// barycentric coordinates, corresponding to v0, v1, v2
PxVec3 c(0.f);
// choose starting iterate
const int start = argmin(sdf.dist(v0), sdf.dist(v1), sdf.dist(v2), sdf.dist(centroid));
switch (start)
{
case 0:
c.x = 1.f; break;
case 1:
c.y = 1.f; break;
case 2:
c.z = 1.f; break;
case 3:
c = PxVec3(third); break;
default:
PX_ASSERT(false);
}
PxReal stepSize = 0.25;
// we could also compute the gradient's lipschitz constant when baking!
PxVec3 dfdp; // gradient w.r.t. p
const PxReal toleranceSq = 1e-10f;
for (PxU32 i = 0; i < TMaxPGDIterations; ++i)
{
const PxVec3 p = c.x * v0 + c.y * v1 + c.z * v2;
PxReal dist_old = 0;
if (TFastGrad)
dist_old = sdf.dist(p, &dfdp);
else
dfdp = sdf.grad(p);
const PxReal dfdpMagSq = dfdp.magnitudeSquared();
if (dfdpMagSq == 0.0f)
{
// TODO(CA): consider expanding this into a stopping criterion
// At a critical point. Take a small step away into an arbitrary direction
dfdp = PxVec3(0.5718465865353257f, 0.7055450997557186f, 0.41856611625714474f);
}
else
dfdp *= PxRecipSqrt(dfdpMagSq);
// Simple projected gradient descent
const PxVec3 dfdc = PxVec3(dfdp.dot(v0-p), dfdp.dot(v1-p), dfdp.dot(v2-p));
const PxVec3 c_old = c;
if (TMaxLineSearchIters) //Line Search is quite expensive since it increases the number of expensive calls to sdf.dist by approximately a factor of MAX_LINE_SEARCH_ITERATIONS
{
PxReal s = 1;
if (!TFastGrad)
dist_old = sdf.dist(p);
c = closestPtPointBaryTriangle(c_old - s * dfdc);
for (PxU32 ls_it = 0; ls_it < TMaxLineSearchIters; ++ls_it)
{
// restore barycentric coordinates
const PxVec3 p_new = c.x * v0 + c.y * v1 + c.z * v2;
if (sdf.dist(p_new) <= dist_old)
{
#if 0
if (ls_it > 0)
printf("%d: %d ls iterations\n", i, ls_it+1);
#endif
break;
}
s *= 0.5f;
c = closestPtPointBaryTriangle(c_old - s * dfdc);
}
}
else
{
// take step and restore barycentric coordinates
c = closestPtPointBaryTriangle(c - stepSize * dfdc);
}
// this detects a minimum found on the boundary
if ((c - c_old).magnitudeSquared() < toleranceSq)
break;
stepSize *= 0.8f; // line search will get rid of this
}
const PxVec3 p = c.x * v0 + c.y * v1 + c.z * v2;
point = p;
return sdf.distUsingGradient(p, dir, cutoffDistance);
}
// Get the indices of the triangle at position `triIdx`.
// `T` should be `PxU16` if the mesh has 16 bit indices, and `PxU32` otherwise
template <typename T>
PX_INLINE Gu::IndexedTriangle32 getTriangleVertexIndices(const void* triangles, PxU32 triIdx)
{
const T* trisCast = reinterpret_cast<const T*>(triangles);
return {trisCast[triIdx*3], trisCast[triIdx*3+1], trisCast[triIdx*3+2]};
}
} // namespace Gu
} // namespace physx
#endif

View File

@@ -0,0 +1,208 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2025 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_CONTACTMETHODIMPL_H
#define GU_CONTACTMETHODIMPL_H
#include "foundation/PxAssert.h"
#include "common/PxPhysXCommonConfig.h"
#include "collision/PxCollisionDefs.h"
#include "GuGeometryChecks.h"
namespace physx
{
class PxGeometry;
class PxRenderOutput;
class PxContactBuffer;
namespace Gu
{
class PersistentContactManifold;
class MultiplePersistentContactManifold;
struct NarrowPhaseParams
{
PX_FORCE_INLINE NarrowPhaseParams(PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength) :
mContactDistance(contactDistance),
mMeshContactMargin(meshContactMargin),
mToleranceLength(toleranceLength) {}
PxReal mContactDistance;
const PxReal mMeshContactMargin; // PT: Margin used to generate mesh contacts. Temp & unclear, should be removed once GJK is default path.
const PxReal mToleranceLength; // PT: copy of PxTolerancesScale::length
};
enum ManifoldFlags
{
IS_MANIFOLD = (1<<0),
IS_MULTI_MANIFOLD = (1<<1)
};
struct Cache : public PxCache
{
Cache()
{
}
PX_FORCE_INLINE void setManifold(void* manifold)
{
PX_ASSERT((size_t(manifold) & 0xF) == 0);
mCachedData = reinterpret_cast<PxU8*>(manifold);
mManifoldFlags |= IS_MANIFOLD;
}
PX_FORCE_INLINE void setMultiManifold(void* manifold)
{
PX_ASSERT((size_t(manifold) & 0xF) == 0);
mCachedData = reinterpret_cast<PxU8*>(manifold);
mManifoldFlags |= IS_MANIFOLD|IS_MULTI_MANIFOLD;
}
PX_FORCE_INLINE PxU8 isManifold() const
{
return PxU8(mManifoldFlags & IS_MANIFOLD);
}
PX_FORCE_INLINE PxU8 isMultiManifold() const
{
return PxU8(mManifoldFlags & IS_MULTI_MANIFOLD);
}
PX_FORCE_INLINE PersistentContactManifold& getManifold()
{
PX_ASSERT(isManifold());
PX_ASSERT(!isMultiManifold());
PX_ASSERT((uintptr_t(mCachedData) & 0xf) == 0);
return *reinterpret_cast<PersistentContactManifold*>(mCachedData);
}
PX_FORCE_INLINE MultiplePersistentContactManifold& getMultipleManifold()
{
PX_ASSERT(isManifold());
PX_ASSERT(isMultiManifold());
PX_ASSERT((uintptr_t(mCachedData) & 0xf) == 0);
return *reinterpret_cast<MultiplePersistentContactManifold*>(mCachedData);
}
};
}
template<class Geom> PX_CUDA_CALLABLE PX_FORCE_INLINE const Geom& checkedCast(const PxGeometry& geom)
{
checkType<Geom>(geom);
return static_cast<const Geom&>(geom);
}
#define GU_CONTACT_METHOD_ARGS \
const PxGeometry& shape0, \
const PxGeometry& shape1, \
const PxTransform32& transform0, \
const PxTransform32& transform1, \
const Gu::NarrowPhaseParams& params, \
Gu::Cache& cache, \
PxContactBuffer& contactBuffer, \
PxRenderOutput* renderOutput
#define GU_CONTACT_METHOD_ARGS_UNUSED \
const PxGeometry&, \
const PxGeometry&, \
const PxTransform32&, \
const PxTransform32&, \
const Gu::NarrowPhaseParams&, \
Gu::Cache&, \
PxContactBuffer&, \
PxRenderOutput*
namespace Gu
{
PX_PHYSX_COMMON_API bool contactSphereSphere(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactSphereCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactSphereBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCapsuleCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCapsuleBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCapsuleConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactBoxBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactBoxConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactSphereMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCapsuleMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactBoxMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactSphereHeightfield(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCapsuleHeightfield(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactBoxHeightfield(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexHeightfield(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactSpherePlane(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactPlaneBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactPlaneCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactPlaneConvexCore(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactPlaneConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactPlaneMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactMeshMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexCoreConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexCoreTrimesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactConvexCoreHeightfield(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactCustomGeometryGeometry(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool contactGeometryCustomGeometry(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactCapsuleMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactBoxMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactConvexMesh(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereHeightField(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactCapsuleHeightField(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactBoxHeightField(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactConvexHeightField(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactPlaneCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactPlaneBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactPlaneConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereSphere(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSpherePlane(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactSphereConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactCapsuleCapsule(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactCapsuleBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactCapsuleConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactBoxBox(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactBoxConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactConvexConvex(GU_CONTACT_METHOD_ARGS);
PX_PHYSX_COMMON_API bool pcmContactGeometryCustomGeometry(GU_CONTACT_METHOD_ARGS);
}
}
#endif

Some files were not shown because too many files have changed in this diff Show More