Skip to content

Commit 296de7d

Browse files
committed
Merge pull request #102614 from jrouwe/102544
[Jolt Physics] Fix ghost collision issue on dense triangle meshes
2 parents 4bac259 + 3e608b8 commit 296de7d

File tree

2 files changed

+188
-19
lines changed

2 files changed

+188
-19
lines changed
Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,169 @@
1+
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
2+
// SPDX-FileCopyrightText: 2025 Jorrit Rouwe
3+
// SPDX-License-Identifier: MIT
4+
5+
#pragma once
6+
7+
#include <Jolt/Core/STLAllocator.h>

#include <cstdint>
8+
9+
JPH_NAMESPACE_BEGIN
10+
11+
#ifndef JPH_DISABLE_CUSTOM_ALLOCATOR
12+
13+
/// STL allocator that keeps N elements in a local buffer before falling back to regular allocations
14+
template <typename T, size_t N>
15+
class STLLocalAllocator : private STLAllocator<T>
16+
{
17+
using Base = STLAllocator<T>;
18+
19+
public:
20+
/// General properties
21+
using value_type = T;
22+
using pointer = T *;
23+
using const_pointer = const T *;
24+
using reference = T &;
25+
using const_reference = const T &;
26+
using size_type = size_t;
27+
using difference_type = ptrdiff_t;
28+
29+
/// The allocator is not stateless (has local buffer)
30+
using is_always_equal = std::false_type;
31+
32+
/// We cannot copy, move or swap allocators
33+
using propagate_on_container_copy_assignment = std::false_type;
34+
using propagate_on_container_move_assignment = std::false_type;
35+
using propagate_on_container_swap = std::false_type;
36+
37+
/// Constructor
38+
STLLocalAllocator() = default;
39+
STLLocalAllocator(const STLLocalAllocator &) = delete; // Can't copy an allocator as the buffer is local to the original
40+
STLLocalAllocator(STLLocalAllocator &&) = delete; // Can't move an allocator as the buffer is local to the original
41+
STLLocalAllocator & operator = (const STLLocalAllocator &) = delete; // Can't copy an allocator as the buffer is local to the original
42+
43+
/// Constructor used when rebinding to another type. This expects the allocator to use the original memory pool from the first allocator,
44+
/// but in our case we cannot use the local buffer of the original allocator as it has different size and alignment rules.
45+
/// To solve this we make this allocator fall back to the heap immediately.
46+
template <class T2> STLLocalAllocator(const STLLocalAllocator<T2, N> &) : mNumElementsUsed(N) { }
47+
48+
/// Check if inPointer is in the local buffer
49+
inline bool is_local(const_pointer inPointer) const
50+
{
51+
ptrdiff_t diff = inPointer - reinterpret_cast<const_pointer>(mElements);
52+
return diff >= 0 && diff < ptrdiff_t(N);
53+
}
54+
55+
/// Allocate memory
56+
inline pointer allocate(size_type inN)
57+
{
58+
// If we allocate more than we have, fall back to the heap
59+
if (mNumElementsUsed + inN > N)
60+
return Base::allocate(inN);
61+
62+
// Allocate from our local buffer
63+
pointer result = reinterpret_cast<pointer>(mElements) + mNumElementsUsed;
64+
mNumElementsUsed += inN;
65+
return result;
66+
}
67+
68+
/// Always implements a reallocate function as we can often reallocate in place
69+
static constexpr bool has_reallocate = true;
70+
71+
/// Reallocate memory
72+
inline pointer reallocate(pointer inOldPointer, size_type inOldSize, size_type inNewSize)
73+
{
74+
JPH_ASSERT(inNewSize > 0); // Reallocating to zero size is implementation dependent, so we don't allow it
75+
76+
// If there was no previous allocation, we can go through the regular allocate function
77+
if (inOldPointer == nullptr)
78+
return allocate(inNewSize);
79+
80+
// If the pointer is outside our local buffer, fall back to the heap
81+
if (!is_local(inOldPointer))
82+
{
83+
if constexpr (AllocatorHasReallocate<Base>::sValue)
84+
return Base::reallocate(inOldPointer, inOldSize, inNewSize);
85+
else
86+
return ReallocateImpl(inOldPointer, inOldSize, inNewSize);
87+
}
88+
89+
// If we happen to have space left, we only need to update our bookkeeping
90+
pointer base_ptr = reinterpret_cast<pointer>(mElements) + mNumElementsUsed - inOldSize;
91+
if (inOldPointer == base_ptr
92+
&& mNumElementsUsed - inOldSize + inNewSize <= N)
93+
{
94+
mNumElementsUsed += inNewSize - inOldSize;
95+
return base_ptr;
96+
}
97+
98+
// We can't reallocate in place, fall back to the heap
99+
return ReallocateImpl(inOldPointer, inOldSize, inNewSize);
100+
}
101+
102+
/// Free memory
103+
inline void deallocate(pointer inPointer, size_type inN)
104+
{
105+
// If the pointer is not in our local buffer, fall back to the heap
106+
if (!is_local(inPointer))
107+
return Base::deallocate(inPointer, inN);
108+
109+
// Else we can only reclaim memory if it was the last allocation
110+
if (inPointer == reinterpret_cast<pointer>(mElements) + mNumElementsUsed - inN)
111+
mNumElementsUsed -= inN;
112+
}
113+
114+
/// Allocators are not-stateless, assume if allocator address matches that the allocators are the same
115+
inline bool operator == (const STLLocalAllocator<T, N> &inRHS) const
116+
{
117+
return this == &inRHS;
118+
}
119+
120+
inline bool operator != (const STLLocalAllocator<T, N> &inRHS) const
121+
{
122+
return this != &inRHS;
123+
}
124+
125+
/// Converting to allocator for other type
126+
template <typename T2>
127+
struct rebind
128+
{
129+
using other = STLLocalAllocator<T2, N>;
130+
};
131+
132+
private:
133+
/// Implements reallocate when the base class doesn't or when we go from local buffer to heap
134+
inline pointer ReallocateImpl(pointer inOldPointer, size_type inOldSize, size_type inNewSize)
135+
{
136+
pointer new_pointer = Base::allocate(inNewSize);
137+
size_type n = min(inOldSize, inNewSize);
138+
if constexpr (std::is_trivially_copyable<T>())
139+
{
140+
// Can use mem copy
141+
memcpy(new_pointer, inOldPointer, n * sizeof(T));
142+
}
143+
else
144+
{
145+
// Need to actually move the elements
146+
for (size_t i = 0; i < n; ++i)
147+
{
148+
new (new_pointer + i) T(std::move(inOldPointer[i]));
149+
inOldPointer[i].~T();
150+
}
151+
}
152+
deallocate(inOldPointer, inOldSize);
153+
return new_pointer;
154+
}
155+
156+
alignas(T) uint8 mElements[N * sizeof(T)];
157+
size_type mNumElementsUsed = 0;
158+
};
159+
160+
/// The STLLocalAllocator always implements a reallocate function as it can often reallocate in place
161+
template <class T, size_t N> struct AllocatorHasReallocate<STLLocalAllocator<T, N>> { static constexpr bool sValue = STLLocalAllocator<T, N>::has_reallocate; };
162+
163+
#else
164+
165+
/// With the custom allocator disabled, fall back to the standard allocator.
/// Note that the local-buffer element count N is accepted but ignored in this configuration.
template <typename T, size_t N> using STLLocalAllocator = std::allocator<T>;
166+
167+
#endif // !JPH_DISABLE_CUSTOM_ALLOCATOR
168+
169+
JPH_NAMESPACE_END

thirdparty/jolt_physics/Jolt/Physics/Collision/InternalEdgeRemovingCollector.h

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#pragma once
66

77
#include <Jolt/Core/QuickSort.h>
8+
#include <Jolt/Core/STLLocalAllocator.h>
89
#include <Jolt/Physics/Collision/CollisionDispatch.h>
910

1011
//#define JPH_INTERNAL_EDGE_REMOVING_COLLECTOR_DEBUG
@@ -17,10 +18,13 @@ JPH_NAMESPACE_BEGIN
1718

1819
/// Removes internal edges from collision results. Can be used to filter out 'ghost collisions'.
1920
/// Based on: Contact generation for meshes - Pierre Terdiman (https://www.codercorner.com/MeshContacts.pdf)
21+
///
22+
/// Note that this class requires that CollideSettingsBase::mActiveEdgeMode == EActiveEdgeMode::CollideWithAll
23+
/// and CollideSettingsBase::mCollectFacesMode == ECollectFacesMode::CollectFaces.
2024
class InternalEdgeRemovingCollector : public CollideShapeCollector
2125
{
22-
static constexpr uint cMaxDelayedResults = 16;
23-
static constexpr uint cMaxVoidedFeatures = 128;
26+
static constexpr uint cMaxLocalDelayedResults = 32;
27+
static constexpr uint cMaxLocalVoidedFeatures = 128;
2428

2529
/// Check if a vertex is voided
2630
inline bool IsVoided(const SubShapeID &inSubShapeID, Vec3 inV) const
@@ -35,17 +39,14 @@ class InternalEdgeRemovingCollector : public CollideShapeCollector
3539
/// Add all vertices of a face to the voided features
3640
inline void VoidFeatures(const CollideShapeResult &inResult)
3741
{
38-
if (mVoidedFeatures.size() < cMaxVoidedFeatures)
39-
for (const Vec3 &v : inResult.mShape2Face)
40-
if (!IsVoided(inResult.mSubShapeID1, v))
41-
{
42-
Voided vf;
43-
v.StoreFloat3(&vf.mFeature);
44-
vf.mSubShapeID = inResult.mSubShapeID1;
45-
mVoidedFeatures.push_back(vf);
46-
if (mVoidedFeatures.size() == cMaxVoidedFeatures)
47-
break;
48-
}
42+
for (const Vec3 &v : inResult.mShape2Face)
43+
if (!IsVoided(inResult.mSubShapeID1, v))
44+
{
45+
Voided vf;
46+
v.StoreFloat3(&vf.mFeature);
47+
vf.mSubShapeID = inResult.mSubShapeID1;
48+
mVoidedFeatures.push_back(vf);
49+
}
4950
}
5051

5152
/// Call the chained collector
@@ -119,19 +120,18 @@ class InternalEdgeRemovingCollector : public CollideShapeCollector
119120
return ChainAndVoid(inResult);
120121

121122
// Delayed processing
122-
if (mDelayedResults.size() == cMaxDelayedResults)
123-
return ChainAndVoid(inResult);
124123
mDelayedResults.push_back(inResult);
125124
}
126125

127126
/// After all hits have been added, call this function to process the delayed results
128127
void Flush()
129128
{
130129
// Sort on biggest penetration depth first
131-
uint sorted_indices[cMaxDelayedResults];
130+
Array<uint, STLLocalAllocator<uint, cMaxLocalDelayedResults>> sorted_indices;
131+
sorted_indices.resize(mDelayedResults.size());
132132
for (uint i = 0; i < uint(mDelayedResults.size()); ++i)
133133
sorted_indices[i] = i;
134-
QuickSort(sorted_indices, sorted_indices + mDelayedResults.size(), [this](uint inLHS, uint inRHS) { return mDelayedResults[inLHS].mPenetrationDepth > mDelayedResults[inRHS].mPenetrationDepth; });
134+
QuickSort(sorted_indices.begin(), sorted_indices.end(), [this](uint inLHS, uint inRHS) { return mDelayedResults[inLHS].mPenetrationDepth > mDelayedResults[inRHS].mPenetrationDepth; });
135135

136136
// Loop over all results
137137
for (uint i = 0; i < uint(mDelayedResults.size()); ++i)
@@ -243,8 +243,8 @@ class InternalEdgeRemovingCollector : public CollideShapeCollector
243243
};
244244

245245
CollideShapeCollector & mChainedCollector;
246-
StaticArray<Voided, cMaxVoidedFeatures> mVoidedFeatures;
247-
StaticArray<CollideShapeResult, cMaxDelayedResults> mDelayedResults;
246+
Array<Voided, STLLocalAllocator<Voided, cMaxLocalVoidedFeatures>> mVoidedFeatures;
247+
Array<CollideShapeResult, STLLocalAllocator<CollideShapeResult, cMaxLocalDelayedResults>> mDelayedResults;
248248
};
249249

250250
JPH_NAMESPACE_END

0 commit comments

Comments
 (0)