[compiler-rt] scudo: refactor scudo::Allocator::deallocate (PR #147735)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 9 07:09:23 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Justin King (jcking)
<details>
<summary>Changes</summary>
Split off from #146556 as requested by reviewer.
---
Full diff: https://github.com/llvm/llvm-project/pull/147735.diff
3 Files Affected:
- (modified) compiler-rt/lib/scudo/standalone/combined.h (+78-51)
- (modified) compiler-rt/lib/scudo/standalone/tests/combined_test.cpp (+13-13)
- (modified) compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp (+14-14)
``````````diff
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 87acdec2a3bac..ec56307b66154 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -432,60 +432,30 @@ class Allocator {
SizeOrUnusedBytes, FillContents);
}
- NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
- UNUSED uptr Alignment = MinAlignment) {
- if (UNLIKELY(!Ptr))
- return;
-
- // For a deallocation, we only ensure minimal initialization, meaning thread
- // local data will be left uninitialized for now (when using ELF TLS). The
- // fallback cache will be used instead. This is a workaround for a situation
- // where the only heap operation performed in a thread would be a free past
- // the TLS destructors, ending up in initialized thread specific data never
- // being destroyed properly. Any other heap operation will do a full init.
- initThreadMaybe(/*MinimalInit=*/true);
-
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
- GuardedAlloc.deallocate(Ptr);
- Stats.lock();
- Stats.add(StatFree, GuardedAllocSlotSize);
- Stats.sub(StatAllocated, GuardedAllocSlotSize);
- Stats.unlock();
- return;
- }
-#endif // GWP_ASAN_HOOKS
-
- if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
- reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
-
- void *TaggedPtr = Ptr;
- Ptr = getHeaderTaggedPointer(Ptr);
-
- Chunk::UnpackedHeader Header;
- Chunk::loadHeader(Cookie, Ptr, &Header);
-
- if (UNLIKELY(Header.State != Chunk::State::Allocated))
- reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ ALWAYS_INLINE void deallocate(void *Ptr, Chunk::Origin Origin) {
+ deallocate(Ptr, Origin, /*DeleteSize=*/0, /*HasDeleteSize=*/false,
+ /*DeleteAlignment=*/0, /*HasDeleteAlignment=*/false);
+ }
- const Options Options = Primary.Options.load();
- if (Options.get(OptionBit::DeallocTypeMismatch)) {
- if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
- // With the exception of memalign'd chunks, that can be still be free'd.
- if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
- Origin != Chunk::Origin::Malloc)
- reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
- Header.OriginOrWasZeroed, Origin);
- }
- }
+ ALWAYS_INLINE void deallocateSized(void *Ptr, Chunk::Origin Origin,
+ uptr DeleteSize) {
+ deallocate(Ptr, Origin, /*DeleteSize=*/DeleteSize, /*HasDeleteSize=*/true,
+ /*DeleteAlignment=*/0, /*HasDeleteAlignment=*/false);
+ }
- const uptr Size = getSize(Ptr, &Header);
- if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
- if (UNLIKELY(DeleteSize != Size))
- reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
- }
+ ALWAYS_INLINE void deallocateSizedAligned(void *Ptr, Chunk::Origin Origin,
+ uptr DeleteSize,
+ uptr DeleteAlignment) {
+ deallocate(Ptr, Origin, /*DeleteSize=*/DeleteSize, /*HasDeleteSize=*/true,
+ /*DeleteAlignment=*/DeleteAlignment,
+ /*HasDeleteAlignment=*/true);
+ }
- quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
+ ALWAYS_INLINE void deallocateAligned(void *Ptr, Chunk::Origin Origin,
+ uptr DeleteAlignment) {
+ deallocate(Ptr, Origin, /*DeleteSize=*/0, /*HasDeleteSize=*/false,
+ /*DeleteAlignment=*/DeleteAlignment,
+ /*HasDeleteAlignment=*/true);
}
void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
@@ -1245,6 +1215,63 @@ class Allocator {
return TaggedPtr;
}
+ NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize,
+ bool HasDeleteSize, uptr DeleteAlignment,
+ bool HasDeleteAlignment) {
+ if (UNLIKELY(!Ptr))
+ return;
+
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+ GuardedAlloc.deallocate(Ptr);
+ Stats.lock();
+ Stats.add(StatFree, GuardedAllocSlotSize);
+ Stats.sub(StatAllocated, GuardedAllocSlotSize);
+ Stats.unlock();
+ return;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+ void *TaggedPtr = Ptr;
+ Ptr = getHeaderTaggedPointer(Ptr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+
+ const Options Options = Primary.Options.load();
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
+ if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
+ // With the exception of memalign'd chunks, that can be still be free'd.
+ if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc)
+ reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+ Header.OriginOrWasZeroed, Origin);
+ }
+ }
+
+ const uptr Size = getSize(Ptr, &Header);
+ if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
+ if (UNLIKELY(DeleteSize != Size))
+ reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+ }
+
+ quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
+ }
+
void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
Chunk::UnpackedHeader *Header,
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 7e8d5b4396d2e..2ee33b14541ff 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -327,7 +327,7 @@ void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
EXPECT_LE(Size, Allocator->getUsableSize(P));
memset(P, 0xaa, Size);
checkMemoryTaggingMaybe(Allocator, P, Size, Align);
- Allocator->deallocate(P, Origin, Size);
+ Allocator->deallocateSized(P, Origin, Size);
}
}
@@ -374,7 +374,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
for (scudo::uptr I = 0; I < Size; I++)
ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
memset(P, 0xaa, Size);
- Allocator->deallocate(P, Origin, Size);
+ Allocator->deallocateSized(P, Origin, Size);
}
}
}
@@ -392,7 +392,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
for (scudo::uptr I = 0; I < Size; I++)
ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
memset(P, 0xaa, Size);
- Allocator->deallocate(P, Origin, Size);
+ Allocator->deallocateSized(P, Origin, Size);
}
}
}
@@ -419,7 +419,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
}
memset(P, 0xaa, Size);
- Allocator->deallocate(P, Origin, Size);
+ Allocator->deallocateSized(P, Origin, Size);
}
}
}
@@ -709,7 +709,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
while (!V.empty()) {
auto Pair = V.back();
- Allocator->deallocate(Pair.first, Origin, Pair.second);
+ Allocator->deallocateSized(Pair.first, Origin, Pair.second);
V.pop_back();
}
});
@@ -782,26 +782,26 @@ TEST(ScudoCombinedDeathTest, DeathCombined) {
EXPECT_NE(P, nullptr);
// Invalid sized deallocation.
- EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");
+ EXPECT_DEATH(Allocator->deallocateSized(P, Origin, Size + 8U), "");
// Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
UNUSED void *MisalignedP =
reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
- EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
+ EXPECT_DEATH(Allocator->deallocateSized(MisalignedP, Origin, Size), "");
EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");
// Header corruption.
scudo::u64 *H =
reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
*H ^= 0x42U;
- EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+ EXPECT_DEATH(Allocator->deallocateSized(P, Origin, Size), "");
*H ^= 0x420042U;
- EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+ EXPECT_DEATH(Allocator->deallocateSized(P, Origin, Size), "");
*H ^= 0x420000U;
// Invalid chunk state.
- Allocator->deallocate(P, Origin, Size);
- EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+ Allocator->deallocateSized(P, Origin, Size);
+ EXPECT_DEATH(Allocator->deallocateSized(P, Origin, Size), "");
EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
EXPECT_DEATH(Allocator->getUsableSize(P), "");
}
@@ -908,13 +908,13 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
memset(Ptrs[I], 0xaa, Size);
}
for (unsigned I = 0; I != Ptrs.size(); ++I)
- Allocator->deallocate(Ptrs[I], Origin, Size);
+ Allocator->deallocateSized(Ptrs[I], Origin, Size);
for (unsigned I = 0; I != Ptrs.size(); ++I) {
Ptrs[I] = Allocator->allocate(Size - 8, Origin);
memset(Ptrs[I], 0xbb, Size - 8);
}
for (unsigned I = 0; I != Ptrs.size(); ++I)
- Allocator->deallocate(Ptrs[I], Origin, Size - 8);
+ Allocator->deallocateSized(Ptrs[I], Origin, Size - 8);
for (unsigned I = 0; I != Ptrs.size(); ++I) {
Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
for (scudo::uptr J = 0; J < Size; ++J)
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
index 098d4f71acc4a..f1942acd02331 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -104,47 +104,47 @@ INTERFACE WEAK void operator delete[](void *ptr,
}
INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
+ Allocator.deallocateSized(ptr, scudo::Chunk::Origin::New, size);
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+ Allocator.deallocateSized(ptr, scudo::Chunk::Origin::NewArray, size);
}
INTERFACE WEAK void operator delete(void *ptr,
std::align_val_t align) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateAligned(ptr, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr,
std::align_val_t align) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateAligned(ptr, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateAligned(ptr, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateAligned(ptr, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, size_t size,
std::align_val_t align) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateSizedAligned(ptr, scudo::Chunk::Origin::New, size,
+ static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size,
std::align_val_t align) NOEXCEPT {
reportDeallocation(ptr);
- Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
- static_cast<scudo::uptr>(align));
+ Allocator.deallocateSizedAligned(ptr, scudo::Chunk::Origin::NewArray, size,
+ static_cast<scudo::uptr>(align));
}
#endif // !SCUDO_ANDROID || !_BIONIC
``````````
</details>
https://github.com/llvm/llvm-project/pull/147735
More information about the llvm-commits
mailing list