[compiler-rt] [scudo] Add config option to modify get usable size behavior (PR #158710)
Christopher Ferris via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 31 12:13:42 PDT 2025
https://github.com/cferris1000 updated https://github.com/llvm/llvm-project/pull/158710
>From 680fdb3cb276e860f71bd6f2500194a2bf5f722f Mon Sep 17 00:00:00 2001
From: Christopher Ferris <cferris at google.com>
Date: Mon, 15 Sep 2025 18:34:46 +0000
Subject: [PATCH 1/4] [scudo] Add config option to modify get usable size
behavior
Currently, Scudo always returns the exact size allocated when calling
getUsableSize. This can be a performance issue: some programs query the
usable size and then make unnecessary calls to realloc because they
believe there is not enough space left in the allocation. By default,
getUsableSize still returns the exact size of the allocation.

Note that if the exact behavior is disabled and MTE is on, the code
will still return an exact usable size.
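
For example, a downstream config could opt out of the exact behavior
like this (a minimal sketch; the config and allocator names are
illustrative, and the override pattern mirrors the test configs added
below):

  // Hypothetical config: report the full usable size of a chunk
  // instead of the exact requested size.
  struct MyAllocatorConfig : scudo::DefaultConfig {
    static const bool ExactUsableSize = false;
  };
  using MyAllocator = scudo::Allocator<MyAllocatorConfig>;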
---
.../lib/scudo/standalone/allocator_config.def | 4 +
compiler-rt/lib/scudo/standalone/combined.h | 68 +++--
.../scudo/standalone/tests/combined_test.cpp | 236 ++++++++++++++++++
.../standalone/tests/wrappers_c_test.cpp | 9 +-
4 files changed, 301 insertions(+), 16 deletions(-)
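To illustrate the non-exact computation (this note is not part of the
patch; the numbers assume a 16-byte chunk header):

  // A 2000-byte request served from the 2048-byte size class:
  //   UsableSize = ClassSize - (Ptr - BlockBegin)
  //              = 2048 - 16
  //              = 2032
  // so getUsableSize() reports 2032 rather than the exact 2000.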
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 748530820cd64..0aea7b8f2fb9a 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
// Disable the quarantine code.
BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+// If set to true, malloc_usable_size returns the exact size of the allocation.
+// If set to false, it returns the total usable size of the allocation.
+BASE_OPTIONAL(const bool, ExactUsableSize, true)
+
// PRIMARY_REQUIRED_TYPE(NAME)
//
// SizeClassMap to use with the Primary.
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index c9ba28a52f780..3b641eb10d6a0 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -706,19 +706,24 @@ class Allocator {
if (!getChunkFromBlock(Block, &Chunk, &Header) &&
!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
- } else {
- if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
- return;
- }
- if (Header.State == Chunk::State::Allocated) {
- uptr TaggedChunk = Chunk;
- if (allocatorSupportsMemoryTagging<AllocatorConfig>())
- TaggedChunk = untagPointer(TaggedChunk);
- if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
- TaggedChunk = loadTag(Chunk);
- Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
- Arg);
- }
+ } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ return;
+
+ if (Header.State != Chunk::State::Allocated)
+ return;
+
+ uptr TaggedChunk = Chunk;
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ TaggedChunk = untagPointer(TaggedChunk);
+ uptr Size;
+ if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+ TaggedChunk = loadTag(Chunk);
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ } else if (AllocatorConfig::getExactUsableSize())
+ Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+ else
+ Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
+ Callback(TaggedChunk, Size, Arg);
};
Primary.iterateOverBlocks(Lambda);
Secondary.iterateOverBlocks(Lambda);
@@ -759,6 +764,22 @@ class Allocator {
return false;
}
+ ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ if (LIKELY(Header->ClassId)) {
+ return SizeClassMap::getSizeByClassId(Header->ClassId) -
+ (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
+ }
+
+ uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+ UntaggedPtr = untagPointer(UntaggedPtr);
+ BlockBegin = untagPointer(BlockBegin);
+ }
+ return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+ }
+
// Return the usable size for a given chunk. Technically we lie, as we just
// report the actual size of a chunk. This is done to counteract code actively
// writing past the end of a chunk (like sqlite3) when the usable size allows
@@ -768,7 +789,26 @@ class Allocator {
if (UNLIKELY(!Ptr))
return 0;
- return getAllocSize(Ptr);
+ if (AllocatorConfig::getExactUsableSize() ||
+ UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+ return getAllocSize(Ptr);
+
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+ return getUsableSize(Ptr, &Header);
}
uptr getAllocSize(const void *Ptr) {
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 1eff9ebcb7a4f..ab466539dc314 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -24,6 +24,7 @@
#include <set>
#include <stdlib.h>
#include <thread>
+#include <unordered_map>
#include <vector>
static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
@@ -1161,3 +1162,238 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
// Quarantine stats should not be present.
EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
}
+
+struct UsableSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = false;
+ static const bool QuarantineDisabled = true;
+
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+ struct Primary {
+ // In order to properly test the usable size, this Primary config has
+ // four real size classes: 1024, 2048, 4096, 8192.
+ using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = 21U;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+ // Scan through all sizes up to 10000 then some larger sizes.
+ for (scudo::uptr Size = 1; Size < 10000; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(Size, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size;
+ Allocator.deallocate(P, Origin);
+ }
+
+ // Verify that aligned allocations also return the exact size allocated.
+ const scudo::uptr AllocSize = 313;
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+ EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << AllocSize << " at align "
+ << 1 << Align;
+ Allocator.deallocate(P, Origin);
+ }
+
+  // Verify an explicitly large allocation.
+ const scudo::uptr LargeAllocSize = 1000000;
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+ Allocator.deallocate(P, Origin);
+
+  // Now check aligned large allocations.
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+ EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << AllocSize << " at align "
+ << 1 << Align;
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+  // This does not verify whether the size is the exact size or the size of
+  // the size class. Instead, verify that the size matches the usable size
+  // and assume the other tests have verified getUsableSize.
+ std::unordered_map<void *, size_t> Pointers;
+ Pointers.insert({Allocator.allocate(128, Origin), 0U});
+ Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+ Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+ Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+ Allocator.disable();
+ Allocator.iterateOverChunks(
+ 0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+ [](uintptr_t Base, size_t Size, void *Arg) {
+ std::unordered_map<void *, size_t> *Pointers =
+ reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+ (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+ },
+ reinterpret_cast<void *>(&Pointers));
+ Allocator.enable();
+
+ for (auto [Ptr, IterateSize] : Pointers) {
+ EXPECT_NE(0U, IterateSize)
+ << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+ EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+ << "Pointer " << Ptr
+ << " mismatch between iterate size and usable size.";
+ Allocator.deallocate(Ptr, Origin);
+ }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+template <class AllocatorT> void VerifyUsableSize(AllocatorT &Allocator) {
+ // Check primary allocations first.
+ std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+ scudo::uptr StartSize = 0;
+ for (auto SizeClass : SizeClasses) {
+ scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+ for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size
+ << " for size class " << SizeClass;
+ Allocator.deallocate(P, Origin);
+ }
+ StartSize = UsableSize + 1;
+ }
+
+ // Check different alignments to verify usable space is calculated properly.
+ // Currently, the pointer plus usable size is aligned to the size class size.
+ const scudo::uptr AllocSize = 128;
+ EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(128, 32));
+ void *P = Allocator.allocate(AllocSize, Origin, 32);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_GE(UsableSize, AllocSize);
+ EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+ EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+ Allocator.deallocate(P, Origin);
+
+ EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 64));
+ P = Allocator.allocate(AllocSize, Origin, 64);
+ UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_GE(UsableSize, AllocSize);
+ EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+ EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+ Allocator.deallocate(P, Origin);
+
+ EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 128));
+ P = Allocator.allocate(AllocSize, Origin, 128);
+ UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_GE(UsableSize, AllocSize);
+ EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+ EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+ Allocator.deallocate(P, Origin);
+
+ // Check allocations in the secondary, the end of the allocation is always
+ // aligned to a page.
+ const scudo::uptr LargeAllocSize = 996780;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ P = Allocator.allocate(LargeAllocSize, Origin);
+ UsableSize = Allocator.getUsableSize(P);
+ EXPECT_GE(UsableSize, LargeAllocSize);
+ EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+ Allocator.deallocate(P, Origin);
+
+ // Check aligned allocations now.
+ for (scudo::uptr Align = 1; Align <= 8; Align++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+ UsableSize = Allocator.getUsableSize(P);
+ EXPECT_GE(UsableSize, LargeAllocSize);
+ EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+ Allocator.deallocate(P, Origin);
+ }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+ static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ VerifyUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+ if (!scudo::archSupportsMemoryTagging() ||
+ !scudo::systemDetectsMemoryTagFaultsTestOnly())
+ TEST_SKIP("Only supported on systems that can enable MTE.");
+
+ scudo::enableSystemMemoryTaggingTestOnly();
+
+ using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  // When MTE is enabled, exact sizes are returned regardless of the config.
+ VerifyExactUsableSize<AllocatorT>(*Allocator);
+ VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 612317b3c3293..9e5d0658e5ed5 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {
EXPECT_EQ(errno, 0);
fclose(F);
EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
- EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));
+ std::string expected;
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P1)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
+ expected =
+ "<alloc size=\"" + std::to_string(malloc_usable_size(P2)) + "\" count=\"";
+ EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));
free(P1);
free(P2);
>From 32e664e468a2d4c928c2ceacb73f92c43670c231 Mon Sep 17 00:00:00 2001
From: Christopher Ferris <cferris at google.com>
Date: Wed, 8 Oct 2025 01:09:27 +0000
Subject: [PATCH 2/4] Updated getUsableSize comment.
---
compiler-rt/lib/scudo/standalone/combined.h | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 3b641eb10d6a0..6141f27f723cb 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -780,11 +780,10 @@ class Allocator {
return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
}
- // Return the usable size for a given chunk. Technically we lie, as we just
- // report the actual size of a chunk. This is done to counteract code actively
- // writing past the end of a chunk (like sqlite3) when the usable size allows
- // for it, which then forces realloc to copy the usable size of a chunk as
- // opposed to its actual size.
+  // Return the usable size for a given chunk. If MTE is enabled or the
+  // ExactUsableSize config parameter is true, report the exact size of the
+  // original allocation. Otherwise, return the total usable size of the
+  // chunk.
uptr getUsableSize(const void *Ptr) {
if (UNLIKELY(!Ptr))
return 0;
>From f5d67ebd74b805e9771bf7ba1bd7b5c96e9e3142 Mon Sep 17 00:00:00 2001
From: Christopher Ferris <cferris at google.com>
Date: Fri, 31 Oct 2025 00:39:12 +0000
Subject: [PATCH 3/4] Update tests.
---
compiler-rt/lib/scudo/standalone/combined.h | 18 +++-
.../scudo/standalone/tests/combined_test.cpp | 98 ++++++++++---------
2 files changed, 69 insertions(+), 47 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 6141f27f723cb..dd058106333d0 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -719,10 +719,11 @@ class Allocator {
if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
TaggedChunk = loadTag(Chunk);
Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
- } else if (AllocatorConfig::getExactUsableSize())
+ } else if (AllocatorConfig::getExactUsableSize()) {
Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
- else
+ } else {
Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
+ }
Callback(TaggedChunk, Size, Arg);
};
Primary.iterateOverBlocks(Lambda);
@@ -990,6 +991,19 @@ class Allocator {
MemorySize, 2, 16);
}
+ uptr getBlockBeginTestOnly(const void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ DCHECK(Header.State == Chunk::State::Allocated);
+
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(const_cast<void *>(Ptr));
+ void *Begin = getBlockBegin(Ptr, &Header);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Begin = untagPointer(Begin);
+ return reinterpret_cast<uptr>(Begin);
+ }
+
private:
typedef typename PrimaryT::SizeClassMap SizeClassMap;
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index ab466539dc314..1b26d08bccffa 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -1301,68 +1301,75 @@ TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
}
-template <class AllocatorT> void VerifyUsableSize(AllocatorT &Allocator) {
- // Check primary allocations first.
+template <class AllocatorT>
+void VerifyUsableSizePrimary(AllocatorT &Allocator) {
std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
- scudo::uptr StartSize = 0;
- for (auto SizeClass : SizeClasses) {
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr StartSize;
+ if (I == 0)
+ StartSize = 1;
+ else
+ StartSize = SizeClasses[I - 1];
scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
void *P = Allocator.allocate(Size, Origin);
EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
<< "Failed usable size at allocation size " << Size
<< " for size class " << SizeClass;
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
Allocator.deallocate(P, Origin);
}
+
StartSize = UsableSize + 1;
}
- // Check different alignments to verify usable space is calculated properly.
- // Currently, the pointer plus usable size is aligned to the size class size.
- const scudo::uptr AllocSize = 128;
- EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(128, 32));
- void *P = Allocator.allocate(AllocSize, Origin, 32);
- scudo::uptr UsableSize = Allocator.getUsableSize(P);
- memset(P, 0xff, UsableSize);
- EXPECT_GE(UsableSize, AllocSize);
- EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
- EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
- Allocator.deallocate(P, Origin);
-
- EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 64));
- P = Allocator.allocate(AllocSize, Origin, 64);
- UsableSize = Allocator.getUsableSize(P);
- memset(P, 0xff, UsableSize);
- EXPECT_GE(UsableSize, AllocSize);
- EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
- EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
- Allocator.deallocate(P, Origin);
-
- EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 128));
- P = Allocator.allocate(AllocSize, Origin, 128);
- UsableSize = Allocator.getUsableSize(P);
- memset(P, 0xff, UsableSize);
- EXPECT_GE(UsableSize, AllocSize);
- EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
- EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
- Allocator.deallocate(P, Origin);
+ std::vector<scudo::uptr> Alignments = {32U, 128U};
+ for (size_t I = 0; I < SizeClasses.size(); I++) {
+ scudo::uptr SizeClass = SizeClasses[I];
+ scudo::uptr AllocSize;
+ if (I == 0)
+ AllocSize = 1;
+ else
+ AllocSize = SizeClasses[I - 1] + 1;
+
+ for (auto Alignment : Alignments) {
+ void *P = Allocator.allocate(AllocSize, Origin, Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << AllocSize
+ << " for size class " << SizeClass << " at alignment " << Alignment;
+ Allocator.deallocate(P, Origin);
+ }
+ }
+}
- // Check allocations in the secondary, the end of the allocation is always
- // aligned to a page.
+template <class AllocatorT>
+void VerifyUsableSizeSecondary(AllocatorT &Allocator) {
const scudo::uptr LargeAllocSize = 996780;
const scudo::uptr PageSize = scudo::getPageSizeCached();
- P = Allocator.allocate(LargeAllocSize, Origin);
- UsableSize = Allocator.getUsableSize(P);
- EXPECT_GE(UsableSize, LargeAllocSize);
- EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+ void *P = Allocator.allocate(LargeAllocSize, Origin);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ // Assumes that the secondary always rounds up allocations to a page boundary.
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize);
Allocator.deallocate(P, Origin);
// Check aligned allocations now.
- for (scudo::uptr Align = 1; Align <= 8; Align++) {
- void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
- UsableSize = Allocator.getUsableSize(P);
- EXPECT_GE(UsableSize, LargeAllocSize);
- EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+ for (scudo::uptr Alignment = 1; Alignment <= 8; Alignment++) {
+ void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Alignment);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+ PageSize),
+ reinterpret_cast<scudo::uptr>(P) + UsableSize)
+ << "Failed usable size at allocation size " << LargeAllocSize
+ << " at alignment " << Alignment;
Allocator.deallocate(P, Origin);
}
}
@@ -1375,7 +1382,8 @@ TEST(ScudoCombinedTest, FullUsableSize) {
using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
- VerifyUsableSize<AllocatorT>(*Allocator);
+ VerifyUsableSizePrimary<AllocatorT>(*Allocator);
+ VerifyUsableSizeSecondary<AllocatorT>(*Allocator);
VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
}
>From fd82a95bd24e18a8631f6690d1b5eeac59f4cb27 Mon Sep 17 00:00:00 2001
From: Christopher Ferris <cferris at google.com>
Date: Fri, 31 Oct 2025 19:12:09 +0000
Subject: [PATCH 4/4] Update braces.
---
compiler-rt/lib/scudo/standalone/combined.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index dd058106333d0..4650246d77445 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -706,8 +706,9 @@ class Allocator {
if (!getChunkFromBlock(Block, &Chunk, &Header) &&
!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
return;
- } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) {
return;
+ }
if (Header.State != Chunk::State::Allocated)
return;