[compiler-rt] [scudo] Add config option to modify get usable size behavior (PR #158710)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 8 16:04:41 PDT 2025
================
@@ -1161,3 +1162,238 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
// No quarantine stats should not be present.
EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
}
+
+// Size-class map parameters used by TestExactUsableSizeConfig below.
+// MinSizeLog/MidSizeLog of 10 with MaxSizeLog of 13 and SizeDelta of 0
+// yields exactly four real size classes: 1024, 2048, 4096 and 8192 bytes
+// (see the comment in TestExactUsableSizeConfig::Primary).
+struct UsableSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+// Allocator config for exercising getUsableSize: the primary has only the
+// four size classes defined by UsableSizeClassConfig, and the secondary runs
+// without a cache. Tagging and quarantine are disabled so usable sizes are
+// not affected by either feature.
+struct TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = false;
+  static const bool QuarantineDisabled = true;
+
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // In order to properly test the usable size, this Primary config has
+    // four real size classes: 1024, 2048, 4096, 8192.
+    using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = 21U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    // Use an alias declaration for consistency with the other 'using'
+    // declarations in this config (was a 'typedef').
+    using CompactPtrT = scudo::uptr;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+// Verifies that getUsableSize returns exactly the requested allocation size
+// for small, aligned, and large (secondary) allocations.
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+  // Scan through all sizes up to 10000 then some larger sizes.
+  for (scudo::uptr Size = 1; Size < 10000; Size++) {
+    void *P = Allocator.allocate(Size, Origin);
+    EXPECT_EQ(Size, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << Size;
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify that aligned allocations also return the exact size allocated.
+  const scudo::uptr AllocSize = 313;
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+    // Parenthesize the shift so the message shows the alignment value;
+    // '<< 1 << Align' would stream the two operands separately.
+    EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << AllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify an explicitly large allocation.
+  const scudo::uptr LargeAllocSize = 1000000;
+  void *P = Allocator.allocate(LargeAllocSize, Origin);
+  EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+  Allocator.deallocate(P, Origin);
+
+  // Now do it for aligned large allocations.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    // Report the size actually allocated here (LargeAllocSize, not the
+    // small-allocation AllocSize used by the loop above).
+    EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << LargeAllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+// Verifies that the size reported by iterateOverChunks for each live chunk
+// matches getUsableSize for that chunk.
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+  // This will not verify if the size is the exact size or the size of the
+  // size class. Instead verify that the size matches the usable size and
+  // assume the other tests have verified getUsableSize.
+  std::unordered_map<void *, size_t> Pointers;
+  Pointers.insert({Allocator.allocate(128, Origin), 0U});
+  Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+  // Disable the allocator while walking its chunks; re-enabled below.
+  Allocator.disable();
+  Allocator.iterateOverChunks(
+      0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+      [](uintptr_t Base, size_t Size, void *Arg) {
+        // Record the iterated size for every chunk the callback reports.
+        // static_cast is the correct named cast for void* -> T*.
+        std::unordered_map<void *, size_t> *Pointers =
+            static_cast<std::unordered_map<void *, size_t> *>(Arg);
+        (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+      },
+      static_cast<void *>(&Pointers));
+  Allocator.enable();
+
+  for (auto [Ptr, IterateSize] : Pointers) {
+    // A zero entry means the iteration never visited this allocation.
+    EXPECT_NE(0U, IterateSize)
+        << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+    EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+        << "Pointer " << Ptr
+        << " mismatch between iterate size and usable size.";
+    Allocator.deallocate(Ptr, Origin);
+  }
+}
+
+// End-to-end check of exact usable sizes without memory tagging.
+TEST(ScudoCombinedTest, ExactUsableSize) {
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+  // Prefer std::make_unique to a naked new expression.
+  auto Allocator = std::make_unique<AllocatorT>();
+
+  VerifyExactUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+// Same config as TestExactUsableSizeConfig, but with memory tagging enabled.
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+ static const bool MaySupportMemoryTagging = true;
+};
+
+// Same end-to-end check as ExactUsableSize, but with MTE enabled.
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+  // Skip unless both the architecture and the running system support MTE.
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+  // Prefer std::make_unique to a naked new expression.
+  auto Allocator = std::make_unique<AllocatorT>();
+
+  VerifyExactUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+template <class AllocatorT> void VerifyUsableSize(AllocatorT &Allocator) {
+ // Check primary allocations first.
+ std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+ scudo::uptr StartSize = 0;
+ for (auto SizeClass : SizeClasses) {
+ scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+ for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+ void *P = Allocator.allocate(Size, Origin);
+ EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+ << "Failed usable size at allocation size " << Size
+ << " for size class " << SizeClass;
+ Allocator.deallocate(P, Origin);
+ }
+ StartSize = UsableSize + 1;
+ }
+
+ // Check different alignments to verify usable space is calculated properly.
+ // Currently, the pointer plus usable size is aligned to the size class size.
+ const scudo::uptr AllocSize = 128;
+ EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(128, 32));
+ void *P = Allocator.allocate(AllocSize, Origin, 32);
+ scudo::uptr UsableSize = Allocator.getUsableSize(P);
+ memset(P, 0xff, UsableSize);
+ EXPECT_GE(UsableSize, AllocSize);
+ EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
----------------
ChiaHungDuan wrote:
I think the `1024` comes from the SizeClassConfig — can we derive it from MidSizeLog instead, e.g. by introducing another constexpr variable?
https://github.com/llvm/llvm-project/pull/158710
More information about the llvm-commits
mailing list