[compiler-rt] [scudo] Support no-preserve-all-regions mode (PR #85149)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 13 16:24:26 PDT 2024
https://github.com/ChiaHungDuan created https://github.com/llvm/llvm-project/pull/85149
This removes the requirement of reserving the memory for all regions at the beginning. Reserving them all up front needs a huge amount of contiguous pages, which may be a challenge in certain cases. Therefore, this adds a new flag, PreserveAllRegions, to indicate whether the regions should be reserved up front or allocated on demand.
Note that when PreserveAllRegions is disabled, EnableRandomOffset becomes irrelevant because each region's base is already random.
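For illustration, a config opting out of the up-front reservation only needs to flip the new flag. This is a minimal sketch mirroring the TestConfig3 fragment touched by this patch; the struct name is made up and the remaining fields are the existing primary64 knobs shown only for context:

  struct NoPreserveAllRegionsConfig {
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    // Reserve each region on demand instead of one contiguous block.
    static const bool PreserveAllRegions = false;
    // Ignored when PreserveAllRegions is false: each on-demand region base
    // is already randomized by the mapping itself.
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };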
>From 78c4343fa0ed1ab38c978ccceed832979b2146de Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Mon, 26 Feb 2024 19:32:38 +0000
Subject: [PATCH] [scudo] Support no-preserve-all-regions mode
This removes the requirement of reserving the memory for all regions
at the beginning. Reserving them all up front needs a huge amount of
contiguous pages, which may be a challenge in certain cases.
Therefore, this adds a new flag, PreserveAllRegions, to indicate
whether the regions should be reserved up front or allocated on
demand.
Note that when PreserveAllRegions is disabled, EnableRandomOffset
becomes irrelevant because each region's base is already random.
---
.../lib/scudo/standalone/allocator_config.def | 8 +-
compiler-rt/lib/scudo/standalone/primary64.h | 128 +++++++++++-------
.../scudo/standalone/tests/primary_test.cpp | 1 +
3 files changed, 89 insertions(+), 48 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 92f4e39872d4c9..e080321825040b 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -87,9 +87,15 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Indicates support for offsetting the start of a region by a random number of
-// pages. Only used with primary64.
+// pages. This is only used if `PreserveAllRegions` is enabled.
PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
+// When `PreserveAllRegions` is true, the virtual address for all regions will
+// be preserved within a big chunk of memory. This will reduce the fragmentation
+// caused by region allocations but may require a huge amount of contiguous
+// pages at initialization.
+PRIMARY_OPTIONAL(const bool, PreserveAllRegions, true)
+
// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
//
// Use condition variable to shorten the waiting time of refillment of
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index f5e4ab57b4dfd5..cbb9dd04b85b24 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -117,40 +117,24 @@ template <typename Config> class SizeClassAllocator64 {
SmallerBlockReleasePageDelta =
PagesInGroup * (1 + MinSizeClass / 16U) / 100;
- // Reserve the space required for the Primary.
- CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
- "scudo:primary_reserve"));
- PrimaryBase = ReservedMemory.getBase();
- DCHECK_NE(PrimaryBase, 0U);
-
- u32 Seed;
- const u64 Time = getMonotonicTimeFast();
- if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
- Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
-
- for (uptr I = 0; I < NumClasses; I++) {
- RegionInfo *Region = getRegionInfo(I);
-
- // The actual start of a region is offset by a random number of pages
- // when PrimaryEnableRandomOffset is set.
- Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
- (Config::getEnableRandomOffset()
- ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
- : 0);
- Region->RandState = getRandomU32(&Seed);
- // Releasing small blocks is expensive, set a higher threshold to avoid
- // frequent page releases.
- if (isSmallBlock(getSizeByClassId(I)))
- Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
- else
- Region->TryReleaseThreshold = PageSize;
- Region->ReleaseInfo.LastReleaseAtNs = Time;
-
- Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
- PrimaryBase + (I << RegionSizeLog), RegionSize);
- CHECK(Region->MemMapInfo.MemMap.isAllocated());
+ if (Config::getPreserveAllRegions()) {
+ ReservedMemoryT ReservedMemory = {};
+ // Reserve the space required for the Primary.
+ CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
+ "scudo:primary_reserve"));
+ const uptr PrimaryBase = ReservedMemory.getBase();
+ const bool EnableRandomOffset =
+ Config::getPreserveAllRegions() && Config::getEnableRandomOffset();
+
+ for (uptr I = 0; I < NumClasses; I++) {
+ MemMapT RegionMemMap = ReservedMemory.dispatch(
+ PrimaryBase + (I << RegionSizeLog), RegionSize);
+ RegionInfo *Region = getRegionInfo(I);
+
+ initRegion(Region, I, RegionMemMap, EnableRandomOffset);
+ }
+ shuffle(RegionInfoArray, NumClasses, &getRegionInfo(0)->RandState);
}
- shuffle(RegionInfoArray, NumClasses, &Seed);
// The binding should be done after region shuffling so that it won't bind
// the FLLock from the wrong region.
@@ -160,14 +144,17 @@ template <typename Config> class SizeClassAllocator64 {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
+ void unmapTestOnly() {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
+ {
+ ScopedLock ML(Region->MMLock);
+ MemMapT MemMap = Region->MemMapInfo.MemMap;
+ if (MemMap.isAllocated())
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
*Region = {};
}
- if (PrimaryBase)
- ReservedMemory.release();
- PrimaryBase = 0U;
}
// When all blocks are freed, it has to be the same size as `AllocatedUser`.
@@ -251,9 +238,10 @@ template <typename Config> class SizeClassAllocator64 {
}
const bool RegionIsExhausted = Region->Exhausted;
- if (!RegionIsExhausted)
+ if (!RegionIsExhausted) {
PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
MaxBlockCount);
+ }
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
@@ -513,7 +501,6 @@ template <typename Config> class SizeClassAllocator64 {
private:
static const uptr RegionSize = 1UL << RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr PrimarySize = RegionSize * NumClasses;
static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
// Fill at most this number of batches from the newly map'd memory.
@@ -569,9 +556,14 @@ template <typename Config> class SizeClassAllocator64 {
}
uptr getRegionBaseByClassId(uptr ClassId) {
- return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
- RegionSize) +
- PrimaryBase;
+ RegionInfo *Region = getRegionInfo(ClassId);
+ Region->MMLock.assertHeld();
+
+ if (!Config::getPreserveAllRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ return 0U;
+ }
+ return Region->MemMapInfo.MemMap.getBase();
}
static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
@@ -601,6 +593,35 @@ template <typename Config> class SizeClassAllocator64 {
return BlockSize > PageSize;
}
+ void initRegion(RegionInfo *Region, uptr ClassId, MemMapT MemMap,
+ bool EnableRandomOffset) REQUIRES(Region->MMLock) {
+ DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
+ DCHECK(MemMap.isAllocated());
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr RegionBase = MemMap.getBase();
+
+ Region->MemMapInfo.MemMap = MemMap;
+
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(Time ^ (RegionBase >> 12));
+
+ Region->RegionBeg = RegionBase;
+ if (EnableRandomOffset)
+ Region->RegionBeg += (getRandomModN(&Seed, 16) + 1) * PageSize;
+
+ // Releasing small blocks is expensive, set a higher threshold to avoid
+ // frequent page releases.
+ if (isSmallBlock(getSizeByClassId(ClassId)))
+ Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+ else
+ Region->TryReleaseThreshold = PageSize;
+
+ Region->ReleaseInfo.LastReleaseAtNs = Time;
+ }
+
void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
REQUIRES(Region->FLLock) {
DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
@@ -988,9 +1009,26 @@ template <typename Config> class SizeClassAllocator64 {
CompactPtrT *ToArray,
const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ if (!Config::getPreserveAllRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ ReservedMemoryT ReservedMemory;
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
+ "scudo:primary_reserve",
+ MAP_ALLOWNOMEM))) {
+ Printf("Can't preserve pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
+ Region->Exhausted = true;
+ return 0U;
+ }
+ initRegion(Region, ClassId,
+ ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity()),
+ /*EnableRandomOffset*/ false);
+ }
+
+ DCHECK(Region->MemMapInfo.MemMap.isAllocated());
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);
-
const uptr RegionBeg = Region->RegionBeg;
const uptr MappedUser = Region->MemMapInfo.MappedUser;
const uptr TotalUserBytes =
@@ -1682,10 +1720,6 @@ template <typename Config> class SizeClassAllocator64 {
Region->FLLockCV.notifyAll(Region->FLLock);
}
- // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
- // deprecated.
- uptr PrimaryBase = 0;
- ReservedMemoryT ReservedMemory = {};
// The minimum size of pushed blocks that we will try to release the pages in
// that size class.
uptr SmallerBlockReleasePageDelta = 0;
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 683ce3e596596d..a6bbf98026455e 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -90,6 +90,7 @@ template <typename SizeClassMapT> struct TestConfig3 {
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
typedef scudo::uptr CompactPtrT;
static const scudo::uptr CompactPtrScale = 0;
+ static const bool PreserveAllRegions = false;
static const bool EnableRandomOffset = true;
static const scudo::uptr MapSizeIncrement = 1UL << 18;
};