[compiler-rt] [scudo] Support no-preserve-all-regions mode (PR #85149)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 29 13:13:02 PDT 2024
https://github.com/ChiaHungDuan updated https://github.com/llvm/llvm-project/pull/85149
>From 78c4343fa0ed1ab38c978ccceed832979b2146de Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Mon, 26 Feb 2024 19:32:38 +0000
Subject: [PATCH 1/3] [scudo] Support no-preserve-all-regions mode
This removes the requirement that we preserve the memory for all
regions at the beginning. Doing so needs a huge amount of contiguous
pages, which may be a challenge in certain cases. Therefore, we add a
new flag, PreserveAllRegions, to indicate whether we want to allocate
the regions on demand.
Note that once PreserveAllRegions is disabled, EnableRandomOffset
becomes irrelevant because each region's base is already random.
---
.../lib/scudo/standalone/allocator_config.def | 8 +-
compiler-rt/lib/scudo/standalone/primary64.h | 128 +++++++++++-------
.../scudo/standalone/tests/primary_test.cpp | 1 +
3 files changed, 89 insertions(+), 48 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 92f4e39872d4c9..e080321825040b 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -87,9 +87,15 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Indicates support for offsetting the start of a region by a random number of
-// pages. Only used with primary64.
+// pages. This is only used if `PreserveAllRegions` is enabled.
PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
+// When `PreserveAllRegions` is true, the virtual address for all regions will
+// be preserved within a big chunk of memory. This will reduce the fragmentation
+// caused by region allocations but may require a huge amount of contiguous
+// pages at initialization.
+PRIMARY_OPTIONAL(const bool, PreserveAllRegions, true)
+
// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
//
// Use condition variable to shorten the waiting time of refillment of
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index f5e4ab57b4dfd5..cbb9dd04b85b24 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -117,40 +117,24 @@ template <typename Config> class SizeClassAllocator64 {
SmallerBlockReleasePageDelta =
PagesInGroup * (1 + MinSizeClass / 16U) / 100;
- // Reserve the space required for the Primary.
- CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
- "scudo:primary_reserve"));
- PrimaryBase = ReservedMemory.getBase();
- DCHECK_NE(PrimaryBase, 0U);
-
- u32 Seed;
- const u64 Time = getMonotonicTimeFast();
- if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
- Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
-
- for (uptr I = 0; I < NumClasses; I++) {
- RegionInfo *Region = getRegionInfo(I);
-
- // The actual start of a region is offset by a random number of pages
- // when PrimaryEnableRandomOffset is set.
- Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
- (Config::getEnableRandomOffset()
- ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
- : 0);
- Region->RandState = getRandomU32(&Seed);
- // Releasing small blocks is expensive, set a higher threshold to avoid
- // frequent page releases.
- if (isSmallBlock(getSizeByClassId(I)))
- Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
- else
- Region->TryReleaseThreshold = PageSize;
- Region->ReleaseInfo.LastReleaseAtNs = Time;
-
- Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
- PrimaryBase + (I << RegionSizeLog), RegionSize);
- CHECK(Region->MemMapInfo.MemMap.isAllocated());
+ if (Config::getPreserveAllRegions()) {
+ ReservedMemoryT ReservedMemory = {};
+ // Reserve the space required for the Primary.
+ CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
+ "scudo:primary_reserve"));
+ const uptr PrimaryBase = ReservedMemory.getBase();
+ const bool EnableRandomOffset =
+ Config::getPreserveAllRegions() && Config::getEnableRandomOffset();
+
+ for (uptr I = 0; I < NumClasses; I++) {
+ MemMapT RegionMemMap = ReservedMemory.dispatch(
+ PrimaryBase + (I << RegionSizeLog), RegionSize);
+ RegionInfo *Region = getRegionInfo(I);
+
+ initRegion(Region, I, RegionMemMap, EnableRandomOffset);
+ }
+ shuffle(RegionInfoArray, NumClasses, &getRegionInfo(0)->RandState);
}
- shuffle(RegionInfoArray, NumClasses, &Seed);
// The binding should be done after region shuffling so that it won't bind
// the FLLock from the wrong region.
@@ -160,14 +144,17 @@ template <typename Config> class SizeClassAllocator64 {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
+ void unmapTestOnly() {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
+ {
+ ScopedLock ML(Region->MMLock);
+ MemMapT MemMap = Region->MemMapInfo.MemMap;
+ if (MemMap.isAllocated())
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
*Region = {};
}
- if (PrimaryBase)
- ReservedMemory.release();
- PrimaryBase = 0U;
}
// When all blocks are freed, it has to be the same size as `AllocatedUser`.
@@ -251,9 +238,10 @@ template <typename Config> class SizeClassAllocator64 {
}
const bool RegionIsExhausted = Region->Exhausted;
- if (!RegionIsExhausted)
+ if (!RegionIsExhausted) {
PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
MaxBlockCount);
+ }
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
@@ -513,7 +501,6 @@ template <typename Config> class SizeClassAllocator64 {
private:
static const uptr RegionSize = 1UL << RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr PrimarySize = RegionSize * NumClasses;
static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
// Fill at most this number of batches from the newly map'd memory.
@@ -569,9 +556,14 @@ template <typename Config> class SizeClassAllocator64 {
}
uptr getRegionBaseByClassId(uptr ClassId) {
- return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
- RegionSize) +
- PrimaryBase;
+ RegionInfo *Region = getRegionInfo(ClassId);
+ Region->MMLock.assertHeld();
+
+ if (!Config::getPreserveAllRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ return 0U;
+ }
+ return Region->MemMapInfo.MemMap.getBase();
}
static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
@@ -601,6 +593,35 @@ template <typename Config> class SizeClassAllocator64 {
return BlockSize > PageSize;
}
+ void initRegion(RegionInfo *Region, uptr ClassId, MemMapT MemMap,
+ bool EnableRandomOffset) REQUIRES(Region->MMLock) {
+ DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
+ DCHECK(MemMap.isAllocated());
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr RegionBase = MemMap.getBase();
+
+ Region->MemMapInfo.MemMap = MemMap;
+
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(Time ^ (RegionBase >> 12));
+
+ Region->RegionBeg = RegionBase;
+ if (EnableRandomOffset)
+ Region->RegionBeg += (getRandomModN(&Seed, 16) + 1) * PageSize;
+
+ // Releasing small blocks is expensive, set a higher threshold to avoid
+ // frequent page releases.
+ if (isSmallBlock(getSizeByClassId(ClassId)))
+ Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+ else
+ Region->TryReleaseThreshold = PageSize;
+
+ Region->ReleaseInfo.LastReleaseAtNs = Time;
+ }
+
void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
REQUIRES(Region->FLLock) {
DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
@@ -988,9 +1009,26 @@ template <typename Config> class SizeClassAllocator64 {
CompactPtrT *ToArray,
const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ if (!Config::getPreserveAllRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ ReservedMemoryT ReservedMemory;
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
+ "scudo:primary_reserve",
+ MAP_ALLOWNOMEM))) {
+ Printf("Can't preserve pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
+ Region->Exhausted = true;
+ return 0U;
+ }
+ initRegion(Region, ClassId,
+ ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity()),
+ /*EnableRandomOffset*/ false);
+ }
+
+ DCHECK(Region->MemMapInfo.MemMap.isAllocated());
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);
-
const uptr RegionBeg = Region->RegionBeg;
const uptr MappedUser = Region->MemMapInfo.MappedUser;
const uptr TotalUserBytes =
@@ -1682,10 +1720,6 @@ template <typename Config> class SizeClassAllocator64 {
Region->FLLockCV.notifyAll(Region->FLLock);
}
- // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
- // deprecated.
- uptr PrimaryBase = 0;
- ReservedMemoryT ReservedMemory = {};
// The minimum size of pushed blocks that we will try to release the pages in
// that size class.
uptr SmallerBlockReleasePageDelta = 0;
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 683ce3e596596d..a6bbf98026455e 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -90,6 +90,7 @@ template <typename SizeClassMapT> struct TestConfig3 {
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
typedef scudo::uptr CompactPtrT;
static const scudo::uptr CompactPtrScale = 0;
+ static const bool PreserveAllRegions = false;
static const bool EnableRandomOffset = true;
static const scudo::uptr MapSizeIncrement = 1UL << 18;
};
>From 8f72c355f44f7bc95984e2a3be269424308d6f53 Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Wed, 27 Mar 2024 21:42:39 +0000
Subject: [PATCH 2/3] Move the seed calculation to init()
---
compiler-rt/lib/scudo/standalone/primary64.h | 35 ++++++++++----------
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index cbb9dd04b85b24..252101ea219f2d 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -117,21 +117,27 @@ template <typename Config> class SizeClassAllocator64 {
SmallerBlockReleasePageDelta =
PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));
+
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->RandState = getRandomU32(&Seed);
+
if (Config::getPreserveAllRegions()) {
ReservedMemoryT ReservedMemory = {};
// Reserve the space required for the Primary.
CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
"scudo:primary_reserve"));
const uptr PrimaryBase = ReservedMemory.getBase();
- const bool EnableRandomOffset =
- Config::getPreserveAllRegions() && Config::getEnableRandomOffset();
for (uptr I = 0; I < NumClasses; I++) {
MemMapT RegionMemMap = ReservedMemory.dispatch(
PrimaryBase + (I << RegionSizeLog), RegionSize);
RegionInfo *Region = getRegionInfo(I);
- initRegion(Region, I, RegionMemMap, EnableRandomOffset);
+ initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
}
shuffle(RegionInfoArray, NumClasses, &getRegionInfo(0)->RandState);
}
@@ -593,24 +599,21 @@ template <typename Config> class SizeClassAllocator64 {
return BlockSize > PageSize;
}
- void initRegion(RegionInfo *Region, uptr ClassId, MemMapT MemMap,
- bool EnableRandomOffset) REQUIRES(Region->MMLock) {
+ ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
+ MemMapT MemMap, bool EnableRandomOffset)
+ REQUIRES(Region->MMLock) {
DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
DCHECK(MemMap.isAllocated());
const uptr PageSize = getPageSizeCached();
- const uptr RegionBase = MemMap.getBase();
Region->MemMapInfo.MemMap = MemMap;
- u32 Seed;
- const u64 Time = getMonotonicTimeFast();
- if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
- Seed = static_cast<u32>(Time ^ (RegionBase >> 12));
-
- Region->RegionBeg = RegionBase;
- if (EnableRandomOffset)
- Region->RegionBeg += (getRandomModN(&Seed, 16) + 1) * PageSize;
+ Region->RegionBeg = MemMap.getBase();
+ if (EnableRandomOffset) {
+ Region->RegionBeg +=
+ (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
+ }
// Releasing small blocks is expensive, set a higher threshold to avoid
// frequent page releases.
@@ -618,8 +621,6 @@ template <typename Config> class SizeClassAllocator64 {
Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
else
Region->TryReleaseThreshold = PageSize;
-
- Region->ReleaseInfo.LastReleaseAtNs = Time;
}
void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
@@ -1023,7 +1024,7 @@ template <typename Config> class SizeClassAllocator64 {
initRegion(Region, ClassId,
ReservedMemory.dispatch(ReservedMemory.getBase(),
ReservedMemory.getCapacity()),
- /*EnableRandomOffset*/ false);
+ /*EnableRandomOffset=*/false);
}
DCHECK(Region->MemMapInfo.MemMap.isAllocated());
>From 10558505e316bb36c6b90a0cd8ba780dc7cb9fd3 Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Fri, 29 Mar 2024 20:12:15 +0000
Subject: [PATCH 3/3] Rename as EnableContiguousRegions
---
.../lib/scudo/standalone/allocator_config.def | 12 ++++++------
compiler-rt/lib/scudo/standalone/primary64.h | 6 +++---
.../lib/scudo/standalone/tests/primary_test.cpp | 2 +-
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index e080321825040b..ba3acf7a1b9227 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -87,14 +87,14 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Indicates support for offsetting the start of a region by a random number of
-// pages. This is only used if `PreserveAllRegions` is enabled.
+// pages. This is only used if `EnableContiguousRegions` is enabled.
PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
-// When `PreserveAllRegions` is true, the virtual address for all regions will
-// be preserved within a big chunk of memory. This will reduce the fragmentation
-// caused by region allocations but may require a huge amount of contiguous
-// pages at initialization.
-PRIMARY_OPTIONAL(const bool, PreserveAllRegions, true)
+// When `EnableContiguousRegions` is true, the virtual address for all regions
+// will be preserved within a big chunk of memory. This will reduce the
+// fragmentation caused by region allocations but may require a huge amount of
+// contiguous pages at initialization.
+PRIMARY_OPTIONAL(const bool, EnableContiguousRegions, true)
// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
//
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 252101ea219f2d..d642a6ceb494c6 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -125,7 +125,7 @@ template <typename Config> class SizeClassAllocator64 {
for (uptr I = 0; I < NumClasses; I++)
getRegionInfo(I)->RandState = getRandomU32(&Seed);
- if (Config::getPreserveAllRegions()) {
+ if (Config::getEnableContiguousRegions()) {
ReservedMemoryT ReservedMemory = {};
// Reserve the space required for the Primary.
CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
@@ -565,7 +565,7 @@ template <typename Config> class SizeClassAllocator64 {
RegionInfo *Region = getRegionInfo(ClassId);
Region->MMLock.assertHeld();
- if (!Config::getPreserveAllRegions() &&
+ if (!Config::getEnableContiguousRegions() &&
!Region->MemMapInfo.MemMap.isAllocated()) {
return 0U;
}
@@ -1010,7 +1010,7 @@ template <typename Config> class SizeClassAllocator64 {
CompactPtrT *ToArray,
const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
- if (!Config::getPreserveAllRegions() &&
+ if (!Config::getEnableContiguousRegions() &&
!Region->MemMapInfo.MemMap.isAllocated()) {
ReservedMemoryT ReservedMemory;
if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index a6bbf98026455e..1cf3bb51db0e71 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -90,7 +90,7 @@ template <typename SizeClassMapT> struct TestConfig3 {
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
typedef scudo::uptr CompactPtrT;
static const scudo::uptr CompactPtrScale = 0;
- static const bool PreserveAllRegions = false;
+ static const bool EnableContiguousRegions = false;
static const bool EnableRandomOffset = true;
static const scudo::uptr MapSizeIncrement = 1UL << 18;
};
More information about the llvm-commits
mailing list