[compiler-rt] [scudo] Remove unused field in BatchGroup (PR #109322)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 19 12:35:34 PDT 2024
https://github.com/ChiaHungDuan created https://github.com/llvm/llvm-project/pull/109322
Also fix the logic for determining the size of BatchClass and update some outdated comments.
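For reference, below is a minimal standalone sketch of the size computation this patch moves to: since both TransferBatch and BatchGroup objects are carved out of BatchClass blocks, the block size has to cover whichever structure is larger, rounded up as in the patched getSizeByClassId(). The stand-in structs, their field layouts, and the CompactPtrScale value are assumptions for illustration only, not the real scudo headers.

#include <algorithm>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
using u16 = uint16_t;

// Assumed value for illustration; the real scale comes from the allocator config.
constexpr uptr CompactPtrScale = 4;

template <class T> constexpr T roundUp(T X, T Boundary) {
  // Boundary is assumed to be a power of two, as in scudo.
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Stand-in for TransferBatch<SizeClassAllocator>: a batch of compact pointers.
struct FakeTransferBatch {
  FakeTransferBatch *Next;
  u16 Count;
  uptr Blocks[32]; // placeholder capacity
};

// Stand-in for BatchGroup after this patch (PushedBlocks removed,
// MaxCachedPerBatch moved to the end).
struct FakeBatchGroup {
  FakeBatchGroup *Next;
  uptr CompactPtrGroupBase;
  uptr BytesInBGAtLastCheckpoint;
  void *Batches; // placeholder for SinglyLinkedList<TransferBatch>
  u16 MaxCachedPerBatch;
};

// Mirrors the patched BatchClass case of getSizeByClassId(): a BatchClass
// block must be able to hold either structure, so take the max of the two
// sizes before rounding up to the compact-pointer granularity.
uptr batchClassBlockSize() {
  return roundUp<uptr>(
      std::max(sizeof(FakeTransferBatch), sizeof(FakeBatchGroup)),
      uptr{1} << CompactPtrScale);
}

int main() {
  printf("BatchClass block size: %zu bytes\n",
         static_cast<size_t>(batchClassBlockSize()));
  return 0;
}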
From a5dc4fdf42ec702bd0c68006b84f3d4fe30d6861 Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Thu, 19 Sep 2024 19:33:00 +0000
Subject: [PATCH] [scudo] Remove unused field in BatchGroup
Also fix the logic for determining the size of BatchClass and update
some outdated comments.
---
.../lib/scudo/standalone/allocator_common.h | 7 ++--
compiler-rt/lib/scudo/standalone/primary32.h | 16 +--------
compiler-rt/lib/scudo/standalone/primary64.h | 34 ++++++-------------
3 files changed, 14 insertions(+), 43 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_common.h b/compiler-rt/lib/scudo/standalone/allocator_common.h
index 2b77516ad11cae..0169601f9c6254 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_common.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -75,16 +75,13 @@ template <class SizeClassAllocator> struct BatchGroup {
BatchGroup *Next;
// The compact base address of each group
uptr CompactPtrGroupBase;
- // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
- u16 MaxCachedPerBatch;
- // Number of blocks pushed into this group. This is an increment-only
- // counter.
- uptr PushedBlocks;
// This is used to track how many bytes are not in-use since last time we
// tried to release pages.
uptr BytesInBGAtLastCheckpoint;
// Blocks are managed by TransferBatch in a list.
SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
+ // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
+ u16 MaxCachedPerBatch;
};
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 9ef5e7102e3220..654b129d9f5471 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -56,12 +56,9 @@ template <typename Config> class SizeClassAllocator32 {
typedef TransferBatch<ThisT> TransferBatchT;
typedef BatchGroup<ThisT> BatchGroupT;
- static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
- "BatchGroupT uses the same class size as TransferBatchT");
-
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? sizeof(TransferBatchT)
+ ? Max(sizeof(BatchGroupT), sizeof(TransferBatchT))
: SizeClassMap::getSizeByClassId(ClassId);
}
@@ -531,9 +528,6 @@ template <typename Config> class SizeClassAllocator32 {
// BatchClass hasn't enabled memory group. Use `0` to indicate there's no
// memory group here.
BG->CompactPtrGroupBase = 0;
- // `BG` is also the block of BatchClassId. Note that this is different
- // from `CreateGroup` in `pushBlocksImpl`
- BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
@@ -558,9 +552,6 @@ template <typename Config> class SizeClassAllocator32 {
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
--Size;
- DCHECK_EQ(BG->PushedBlocks, 1U);
- // `TB` is also the block of BatchClassId.
- BG->PushedBlocks += 1;
BG->Batches.push_front(TB);
}
@@ -587,8 +578,6 @@ template <typename Config> class SizeClassAllocator32 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}
-
- BG->PushedBlocks += Size;
}
// Push the blocks to their batch group. The layout will be like,
//
@@ -624,7 +613,6 @@ template <typename Config> class SizeClassAllocator32 {
BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
- BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
@@ -652,8 +640,6 @@ template <typename Config> class SizeClassAllocator32 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}
-
- BG->PushedBlocks += Size;
};
Sci->FreeListInfo.PushedBlocks += Size;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index a3b6e309ed3fce..a3876479ec8158 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -61,12 +61,12 @@ template <typename Config> class SizeClassAllocator64 {
typedef TransferBatch<ThisT> TransferBatchT;
typedef BatchGroup<ThisT> BatchGroupT;
- static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
- "BatchGroupT uses the same class size as TransferBatchT");
-
+ // BatchClass is used to store internal metadata so it needs to be at least as
+ // large as the largest data structure.
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
+ ? roundUp(Max(sizeof(TransferBatchT), sizeof(BatchGroupT)),
+ 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
@@ -236,7 +236,7 @@ template <typename Config> class SizeClassAllocator64 {
} else {
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of
- // them to call populateFreeListAndPopBatch(). To avoid both of them
+ // them to call populateFreeListAndPopBlocks(). To avoid both of them
// doing that, always check the freelist before mapping new pages.
ScopedLock ML(Region->MMLock);
{
@@ -690,9 +690,6 @@ template <typename Config> class SizeClassAllocator64 {
// BatchClass hasn't enabled memory group. Use `0` to indicate there's no
// memory group here.
BG->CompactPtrGroupBase = 0;
- // `BG` is also the block of BatchClassId. Note that this is different
- // from `CreateGroup` in `pushBlocksImpl`
- BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
@@ -717,9 +714,6 @@ template <typename Config> class SizeClassAllocator64 {
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
--Size;
- DCHECK_EQ(BG->PushedBlocks, 1U);
- // `TB` is also the block of BatchClassId.
- BG->PushedBlocks += 1;
BG->Batches.push_front(TB);
}
@@ -746,8 +740,6 @@ template <typename Config> class SizeClassAllocator64 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}
-
- BG->PushedBlocks += Size;
}
// Push the blocks to their batch group. The layout will be like,
@@ -782,7 +774,6 @@ template <typename Config> class SizeClassAllocator64 {
BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
- BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
@@ -810,8 +801,6 @@ template <typename Config> class SizeClassAllocator64 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}
-
- BG->PushedBlocks += Size;
};
Region->FreeListInfo.PushedBlocks += Size;
@@ -884,7 +873,7 @@ template <typename Config> class SizeClassAllocator64 {
while (true) {
// We only expect one thread doing the freelist refillment and other
// threads will be waiting for either the completion of the
- // `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
+ // `populateFreeListAndPopBlocks()` or `pushBlocks()` called by other
// threads.
bool PopulateFreeList = false;
{
@@ -922,7 +911,7 @@ template <typename Config> class SizeClassAllocator64 {
// At here, there are two preconditions to be met before waiting,
// 1. The freelist is empty.
// 2. Region->isPopulatingFreeList == true, i.e, someone is still doing
- // `populateFreeListAndPopBatch()`.
+ // `populateFreeListAndPopBlocks()`.
//
// Note that it has the chance that freelist is empty but
// Region->isPopulatingFreeList == false because all the new populated
@@ -938,8 +927,8 @@ template <typename Config> class SizeClassAllocator64 {
// Now the freelist is empty and someone's doing the refillment. We will
// wait until anyone refills the freelist or someone finishes doing
- // `populateFreeListAndPopBatch()`. The refillment can be done by
- // `populateFreeListAndPopBatch()`, `pushBlocks()`,
+ // `populateFreeListAndPopBlocks()`. The refillment can be done by
+ // `populateFreeListAndPopBlocks()`, `pushBlocks()`,
// `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
Region->FLLockCV.wait(Region->FLLock);
@@ -1119,8 +1108,8 @@ template <typename Config> class SizeClassAllocator64 {
// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
// the requests from `PushBlocks` and `PopBatch` which are external
- // interfaces. `populateFreeListAndPopBatch` is the internal interface so we
- // should set the values back to avoid incorrectly setting the stats.
+ // interfaces. `populateFreeListAndPopBlocks` is the internal interface so
+ // we should set the values back to avoid incorrectly setting the stats.
Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
const uptr AllocatedUser = Size * NumberOfBlocks;
@@ -1689,7 +1678,6 @@ template <typename Config> class SizeClassAllocator64 {
GroupsToRelease.pop_front();
if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
- BG->PushedBlocks += Cur->PushedBlocks;
// We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
// collecting the `GroupsToRelease`.
BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;