[compiler-rt] 531f90a - [scudo] Verify the size of free blocks in primary allocator
Chia-hung Duan via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 6 10:43:30 PDT 2023
Author: Chia-hung Duan
Date: 2023-07-06T17:40:25Z
New Revision: 531f90acc555bae4688b5951eebd365f6e02979b
URL: https://github.com/llvm/llvm-project/commit/531f90acc555bae4688b5951eebd365f6e02979b
DIFF: https://github.com/llvm/llvm-project/commit/531f90acc555bae4688b5951eebd365f6e02979b.diff
LOG: [scudo] Verify the size of free blocks in primary allocator
When all the blocks (including those in local caches) are freed, the total
size of the free blocks should be equal to `AllocatedUser`.
Reviewed By: cferris
Differential Revision: https://reviews.llvm.org/D152769
Added:
Modified:
compiler-rt/lib/scudo/standalone/primary32.h
compiler-rt/lib/scudo/standalone/primary64.h
compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index e99de23f8281e7..f6891dab0b0264 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -109,12 +109,46 @@ template <typename Config> class SizeClassAllocator32 {
}
ScopedLock L(ByteMapMutex);
- for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
}
+ // When all blocks are freed, it has to be the same size as `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ const uptr BlockSize = getSizeByClassId(I);
+ DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
+ }
+
+ SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList)
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Sci->AllocatedUser / BlockSize);
+ }
+
CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
return static_cast<CompactPtrT>(Ptr);
}
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 83d8a31caf616e..8571a2dfbae507 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -152,6 +152,41 @@ template <typename Config> class SizeClassAllocator64 {
PrimaryBase = 0U;
}
+ // When all blocks are freed, it has to be the same size as `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(I);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
+ }
+
+ RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Region->FreeListInfo.BlockList)
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Region->MemMapInfo.AllocatedUser / BlockSize);
+ }
+
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index fb67d448331f55..a17fd58ac604d3 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -116,7 +116,10 @@ struct SizeClassAllocator<TestConfig1, SizeClassMapT>
template <template <typename> class BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
- ~TestAllocator() { this->unmapTestOnly(); }
+ ~TestAllocator() {
+ this->verifyAllBlocksAreReleasedTestOnly();
+ this->unmapTestOnly();
+ }
void *operator new(size_t size) {
void *p = nullptr;
@@ -286,7 +289,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
std::condition_variable Cv;
bool Ready = false;
std::thread Threads[32];
- for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
+ for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
Threads[I] = std::thread([&]() {
static thread_local typename Primary::CacheT Cache;
Cache.init(nullptr, Allocator.get());
@@ -312,6 +315,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
}
Cache.destroy(nullptr);
});
+ }
{
std::unique_lock<std::mutex> Lock(Mutex);
Ready = true;
@@ -386,4 +390,10 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
*std::min_element(Blocks.begin(), Blocks.end()),
GroupSizeMem * 2);
+
+ while (!Blocks.empty()) {
+ Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
+ Blocks.pop_back();
+ }
+ Cache.drain();
}
More information about the llvm-commits
mailing list