[compiler-rt] [scudo] Add primary option to enable/disable cache blocks. (PR #129794)
Jordan R AW via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 7 10:48:11 PST 2025
================
@@ -184,6 +179,148 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}
};
+template <class SizeClassAllocator> struct SizeClassAllocatorNoCache {
+  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+  void init(GlobalStats *S, SizeClassAllocator *A) {
+    Stats.init();
+    if (LIKELY(S))
+      S->link(&Stats);
+    Allocator = A;
+    initAllocator();
+  }
+
+  void destroy(GlobalStats *S) {
+    if (LIKELY(S))
+      S->unlink(&Stats);
+  }
+
+  void *allocate(uptr ClassId) {
+    CompactPtrT CompactPtr;
+    uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
+    if (NumBlocksPopped == 0)
+      return nullptr;
+    DCHECK_EQ(NumBlocksPopped, 1U);
+    const PerClass *C = &PerClassArray[ClassId];
+    Stats.add(StatAllocated, C->ClassSize);
+    Stats.sub(StatFree, C->ClassSize);
+    return Allocator->decompactPtr(ClassId, CompactPtr);
+  }
+
+  bool deallocate(uptr ClassId, void *P) {
+    CHECK_LT(ClassId, NumClasses);
+
+    if (ClassId == BatchClassId)
+      return deallocateBatchClassBlock(P);
+
+    CompactPtrT CompactPtr =
+        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
+    Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
+    PerClass *C = &PerClassArray[ClassId];
+    Stats.sub(StatAllocated, C->ClassSize);
+    Stats.add(StatFree, C->ClassSize);
+
+    // The following adopts the same draining strategy as
+    // SizeClassAllocatorLocalCache so that we use the same hint when doing
+    // a page release.
+    ++C->Count;
+    const bool SuggestDraining = C->Count == C->MaxCount;
+    if (SuggestDraining)
+      C->Count = 0;
+    return SuggestDraining;
+  }
+
+  void *getBatchClassBlock() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count == 0) {
+      const u16 NumBlocksRefilled = Allocator->popBlocks(
+          this, BatchClassId, BatchClassStorage, C->MaxCount);
+      if (NumBlocksRefilled == 0)
+        reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+      DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
+      C->Count = NumBlocksRefilled;
+    }
+
+    const uptr ClassSize = C->ClassSize;
+    CompactPtrT CompactP = BatchClassStorage[--C->Count];
+    Stats.add(StatAllocated, ClassSize);
+    Stats.sub(StatFree, ClassSize);
+
+    return Allocator->decompactPtr(BatchClassId, CompactP);
+  }
+
+  LocalStats &getStats() { return Stats; }
+
+  void getStats(ScopedString *Str) { Str->append(" No block is cached.\n"); }
+
+  bool isEmpty() const {
+    const PerClass *C = &PerClassArray[BatchClassId];
+    return C->Count == 0;
+  }
+  void drain() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count > 0) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+  }
+
+  static u16 getMaxCached(uptr Size) {
+    return Min(SizeClassMap::MaxNumCachedHint,
+               SizeClassMap::getMaxCachedHint(Size));
+  }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr BatchClassId = SizeClassMap::BatchClassId;
+  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+    u16 Count = 0;
+    u16 MaxCount;
+    // Note: ClassSize is zero for the transfer batch.
+    uptr ClassSize;
+  };
+  PerClass PerClassArray[NumClasses] = {};
+  // Popping BatchClass blocks requires taking a certain number of blocks at
+  // once. This restriction comes from how we manage the storage of BatchClass
+  // blocks in the primary allocator. See more details in `popBlocksImpl` in
+  // the primary allocator.
+  CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint] = {};
+  LocalStats Stats;
+  SizeClassAllocator *Allocator = nullptr;
+
+  bool deallocateBatchClassBlock(void *P) {
+    PerClass *C = &PerClassArray[BatchClassId];
+    // Drain all the blocks.
+    if (C->Count == C->MaxCount) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+    BatchClassStorage[C->Count++] =
----------------
ajordanr-google wrote:
Also, prefer preincrement: https://llvm.org/docs/CodingStandards.html#prefer-preincrement
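
A minimal illustration of that guideline (hypothetical code, not from this PR): when the previous value of the expression is unused, the prefix form is preferred, and for iterator-like types it also avoids the temporary copy the postfix form implies.

  #include <vector>

  // Hypothetical helper, only to show the guideline in isolation.
  void zeroAll(std::vector<int> &V) {
    // The old iterator value is never needed here, so use the prefix form.
    for (auto It = V.begin(), End = V.end(); It != End; ++It)
      *It = 0;
  }

Per the same coding-standard section, postincrement stays where the previous value is actually consumed, for example when it serves as an index.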
https://github.com/llvm/llvm-project/pull/129794