[compiler-rt] bce8c9e - [scudo] Try to release pages after unlocking the TSDs
Chia-hung Duan via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 6 10:43:31 PDT 2023
Author: Chia-hung Duan
Date: 2023-07-06T17:40:26Z
New Revision: bce8c9e3d785cb99aab512698b4a1a6af5c032de
URL: https://github.com/llvm/llvm-project/commit/bce8c9e3d785cb99aab512698b4a1a6af5c032de
DIFF: https://github.com/llvm/llvm-project/commit/bce8c9e3d785cb99aab512698b4a1a6af5c032de.diff
LOG: [scudo] Try to release pages after unlocking the TSDs
This increases parallelism and TSD utilization: the page release is now attempted after the TSD has been unlocked, so other threads are not blocked on the TSD while pages are being released.
Reviewed By: cferris
Differential Revision: https://reviews.llvm.org/D152988
Added:
Modified:
compiler-rt/lib/scudo/standalone/combined.h
compiler-rt/lib/scudo/standalone/local_cache.h
compiler-rt/lib/scudo/standalone/primary32.h
compiler-rt/lib/scudo/standalone/primary64.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 06ed28221eb196..b17acc71f89205 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -1178,9 +1178,16 @@ class Allocator {
if (LIKELY(ClassId)) {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- TSD->getCache().deallocate(ClassId, BlockBegin);
+ const bool CacheDrained =
+ TSD->getCache().deallocate(ClassId, BlockBegin);
if (UnlockRequired)
TSD->unlock();
+ // When we have drained some blocks back to the Primary from the TSD,
+ // that implies that we may have the chance to release some pages as
+ // well. Note that, in order not to block other threads' access to the
+ // TSD, we release the TSD first and then try the page release.
+ if (CacheDrained)
+ Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
} else {
if (UNLIKELY(useMemoryTagging<Config>(Options)))
storeTags(reinterpret_cast<uptr>(BlockBegin),
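The combined.h hunk above reduces to an ordering rule: do the cheap per-thread cache update while the TSD is held, unlock the TSD, and only then attempt the potentially expensive page release. A minimal standalone sketch of that ordering, using std::mutex plus hypothetical FakeCache/FakePrimary stand-ins rather than the scudo classes:

#include <mutex>

struct FakeCache {
  // Stand-in for TSD->getCache().deallocate(); returns true when the cache
  // had to drain blocks back to the primary allocator.
  bool deallocate(void *P) { (void)P; return true; }
};

struct FakePrimary {
  // Stand-in for the expensive page-release pass.
  void tryReleaseToOS() {}
};

void deallocateSketch(std::mutex &TSDLock, FakeCache &Cache,
                      FakePrimary &Primary, void *P) {
  bool CacheDrained;
  {
    std::lock_guard<std::mutex> L(TSDLock); // hold the TSD only briefly
    CacheDrained = Cache.deallocate(P);
  } // TSD is released here
  if (CacheDrained)
    Primary.tryReleaseToOS(); // expensive work runs outside the TSD lock
}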
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index a3eca744b8f68b..6d36a1c399ff15 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -113,13 +113,16 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
return Allocator->decompactPtr(ClassId, CompactP);
}
- void deallocate(uptr ClassId, void *P) {
+ bool deallocate(uptr ClassId, void *P) {
CHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
// We still have to initialize the cache in the event that the first heap
// operation in a thread is a deallocation.
initCacheMaybe(C);
- if (C->Count == C->MaxCount)
+
+ // If the cache is full, drain half of the blocks back to the main allocator.
+ const bool NeedToDrainCache = C->Count == C->MaxCount;
+ if (NeedToDrainCache)
drain(C, ClassId);
// See comment in allocate() about memory accesses.
const uptr ClassSize = C->ClassSize;
@@ -127,6 +130,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
Stats.sub(StatAllocated, ClassSize);
Stats.add(StatFree, ClassSize);
+
+ return NeedToDrainCache;
}
bool isEmpty() const {
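As a toy illustration of the new deallocate() contract (a hypothetical TinyCache, not the scudo type, assuming MaxCount >= 2): cache the block, drain half of a full cache back to a shared allocator, and return true only on the calls that actually drained:

#include <cstddef>
#include <cstring>

template <size_t MaxCount> struct TinyCache {
  void *Blocks[MaxCount];
  size_t Count = 0;

  // DrainHalf is a stand-in for pushing blocks back to the shared allocator.
  bool deallocate(void *P, void (*DrainHalf)(void **Blocks, size_t N)) {
    const bool NeedToDrainCache = (Count == MaxCount);
    if (NeedToDrainCache) {
      const size_t N = MaxCount / 2;
      DrainHalf(Blocks, N); // give half of the blocks back
      std::memmove(Blocks, Blocks + N, (Count - N) * sizeof(void *));
      Count -= N;
    }
    Blocks[Count++] = P;
    // The caller uses this to decide whether a page release is worth trying.
    return NeedToDrainCache;
  }
};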
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index f6891dab0b0264..c8dd6977b9ab09 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -232,9 +232,6 @@ template <typename Config> class SizeClassAllocator32 {
ScopedLock L(Sci->Mutex);
pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
-
- if (ClassId != SizeClassMap::BatchClassId)
- releaseToOSMaybe(Sci, ClassId);
}
void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -323,6 +320,14 @@ template <typename Config> class SizeClassAllocator32 {
return true;
}
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ // TODO: Once we have separate locks as in primary64, we may consider
+ // using tryLock() here as well.
+ ScopedLock L(Sci->Mutex);
+ return releaseToOSMaybe(Sci, ClassId, ReleaseType);
+ }
+
uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
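A rough analogue of the 32-bit wrapper above, with std::mutex and a placeholder releaseMaybe() standing in for Sci->Mutex and releaseToOSMaybe(): it takes the per-size-class lock unconditionally, so the deallocating thread can briefly block behind another holder; the 64-bit primary below sidesteps that with tryLock().

#include <cstddef>
#include <mutex>

struct SizeClassState {
  std::mutex Mutex;                   // stands in for Sci->Mutex
  size_t releaseMaybe() { return 0; } // stands in for releaseToOSMaybe()
};

size_t tryReleaseSketch32(SizeClassState &Sci) {
  std::lock_guard<std::mutex> L(Sci.Mutex); // blocking, unlike primary64
  return Sci.releaseMaybe();
}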
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 8571a2dfbae507..dd58ebabba0b30 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -331,16 +331,6 @@ template <typename Config> class SizeClassAllocator64 {
ScopedLock L(Region->FLLock);
pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
}
-
- // Only non-BatchClass will be here, try to release the pages in the region.
-
- // Note that the tryLock() may fail spuriously, given that it should rarely
- // happen and page releasing is fine to skip, we don't take certain
- // approaches to ensure one page release is done.
- if (Region->MMLock.tryLock()) {
- releaseToOSMaybe(Region, ClassId);
- Region->MMLock.unlock();
- }
}
void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -426,6 +416,19 @@ template <typename Config> class SizeClassAllocator64 {
return true;
}
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ // Note that tryLock() may fail spuriously. Since that should rarely
+ // happen and skipping a page release is harmless, we don't take extra
+ // measures to ensure that a page release is done.
+ if (Region->MMLock.tryLock()) {
+ uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
+ Region->MMLock.unlock();
+ return BytesReleased;
+ }
+ return 0;
+ }
+
uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
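And the 64-bit counterpart of the same sketch (again std::mutex with hypothetical RegionState/releaseMaybe() names): a failed try_lock() only means another thread holds the memory-map lock, and since this release is opportunistic it is simply skipped, so the deallocation path never blocks here.

#include <cstddef>
#include <mutex>

struct RegionState {
  std::mutex MMLock;                  // stands in for Region->MMLock
  size_t releaseMaybe() { return 0; } // stands in for releaseToOSMaybe()
};

size_t tryReleaseSketch64(RegionState &Region) {
  if (!Region.MMLock.try_lock())
    return 0;                         // contended: skip the release
  size_t BytesReleased = Region.releaseMaybe();
  Region.MMLock.unlock();
  return BytesReleased;
}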