[compiler-rt] [scudo] Update secondary cache release ordering. (PR #104079)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 14 10:38:27 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Joshua Baehring (JoshuaMBa)
<details>
<summary>Changes</summary>
Secondary cache entries are now released to the OS from the least recently used entry to the most recently used. This avoids unnecessary scans of the cache: under LRU ordering, entries that are ready to be released (specifically, entries older than the configurable release interval) always sit at the tail of the list of committed entries, so the release walk can stop at the first entry that is still too recent (see the sketch below). For the same reason, the `OldestTime` variable is no longer needed to signal when releases are necessary, so it has been removed.
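To make the ordering argument concrete, here is a minimal, self-contained C++ sketch of the idea — not the Scudo code itself; the `Block` struct, the `releaseOlderThan` scaffolding, and the timestamps are invented for illustration. Walking an intrusive LRU list from its tail lets the release loop stop at the first entry that is too recent, with no full-array scan and no `OldestTime` bookkeeping:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical array-backed LRU list, index-linked like Scudo's cache.
constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Block {
  uint64_t Time = 0;            // Insertion timestamp; 0 once released.
  uint16_t Prev = InvalidEntry; // Toward the head (more recent).
  uint16_t Next = InvalidEntry; // Toward the tail (less recent).
};

// Entries are pushed at the head, so Time is non-increasing from head to
// tail. Walking from the tail, stop at the first entry newer than Cutoff.
void releaseOlderThan(std::vector<Block> &Entries, uint16_t Tail,
                      uint64_t Cutoff) {
  for (uint16_t I = Tail; I != InvalidEntry; I = Entries[I].Prev) {
    if (Entries[I].Time > Cutoff)
      break;               // Everything toward the head is newer still.
    Entries[I].Time = 0;   // Stand-in for releaseAndZeroPagesToOS().
  }
}

int main() {
  // Three entries inserted oldest..newest; list runs head=2 .. tail=0.
  std::vector<Block> E(3);
  E[0] = {10, 1, InvalidEntry}; // tail, oldest
  E[1] = {20, 2, 0};
  E[2] = {30, InvalidEntry, 1}; // head, newest
  releaseOlderThan(E, /*Tail=*/0, /*Cutoff=*/25);
  // Now E[0].Time == 0 and E[1].Time == 0; E[2] is untouched.
}
```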
The committed (non-released) and decommitted (released) entries are now kept in two explicitly separate pools, letting future cache logic branch easily on whether an entry is committed or decommitted. In particular, committed entries that satisfy the `MaxUnusedCachePages` requirement are retrieved in preference to optimal-fit decommitted entries; a sketch of this priority follows below.
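A rough sketch of that retrieval priority, with the fit test reduced to "any entry in the pool" — the `Cache` wrapper and `retrieve` signature here are hypothetical; the real code in the diff compares header positions against `MaxUnusedCachePages`:

```cpp
#include <array>
#include <cstddef>
#include <initializer_list>
#include <optional>
#include <vector>

// Hypothetical two-pool cache index: one vector of entry indices per list.
enum EntryListT { COMMITTED = 0, DECOMMITTED = 1 };

struct Cache {
  std::array<std::vector<size_t>, 2> Lists; // Entry indices per pool.

  // Search the committed pool before the decommitted one: a workable
  // committed fit wins even if a decommitted entry would fit more
  // tightly, since it needs no re-commit of released pages.
  std::optional<size_t> retrieve() {
    for (EntryListT L : {COMMITTED, DECOMMITTED})
      if (!Lists[L].empty())
        return Lists[L].front();
    return std::nullopt;
  }
};

int main() {
  Cache C;
  C.Lists[DECOMMITTED] = {7}; // Only a decommitted entry available.
  auto I = C.retrieve();      // Falls back: returns 7.
  C.Lists[COMMITTED] = {3};
  I = C.retrieve();           // Committed pool wins: returns 3.
  (void)I;
}
```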
---
Full diff: https://github.com/llvm/llvm-project/pull/104079.diff
2 Files Affected:
- (modified) compiler-rt/lib/scudo/standalone/secondary.h (+155-95)
- (modified) compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp (+27-2)
``````````diff
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 27f8697db7838f..4b11d039c654a4 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -184,6 +184,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
class MapAllocatorCache {
public:
+ typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+ // TODO: Refactor the intrusive list to support non-pointer link type
+ typedef struct {
+ u16 Head;
+ u16 Tail;
+ } ListInfo;
+
void getStats(ScopedString *Str) {
ScopedLock L(Mutex);
uptr Integral;
@@ -201,13 +209,18 @@ class MapAllocatorCache {
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
- CachedBlock &Entry = Entries[I];
- Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
- "BlockSize: %zu %s\n",
- Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
- Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
- }
+ auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+ for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+ I = Entries[I].Next) {
+ CachedBlock &Entry = Entries[I];
+ Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+ "BlockSize: %zu %s\n",
+ Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+ Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+ }
+ };
+ printList(COMMITTED);
+ printList(DECOMMITTED);
}
// Ensure the default maximum specified fits the array.
@@ -231,8 +244,10 @@ class MapAllocatorCache {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
// The cache is initially empty
- LRUHead = CachedBlock::InvalidEntry;
- LRUTail = CachedBlock::InvalidEntry;
+ EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+ EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+ EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+ EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
// Available entries will be retrieved starting from the beginning of the
// Entries array
@@ -250,7 +265,6 @@ class MapAllocatorCache {
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
u64 Time;
CachedBlock Entry;
-
Entry.CommitBase = CommitBase;
Entry.CommitSize = CommitSize;
Entry.BlockBegin = BlockBegin;
@@ -307,23 +321,28 @@ class MapAllocatorCache {
}
CachedBlock PrevEntry = Quarantine[QuarantinePos];
Quarantine[QuarantinePos] = Entry;
- if (OldestTime == 0)
- OldestTime = Entry.Time;
Entry = PrevEntry;
}
- // All excess entries are evicted from the cache
+ // All excess entries are evicted from the cache.
+ // DECOMMITTED entries, being older than the COMMITTED
+ // entries, are evicted first in least-recently-used (LRU)
+ // fashion, followed by the COMMITTED entries.
while (needToEvict()) {
+ EntryListT EvictionListType;
+ if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+ EvictionListType = COMMITTED;
+ else
+ EvictionListType = DECOMMITTED;
// Save MemMaps of evicted entries to perform unmap outside of lock
- EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
- remove(LRUTail);
+ EvictionMemMaps.push_back(
+ Entries[EntryLists[EvictionListType].Tail].MemMap);
+ remove(EntryLists[EvictionListType].Tail, EvictionListType);
}
- insert(Entry);
+ insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
- if (OldestTime == 0)
- OldestTime = Entry.Time;
- } while (0);
+ } while (0); // ScopedLock L(Mutex);
for (MemMapT &EvictMemMap : EvictionMemMaps)
unmapCallBack(EvictMemMap);
@@ -340,17 +359,14 @@ class MapAllocatorCache {
// 10% of the requested size proved to be the optimal choice for
// retrieving cached blocks after testing several options.
constexpr u32 FragmentedBytesDivisor = 10;
- bool Found = false;
CachedBlock Entry;
+ uptr OptimalFitIndex = CachedBlock::InvalidEntry;
+ uptr MinDiff = UINTPTR_MAX;
+ EntryListT OptimalFitListType = NONE;
EntryHeaderPos = 0;
- {
- ScopedLock L(Mutex);
- CallsToRetrieve++;
- if (EntriesCount == 0)
- return {};
- u32 OptimalFitIndex = 0;
- uptr MinDiff = UINTPTR_MAX;
- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+
+ auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+ for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
const uptr CommitBase = Entries[I].CommitBase;
const uptr CommitSize = Entries[I].CommitSize;
@@ -360,34 +376,48 @@ class MapAllocatorCache {
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
- AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
continue;
- }
- Found = true;
+
const uptr Diff = HeaderPos - CommitBase;
- // immediately use a cached block if it's size is close enough to the
- // requested size.
+ // immediately use a cached block if its size is close enough to
+ // the requested size.
const uptr MaxAllowedFragmentedBytes =
(CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
if (Diff <= MaxAllowedFragmentedBytes) {
OptimalFitIndex = I;
EntryHeaderPos = HeaderPos;
- break;
+ OptimalFitListType = ListType;
+ return true;
}
+
// keep track of the smallest cached block
// that is greater than (AllocSize + HeaderSize)
if (Diff > MinDiff)
continue;
OptimalFitIndex = I;
MinDiff = Diff;
+ OptimalFitListType = ListType;
EntryHeaderPos = HeaderPos;
}
- if (Found) {
- Entry = Entries[OptimalFitIndex];
- remove(OptimalFitIndex);
- SuccessfulRetrieves++;
- }
- }
+ return (OptimalFitIndex != CachedBlock::InvalidEntry);
+ };
+
+ {
+ ScopedLock L(Mutex);
+ CallsToRetrieve++;
+ if (EntriesCount == 0)
+ return {};
+
+ // Prioritize valid fit from COMMITTED entries over
+ // optimal fit from DECOMMITTED entries
+ if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+ return {};
+
+ Entry = Entries[OptimalFitIndex];
+ remove(OptimalFitIndex, OptimalFitListType);
+ SuccessfulRetrieves++;
+ } // ScopedLock L(Mutex);
return Entry;
}
@@ -432,10 +462,15 @@ class MapAllocatorCache {
Quarantine[I].invalidate();
}
}
- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
- Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
- Entries[I].CommitSize, 0);
- }
+ auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+ for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+ I = Entries[I].Next) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
+ };
+ disableLists(COMMITTED);
+ disableLists(DECOMMITTED);
QuarantinePos = -1U;
}
@@ -450,7 +485,7 @@ class MapAllocatorCache {
return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
}
- void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+ void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
// Cache should be populated with valid entries when not empty
@@ -459,66 +494,86 @@ class MapAllocatorCache {
u32 FreeIndex = AvailableHead;
AvailableHead = Entries[AvailableHead].Next;
- if (EntriesCount == 0) {
- LRUTail = static_cast<u16>(FreeIndex);
- } else {
- // Check list order
- if (EntriesCount > 1)
- DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
- Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
- }
-
Entries[FreeIndex] = Entry;
- Entries[FreeIndex].Next = LRUHead;
- Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
- LRUHead = static_cast<u16>(FreeIndex);
+ pushFront(FreeIndex, ListType);
EntriesCount++;
+ if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+ DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+ Entries[Entries[EntryLists[ListType].Head].Next].Time);
+ }
// Availability stack should not have available entries when all entries
// are in use
if (EntriesCount == Config::getEntriesArraySize())
DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
}
- void remove(uptr I) REQUIRES(Mutex) {
- DCHECK(Entries[I].isValid());
-
- Entries[I].invalidate();
-
- if (I == LRUHead)
- LRUHead = Entries[I].Next;
+ // Joins the entries adjacent to Entries[I], effectively
+ // unlinking Entries[I] from the list
+ void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+ if (I == EntryLists[ListType].Head)
+ EntryLists[ListType].Head = Entries[I].Next;
else
Entries[Entries[I].Prev].Next = Entries[I].Next;
- if (I == LRUTail)
- LRUTail = Entries[I].Prev;
+ if (I == EntryLists[ListType].Tail)
+ EntryLists[ListType].Tail = Entries[I].Prev;
else
Entries[Entries[I].Next].Prev = Entries[I].Prev;
+ }
+ // Invalidates Entries[I], removes Entries[I] from the list, and pushes
+ // Entries[I] onto the stack of available entries
+ void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+ DCHECK(Entries[I].isValid());
+
+ Entries[I].invalidate();
+
+ unlink(I, ListType);
Entries[I].Next = AvailableHead;
AvailableHead = static_cast<u16>(I);
EntriesCount--;
// Cache should not have valid entries when not empty
if (EntriesCount == 0) {
- DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
- DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+ DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+ DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+ DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+ DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
}
}
+ inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+ if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+ EntryLists[ListType].Tail = static_cast<u16>(I);
+ else
+ Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+ Entries[I].Next = EntryLists[ListType].Head;
+ Entries[I].Prev = CachedBlock::InvalidEntry;
+ EntryLists[ListType].Head = static_cast<u16>(I);
+ }
+
void empty() {
MemMapT MapInfo[Config::getEntriesArraySize()];
uptr N = 0;
{
ScopedLock L(Mutex);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
- if (!Entries[I].isValid())
- continue;
- MapInfo[N] = Entries[I].MemMap;
- remove(I);
- N++;
- }
+ auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+ for (uptr I = EntryLists[ListType].Head;
+ I != CachedBlock::InvalidEntry;) {
+ uptr ToRemove = I;
+ I = Entries[I].Next;
+ MapInfo[N] = Entries[ToRemove].MemMap;
+ remove(ToRemove, ListType);
+ N++;
+ }
+ };
+ emptyList(COMMITTED);
+ emptyList(DECOMMITTED);
EntriesCount = 0;
+ for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+ DCHECK(!Entries[I].isValid());
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -526,27 +581,30 @@ class MapAllocatorCache {
}
}
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
- if (!Entry.isValid() || !Entry.Time)
- return;
- if (Entry.Time > Time) {
- if (OldestTime == 0 || Entry.Time < OldestTime)
- OldestTime = Entry.Time;
- return;
- }
+ inline void release(CachedBlock &Entry) {
+ DCHECK(Entry.Time != 0);
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
- return;
- OldestTime = 0;
- for (uptr I = 0; I < Config::getQuarantineSize(); I++)
- releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
- releaseIfOlderThan(Entries[I], Time);
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++) {
+ CachedBlock &Entry = Quarantine[I];
+ if (!Entry.isValid() || Entry.Time > Time)
+ continue;
+ release(Entry);
+ }
+
+ // Release oldest entries first by releasing from tail
+ u16 ReleaseIndex = EntryLists[COMMITTED].Tail;
+ while (ReleaseIndex != CachedBlock::InvalidEntry &&
+ Entries[ReleaseIndex].Time <= Time) {
+ const u16 PrevIndex = Entries[ReleaseIndex].Prev;
+ release(Entries[ReleaseIndex]);
+ unlink(ReleaseIndex, COMMITTED);
+ pushFront(ReleaseIndex, DECOMMITTED);
+ // pushFront() resets Prev, so advance using the saved index.
+ ReleaseIndex = PrevIndex;
+ }
}
HybridMutex Mutex;
@@ -554,7 +612,6 @@ class MapAllocatorCache {
u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
@@ -563,10 +620,12 @@ class MapAllocatorCache {
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};
- // The LRUHead of the cache is the most recently used cache entry
- u16 LRUHead GUARDED_BY(Mutex) = 0;
- // The LRUTail of the cache is the least recently used cache entry
- u16 LRUTail GUARDED_BY(Mutex) = 0;
+ // EntryLists stores the head and tail indices of all
+ // lists being used to store valid cache entries.
+ // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+ // COMMITTED entries have memory chunks that have not been released to the OS.
+ // DECOMMITTED entries have memory chunks that have been released to the OS.
+ ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
// The AvailableHead is the top of the stack of available entries
u16 AvailableHead GUARDED_BY(Mutex) = 0;
};
@@ -706,6 +765,7 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
}
return Ptr;
}
+
// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index e85b6abdb36d22..61f7a46140c86a 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -308,14 +308,14 @@ struct MapAllocatorCacheTest : public Test {
scudo::uptr NumEntries, scudo::uptr Size) {
for (scudo::uptr I = 0; I < NumEntries; I++) {
MemMaps.emplace_back(allocate(Size));
- auto &MemMap = MemMaps[I];
+ auto &MemMap = MemMaps.back();
Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
MemMap.getBase(), MemMap);
}
}
};
-TEST_F(MapAllocatorCacheTest, CacheOrder) {
+TEST_F(MapAllocatorCacheTest, CacheOrderNoRelease) {
std::vector<scudo::MemMapT> MemMaps;
Cache->setOption(scudo::Option::MaxCacheEntriesCount,
CacheConfig::getEntriesArraySize());
@@ -336,6 +336,31 @@ TEST_F(MapAllocatorCacheTest, CacheOrder) {
MemMap.unmap();
}
+TEST_F(MapAllocatorCacheTest, CacheOrderWithRelease) {
+ std::vector<scudo::MemMapT> MemMaps;
+ Cache->setOption(scudo::Option::MaxCacheEntriesCount,
+ CacheConfig::getEntriesArraySize());
+
+ fillCacheWithSameSizeBlocks(MemMaps, CacheConfig::getEntriesArraySize(),
+ TestAllocSize);
+
+ // Release all entries to transfer COMMITTED list contents to
+ // DECOMMITTED list
+ Cache->releaseToOS();
+
+ // Retrieval order should be the inverse of insertion order
+ for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
+ scudo::uptr EntryHeaderPos;
+ scudo::CachedBlock Entry =
+ Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos);
+ EXPECT_EQ(Entry.MemMap.getBase(), MemMaps[I - 1].getBase());
+ }
+
+ // Clean up MemMaps
+ for (auto &MemMap : MemMaps)
+ MemMap.unmap();
+}
+
TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
std::vector<scudo::MemMapT> MemMaps;
// Fill the cache above MaxEntriesCount to force an eviction
``````````
</details>
https://github.com/llvm/llvm-project/pull/104079