[llvm-branch-commits] [compiler-rt] f78c1af - Revert "[scudo] Separated committed and decommitted entries. (#100818)"
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Jul 31 11:01:09 PDT 2024
Author: ChiaHungDuan
Date: 2024-07-31T11:01:07-07:00
New Revision: f78c1afddb70ba5b803c6f279a28c56534a1a081
URL: https://github.com/llvm/llvm-project/commit/f78c1afddb70ba5b803c6f279a28c56534a1a081
DIFF: https://github.com/llvm/llvm-project/commit/f78c1afddb70ba5b803c6f279a28c56534a1a081.diff
LOG: Revert "[scudo] Separated committed and decommitted entries. (#100818)"
This reverts commit 8b2688bd173e79392927bcaed91855e7c4db8eaa.
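For context: the reverted patch (#100818) split the cache's single recency list into two intrusive lists, one for COMMITTED entries (not madvise()'d) and one for DECOMMITTED entries (madvise()'d), each with its own head/tail pair. This revert restores the original single doubly-linked LRU list threaded through the Entries array. A minimal sketch of the two shapes, using identifiers from the diff below with hypothetical scaffolding (not scudo's actual headers):

    #include <cstdint>

    using u16 = std::uint16_t;

    // Shape removed by this revert (#100818): one head/tail pair per
    // commit state, indexed by the entry's EntryListT.
    typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
    typedef struct {
      u16 Head;
      u16 Tail;
    } ListInfo;
    // ListInfo EntryLists[2];

    // Shape restored by this revert: a single list ordered by recency.
    // u16 LRUHead;  // most recently used entry
    // u16 LRUTail;  // least recently used entry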
Added:
Modified:
compiler-rt/lib/scudo/standalone/secondary.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 0f0c4ca3a197b..d8505742d6054 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -180,14 +180,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
template <typename Config> class MapAllocatorCache {
public:
- typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
-
- // TODO: Refactor the intrusive list to support non-pointer link type
- typedef struct {
- u16 Head;
- u16 Tail;
- } ListInfo;
-
void getStats(ScopedString *Str) {
ScopedLock L(Mutex);
uptr Integral;
@@ -205,18 +197,13 @@ template <typename Config> class MapAllocatorCache {
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
- auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
- for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
- I = Entries[I].Next) {
- CachedBlock &Entry = Entries[I];
- Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
- "BlockSize: %zu %s\n",
- Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
- Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
- }
- };
- printList(COMMITTED);
- printList(DECOMMITTED);
+ for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+ CachedBlock &Entry = Entries[I];
+ Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+ "BlockSize: %zu %s\n",
+ Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+ Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+ }
}
// Ensure the default maximum specified fits the array.
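The restored stats printer is a plain walk of the index-linked list: start at LRUHead and follow Next until the InvalidEntry sentinel, printing each block's bounds and an "[R]" marker for entries whose Time is 0 (already released to the OS). A self-contained sketch of that traversal idiom (the sentinel value and struct layout are illustrative, not scudo's):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    using u16 = std::uint16_t;
    constexpr u16 InvalidEntry = 0xFFFF; // stands in for CachedBlock::InvalidEntry

    struct Entry {
      std::uintptr_t CommitBase, CommitSize;
      std::uint64_t Time; // 0 marks a block already released to the OS
      u16 Next;
    };

    void printList(const Entry *Entries, u16 LRUHead) {
      // Most recent -> least recent, ending at the sentinel.
      for (u16 I = LRUHead; I != InvalidEntry; I = Entries[I].Next)
        std::printf("  StartBlockAddress: 0x%" PRIxPTR
                    ", EndBlockAddress: 0x%" PRIxPTR ", BlockSize: %" PRIuPTR " %s\n",
                    Entries[I].CommitBase,
                    Entries[I].CommitBase + Entries[I].CommitSize,
                    Entries[I].CommitSize, Entries[I].Time == 0 ? "[R]" : "");
    }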
@@ -240,10 +227,8 @@ template <typename Config> class MapAllocatorCache {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
// The cache is initially empty
- EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
- EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
- EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
- EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
+ LRUHead = CachedBlock::InvalidEntry;
+ LRUTail = CachedBlock::InvalidEntry;
// Available entries will be retrieved starting from the beginning of the
// Entries array
@@ -325,19 +310,15 @@ template <typename Config> class MapAllocatorCache {
// All excess entries are evicted from the cache
while (needToEvict()) {
// Save MemMaps of evicted entries to perform unmap outside of lock
- EntryListT EvictionListType;
- if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
- EvictionListType = COMMITTED;
- else
- EvictionListType = DECOMMITTED;
- remove(EntryLists[EvictionListType].Tail, EvictionListType);
+ EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+ remove(LRUTail);
}
- insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
+ insert(Entry);
if (OldestTime == 0)
OldestTime = Entry.Time;
- } while (0); // ScopedLock L(Mutex);
+ } while (0);
for (MemMapT &EvictMemMap : EvictionMemMaps)
EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
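The restored store path illustrates a lock-scoping pattern worth noting: eviction happens from the LRU tail while Mutex is held, but only the evicted MemMaps are collected there; the actual unmap calls run after the lock is released, keeping slow syscalls out of the critical section. A simplified, self-contained sketch of that pattern (the container, the MemMap stand-in, and the eviction threshold are assumptions, not scudo's types):

    #include <mutex>
    #include <vector>

    struct MemMap {          // stand-in for scudo's MemMapT
      void unmap() { /* munmap(getBase(), getCapacity()) in the real code */ }
    };

    struct Cache {
      std::mutex Mutex;
      std::vector<MemMap> Blocks; // front = MRU, back = LRU; stands in for the intrusive list
      bool needToEvict() const { return Blocks.size() >= 4; } // illustrative limit

      void store(const MemMap &Entry) {
        std::vector<MemMap> EvictionMemMaps; // unmapped outside the lock
        {
          std::scoped_lock L(Mutex);
          while (needToEvict()) {
            // Save MemMaps of evicted entries; defer the unmap.
            EvictionMemMaps.push_back(Blocks.back());
            Blocks.pop_back();                  // remove(LRUTail) in the real code
          }
          Blocks.insert(Blocks.begin(), Entry); // insert at the MRU end
        }
        for (MemMap &M : EvictionMemMaps) // slow syscalls with Mutex released
          M.unmap();
      }
    };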
@@ -354,69 +335,56 @@ template <typename Config> class MapAllocatorCache {
// 10% of the requested size proved to be the optimal choice for
// retrieving cached blocks after testing several options.
constexpr u32 FragmentedBytesDivisor = 10;
+ bool Found = false;
CachedBlock Entry;
uptr EntryHeaderPos = 0;
- uptr OptimalFitIndex = CachedBlock::InvalidEntry;
{
ScopedLock L(Mutex);
CallsToRetrieve++;
if (EntriesCount == 0)
return false;
+ u32 OptimalFitIndex = 0;
uptr MinDiff = UINTPTR_MAX;
- EntryListT OptimalFitListType = NONE;
- auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
- for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
- I = Entries[I].Next) {
- const uptr CommitBase = Entries[I].CommitBase;
- const uptr CommitSize = Entries[I].CommitSize;
- const uptr AllocPos =
- roundDown(CommitBase + CommitSize - Size, Alignment);
- const uptr HeaderPos = AllocPos - HeadersSize;
- if (HeaderPos > CommitBase + CommitSize)
- continue;
- if (HeaderPos < CommitBase ||
- AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
- continue;
-
- const uptr Diff = HeaderPos - CommitBase;
- // immediately use a cached block if its size is close enough to
- // the requested size.
- const uptr MaxAllowedFragmentedBytes =
- (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
- if (Diff <= MaxAllowedFragmentedBytes) {
- OptimalFitIndex = I;
- EntryHeaderPos = HeaderPos;
- OptimalFitListType = ListType;
- return Entries[OptimalFitIndex];
- }
-
- // keep track of the smallest cached block
- // that is greater than (AllocSize + HeaderSize)
- if (Diff > MinDiff)
- continue;
+ for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+ I = Entries[I].Next) {
+ const uptr CommitBase = Entries[I].CommitBase;
+ const uptr CommitSize = Entries[I].CommitSize;
+ const uptr AllocPos =
+ roundDown(CommitBase + CommitSize - Size, Alignment);
+ const uptr HeaderPos = AllocPos - HeadersSize;
+ if (HeaderPos > CommitBase + CommitSize)
+ continue;
+ if (HeaderPos < CommitBase ||
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+ continue;
+ }
+ Found = true;
+ const uptr Diff = HeaderPos - CommitBase;
+ // immediately use a cached block if its size is close enough to the
+ // requested size.
+ const uptr MaxAllowedFragmentedBytes =
+ (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+ if (Diff <= MaxAllowedFragmentedBytes) {
OptimalFitIndex = I;
- MinDiff = Diff;
- OptimalFitListType = ListType;
EntryHeaderPos = HeaderPos;
+ break;
}
- CachedBlock FoundEntry;
- if (OptimalFitIndex != CachedBlock::InvalidEntry)
- FoundEntry = Entries[OptimalFitIndex];
- return FoundEntry;
- };
-
- // Prioritize valid fit from COMMITTED entries over
- // optimal fit from DECOMMITTED entries
- Entry = FindAvailableEntry(COMMITTED);
- if (!Entry.isValid())
- Entry = FindAvailableEntry(DECOMMITTED);
-
- if (!Entry.isValid())
- return false;
-
- remove(OptimalFitIndex, OptimalFitListType);
- SuccessfulRetrieves++;
- } // ScopedLock L(Mutex);
+ // keep track of the smallest cached block
+ // that is greater than (AllocSize + HeaderSize)
+ if (Diff > MinDiff)
+ continue;
+ OptimalFitIndex = I;
+ MinDiff = Diff;
+ EntryHeaderPos = HeaderPos;
+ }
+ if (Found) {
+ Entry = Entries[OptimalFitIndex];
+ remove(OptimalFitIndex);
+ SuccessfulRetrieves++;
+ }
+ }
+ if (!Found)
+ return false;
*H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
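The restored retrieve() is a best-fit scan with an early exit: starting from the most recently used entry, it computes where the allocation and its header would land inside each cached block, skips blocks that cannot contain them, takes a block immediately if the wasted prefix is at most 1/FragmentedBytesDivisor (10%) of the usable span, and otherwise remembers the block with the smallest waste. A standalone sketch of that selection logic over a plain array (the block layout and helper are simplified assumptions; the MaxUnusedCachePages bound from the real code is omitted for brevity):

    #include <cstddef>
    #include <cstdint>

    using uptr = std::uintptr_t;

    struct Block { uptr CommitBase, CommitSize; };

    inline uptr roundDown(uptr X, uptr Align) { return X & ~(Align - 1); }

    // Returns the index of the chosen block, or N if nothing fits.
    size_t bestFit(const Block *Blocks, size_t N, uptr Size, uptr Alignment,
                   uptr HeadersSize) {
      constexpr uptr FragmentedBytesDivisor = 10; // accept <= 10% wasted prefix
      size_t OptimalFitIndex = N;
      uptr MinDiff = UINTPTR_MAX;
      for (size_t I = 0; I < N; I++) {
        const uptr CommitBase = Blocks[I].CommitBase;
        const uptr CommitSize = Blocks[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        // Reject blocks that are too small; the arithmetic above wraps for those.
        if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
          continue;
        const uptr Diff = HeaderPos - CommitBase; // bytes wasted at the front
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes)
          return I;           // close enough: use it immediately
        if (Diff > MinDiff)   // otherwise keep the tightest fit seen so far
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
      }
      return OptimalFitIndex;
    }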
@@ -480,15 +448,10 @@ template <typename Config> class MapAllocatorCache {
Quarantine[I].invalidate();
}
}
- auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
- for (u32 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
- I = Entries[I].Next) {
- Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
- Entries[I].CommitSize, 0);
- }
- };
- disableLists(COMMITTED);
- disableLists(DECOMMITTED);
+ for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
QuarantinePos = -1U;
}
@@ -503,7 +466,7 @@ template <typename Config> class MapAllocatorCache {
return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
}
- void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
+ void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
// Cache should be populated with valid entries when not empty
@@ -512,92 +475,71 @@ template <typename Config> class MapAllocatorCache {
u32 FreeIndex = AvailableHead;
AvailableHead = Entries[AvailableHead].Next;
+ if (EntriesCount == 0) {
+ LRUTail = static_cast<u16>(FreeIndex);
+ } else {
+ // Check list order
+ if (EntriesCount > 1)
+ DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+ Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+ }
+
Entries[FreeIndex] = Entry;
- pushFront(FreeIndex, ListType);
+ Entries[FreeIndex].Next = LRUHead;
+ Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+ LRUHead = static_cast<u16>(FreeIndex);
EntriesCount++;
- if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
- DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
- Entries[Entries[EntryLists[ListType].Head].Next].Time);
- }
// Availability stack should not have available entries when all entries
// are in use
if (EntriesCount == Config::getEntriesArraySize())
DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
}
- // Joins the entries adjacent to Entries[I], effectively
- // unlinking Entries[I] from the list
- void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
- if (I == EntryLists[ListType].Head)
- EntryLists[ListType].Head = Entries[I].Next;
+ void remove(uptr I) REQUIRES(Mutex) {
+ DCHECK(Entries[I].isValid());
+
+ Entries[I].invalidate();
+
+ if (I == LRUHead)
+ LRUHead = Entries[I].Next;
else
Entries[Entries[I].Prev].Next = Entries[I].Next;
- if (I == EntryLists[ListType].Tail)
- EntryLists[ListType].Tail = Entries[I].Prev;
+ if (I == LRUTail)
+ LRUTail = Entries[I].Prev;
else
Entries[Entries[I].Next].Prev = Entries[I].Prev;
- }
- // Invalidates Entries[I], removes Entries[I] from list, and pushes
- // Entries[I] onto the stack of available entries
- void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
- DCHECK(Entries[I].isValid());
-
- Entries[I].invalidate();
-
- unlink(I, ListType);
Entries[I].Next = AvailableHead;
AvailableHead = static_cast<u16>(I);
EntriesCount--;
// Cache should not have valid entries when empty
if (EntriesCount == 0) {
- DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
- DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
- DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
- DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
+ DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+ DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
}
}
- inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
- if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
- EntryLists[ListType].Tail = static_cast<u16>(I);
- else
- Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
-
- Entries[I].Next = EntryLists[ListType].Head;
- Entries[I].Prev = CachedBlock::InvalidEntry;
- EntryLists[ListType].Head = static_cast<u16>(I);
- }
-
void empty() {
MemMapT MapInfo[Config::getEntriesArraySize()];
uptr N = 0;
{
ScopedLock L(Mutex);
- auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
- for (uptr I = EntryLists[ListType].Head;
- I != CachedBlock::InvalidEntry;) {
- uptr ToRemove = I;
- I = Entries[I].Next;
- MapInfo[N] = Entries[ToRemove].MemMap;
- remove(ToRemove, ListType);
- N++;
- }
- };
- emptyList(COMMITTED);
- emptyList(DECOMMITTED);
+ for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+ if (!Entries[I].isValid())
+ continue;
+ MapInfo[N] = Entries[I].MemMap;
+ remove(I);
+ N++;
+ }
EntriesCount = 0;
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
-
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
- DCHECK(!Entries[I].isValid());
}
void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
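The hunks above restore the core container: a doubly-linked LRU list and a free-slot stack, both threaded through the same fixed Entries array using u16 indices instead of pointers. insert() pops a slot off the availability stack and splices it in at the MRU head; remove() invalidates the entry, unlinks it, and pushes the slot back onto the stack through its Next field. A compilable sketch of that mechanism, under assumed sizes and with a Valid flag standing in for CachedBlock::isValid():

    #include <cassert>
    #include <cstdint>

    using u16 = std::uint16_t;
    constexpr u16 InvalidEntry = 0xFFFF; // sentinel, as in CachedBlock::InvalidEntry
    constexpr u16 ArraySize = 32;        // stands in for Config::getEntriesArraySize()

    struct Node {
      bool Valid = false;
      u16 Next = InvalidEntry;
      u16 Prev = InvalidEntry;
    };

    struct LRUCache {
      Node Entries[ArraySize];
      u16 LRUHead = InvalidEntry; // most recently used
      u16 LRUTail = InvalidEntry; // least recently used
      u16 AvailableHead = 0;      // free-slot stack, linked through Next
      u16 EntriesCount = 0;

      LRUCache() {
        // Chain every slot into the availability stack, as init() does.
        for (u16 I = 0; I + 1 < ArraySize; I++)
          Entries[I].Next = I + 1;
        Entries[ArraySize - 1].Next = InvalidEntry;
      }

      // Pop a free slot and splice it in at the head (MRU end).
      u16 insert() {
        assert(AvailableHead != InvalidEntry && "cache full; evict first");
        u16 FreeIndex = AvailableHead;
        AvailableHead = Entries[FreeIndex].Next;
        if (EntriesCount == 0)
          LRUTail = FreeIndex;               // first entry is also the tail
        else
          Entries[LRUHead].Prev = FreeIndex;
        Entries[FreeIndex] = Node{true, LRUHead, InvalidEntry};
        LRUHead = FreeIndex;
        EntriesCount++;
        return FreeIndex;
      }

      // Invalidate, unlink, and recycle the slot onto the free stack.
      void remove(u16 I) {
        assert(Entries[I].Valid);
        Entries[I].Valid = false;
        if (I == LRUHead)
          LRUHead = Entries[I].Next;
        else
          Entries[Entries[I].Prev].Next = Entries[I].Next;
        if (I == LRUTail)
          LRUTail = Entries[I].Prev;
        else
          Entries[Entries[I].Next].Prev = Entries[I].Prev;
        Entries[I].Next = AvailableHead;
        AvailableHead = I;
        EntriesCount--;
      }
    };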
@@ -619,13 +561,8 @@ template <typename Config> class MapAllocatorCache {
OldestTime = 0;
for (uptr I = 0; I < Config::getQuarantineSize(); I++)
releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
- if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
- unlink(I, COMMITTED);
- pushFront(I, DECOMMITTED);
- }
+ for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
releaseIfOlderThan(Entries[I], Time);
- }
}
HybridMutex Mutex;
@@ -642,12 +579,10 @@ template <typename Config> class MapAllocatorCache {
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};
- // EntryLists stores the head and tail indices of all
- // lists being used to store valid cache entries.
- // Currently there are lists storing COMMITTED and DECOMMITTED entries.
- // COMMITTED entries are those that are not madvise()'d
- // DECOMMITTED entries are those that are madvise()'d
- ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
+ // The LRUHead of the cache is the most recently used cache entry
+ u16 LRUHead GUARDED_BY(Mutex) = 0;
+ // The LRUTail of the cache is the least recently used cache entry
+ u16 LRUTail GUARDED_BY(Mutex) = 0;
// The AvailableHead is the top of the stack of available entries
u16 AvailableHead GUARDED_BY(Mutex) = 0;
};