[compiler-rt] [scudo] Reapply "Update secondary cache time-based release logic" (PR #110391)

via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 28 17:08:45 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Joshua Baehring (JoshuaMBa)

Changes:

This reapplies commit #107507. When MTE is enabled, released cache entries may be inserted into the cache; in that case, every released entry is inserted after `LastUnreleasedEntry` rather than at the LRU head. Additionally, when a cache entry is moved from `Quarantine` to `Entries`, its time field is only updated if the entry's memory has not been released.
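
To make the insertion policy concrete, here is a minimal, self-contained C++ sketch of the list manipulation described above. It is not the scudo implementation: `MiniCache`, `Block`, `link`, and the `main` driver are hypothetical names used only for illustration, while `LRUHead`, `LRUTail`, `LastUnreleasedEntry`, and `Time` mirror the fields touched by the patch.

```c++
// Minimal sketch of the insertion policy: unreleased entries (Time != 0) are
// pushed at the LRU head; released entries (Time == 0) are spliced in right
// after LastUnreleasedEntry, keeping the list partitioned as
// [unreleased: newest..oldest][released].
#include <cstdint>
#include <cstdio>

constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Block {
  uint64_t Time = 0; // 0 means the backing pages were released to the OS.
  uint16_t Next = InvalidEntry;
  uint16_t Prev = InvalidEntry;
};

struct MiniCache {
  Block Entries[8];
  uint16_t LRUHead = InvalidEntry;
  uint16_t LRUTail = InvalidEntry;
  uint16_t LastUnreleasedEntry = InvalidEntry;
  uint16_t Count = 0;

  // Link slot I (whose Time field is already set) into the LRU list.
  void link(uint16_t I) {
    if (Entries[I].Time == 0 && LastUnreleasedEntry != InvalidEntry) {
      // Released entry: goes after LastUnreleasedEntry, not at the head.
      Entries[I].Next = Entries[LastUnreleasedEntry].Next;
      Entries[I].Prev = LastUnreleasedEntry;
      Entries[LastUnreleasedEntry].Next = I;
      if (LRUTail == LastUnreleasedEntry)
        LRUTail = I;
      else
        Entries[Entries[I].Next].Prev = I;
    } else {
      // Unreleased entry (or an empty unreleased region): push at the head.
      Entries[I].Next = LRUHead;
      Entries[I].Prev = InvalidEntry;
      if (Count == 0)
        LRUTail = I;
      else
        Entries[LRUHead].Prev = I;
      LRUHead = I;
      if (LastUnreleasedEntry == InvalidEntry)
        LastUnreleasedEntry = I;
    }
    Count++;
  }
};

int main() {
  MiniCache C;
  C.Entries[0].Time = 100; C.link(0); // unreleased: head, becomes LastUnreleasedEntry
  C.Entries[1].Time = 200; C.link(1); // unreleased: new head
  C.Entries[2].Time = 0;   C.link(2); // released: spliced in after slot 0
  for (uint16_t I = C.LRUHead; I != InvalidEntry; I = C.Entries[I].Next)
    std::printf("slot %u time %llu\n", I, (unsigned long long)C.Entries[I].Time);
  // Prints slots in order 1, 0, 2: released entries stay behind all
  // unreleased ones, so the patch's releaseOlderThan() only has to walk
  // backwards from LastUnreleasedEntry until it hits an entry newer than
  // the cutoff.
}
```

The second half of the change is independent of this list layout: when an entry leaves `Quarantine`, its time is refreshed only if it is non-zero, so an already-released quarantined block keeps `Time == 0` and is filed into the released tail by the same insertion path.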

---
Full diff: https://github.com/llvm/llvm-project/pull/110391.diff


1 file affected:

- (modified) compiler-rt/lib/scudo/standalone/secondary.h (+56-32) 


``````````diff
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 2fae29e5a21687..a3a5848f509ac5 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -247,6 +247,7 @@ class MapAllocatorCache {
     // The cache is initially empty
     LRUHead = CachedBlock::InvalidEntry;
     LRUTail = CachedBlock::InvalidEntry;
+    LastUnreleasedEntry = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -321,9 +322,11 @@ class MapAllocatorCache {
         }
         CachedBlock PrevEntry = Quarantine[QuarantinePos];
         Quarantine[QuarantinePos] = Entry;
-        if (OldestTime == 0)
-          OldestTime = Entry.Time;
         Entry = PrevEntry;
+        // Update the entry time to reflect the time that the
+        // quarantined memory is placed in the Entries array
+        if (Entry.Time != 0)
+          Entry.Time = Time;
       }
 
       // All excess entries are evicted from the cache
@@ -334,16 +337,12 @@ class MapAllocatorCache {
       }
 
       insert(Entry);
-
-      if (OldestTime == 0)
-        OldestTime = Entry.Time;
     } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
 
     if (Interval >= 0) {
-      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     }
   }
@@ -525,20 +524,35 @@ class MapAllocatorCache {
 
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
+    Entries[FreeIndex] = Entry;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
+    // Check list order
+    if (EntriesCount > 1)
+      DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+
+    // Released entry goes after LastUnreleasedEntry rather than at LRUHead
+    if (Entry.Time == 0 && LastUnreleasedEntry != CachedBlock::InvalidEntry) {
+      Entries[FreeIndex].Next = Entries[LastUnreleasedEntry].Next;
+      Entries[FreeIndex].Prev = LastUnreleasedEntry;
+      Entries[LastUnreleasedEntry].Next = FreeIndex;
+      if (LRUTail == LastUnreleasedEntry) {
+        LRUTail = FreeIndex;
+      } else {
+        Entries[Entries[FreeIndex].Next].Prev = FreeIndex;
+      }
     } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+      Entries[FreeIndex].Next = LRUHead;
+      Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+      if (EntriesCount == 0) {
+        LRUTail = static_cast<u16>(FreeIndex);
+      } else {
+        Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+      }
+      LRUHead = static_cast<u16>(FreeIndex);
+      if (LastUnreleasedEntry == CachedBlock::InvalidEntry)
+        LastUnreleasedEntry = static_cast<u16>(FreeIndex);
     }
 
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
     EntriesCount++;
 
     // Availability stack should not have available entries when all entries
@@ -552,6 +566,9 @@ class MapAllocatorCache {
 
     Entries[I].invalidate();
 
+    if (I == LastUnreleasedEntry)
+      LastUnreleasedEntry = Entries[LastUnreleasedEntry].Prev;
+
     if (I == LRUHead)
       LRUHead = Entries[I].Next;
     else
@@ -593,35 +610,39 @@ class MapAllocatorCache {
     }
   }
 
-  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
-    if (!Entry.isValid() || !Entry.Time)
-      return;
-    if (Entry.Time > Time) {
-      if (OldestTime == 0 || Entry.Time < OldestTime)
-        OldestTime = Entry.Time;
-      return;
-    }
+  inline void release(CachedBlock &Entry) {
+    DCHECK(Entry.Time != 0);
     Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
     Entry.Time = 0;
   }
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!EntriesCount)
       return;
-    OldestTime = 0;
-    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
-      releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-      releaseIfOlderThan(Entries[I], Time);
-  }
 
+    // TODO: Add conditional to skip iteration over quarantine
+    // if quarantine is disabled
+    for (uptr I = 0; I < Config::getQuarantineSize(); I++) {
+      CachedBlock &ReleaseEntry = Quarantine[I];
+      if (!ReleaseEntry.isValid() || !ReleaseEntry.Time ||
+          ReleaseEntry.Time > Time)
+        continue;
+      release(ReleaseEntry);
+    }
+
+    // Release oldest entries first by releasing from decommit base
+    while (LastUnreleasedEntry != CachedBlock::InvalidEntry &&
+           Entries[LastUnreleasedEntry].Time <= Time) {
+      release(Entries[LastUnreleasedEntry]);
+      LastUnreleasedEntry = Entries[LastUnreleasedEntry].Prev;
+    }
+  }
   HybridMutex Mutex;
   u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
-  u64 OldestTime GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
@@ -636,6 +657,9 @@ class MapAllocatorCache {
   u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  // The LastUnreleasedEntry is the least recently used entry that has not
+  // been released
+  u16 LastUnreleasedEntry GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {

``````````



https://github.com/llvm/llvm-project/pull/110391

