[compiler-rt] 4c6b8bb - [scudo] separate cache retrieval logic

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 25 12:28:00 PDT 2023


Author: Fernando
Date: 2023-07-25T19:26:32Z
New Revision: 4c6b8bb87b3452d0bcef83cd0ea712d8426603b8

URL: https://github.com/llvm/llvm-project/commit/4c6b8bb87b3452d0bcef83cd0ea712d8426603b8
DIFF: https://github.com/llvm/llvm-project/commit/4c6b8bb87b3452d0bcef83cd0ea712d8426603b8.diff

LOG: [scudo] separate cache retrieval logic

Split cache::retrieve() into separate functions. One that retrieves
the cached block and another that sets the header and MTE environment.
These were split so that the retrieve function could be more easily
changed in the future and so that the retrieve function had the sole
purpose of retrieving a CachedBlock.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D155660

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/secondary.h

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 931805c4b29aeb..dbb6006acbbb7d 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -72,13 +72,20 @@ static inline void unmap(LargeBlock::Header *H) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
+namespace {
+struct CachedBlock {
+  uptr CommitBase = 0;
+  uptr CommitSize = 0;
+  uptr BlockBegin = 0;
+  MemMapT MemMap = {};
+  u64 Time = 0;
+};
+} // namespace
+
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
-    return false;
-  }
+  bool retrieve(UNUSED uptr Size, UNUSED CachedBlock &Entry) { return false; }
   void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
@@ -248,32 +255,18 @@ template <typename Config> class MapAllocatorCache {
       Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
-    const uptr PageSize = getPageSizeCached();
+  bool retrieve(uptr Size, CachedBlock &Entry) EXCLUDES(Mutex) {
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     bool Found = false;
-    CachedBlock Entry;
-    uptr HeaderPos = 0;
     {
       ScopedLock L(Mutex);
       if (EntriesCount == 0)
         return false;
       for (u32 I = 0; I < MaxCount; I++) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        if (!CommitBase)
-          continue;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos =
-            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
-        if (HeaderPos > CommitBase + CommitSize)
+        if (!Entries[I].CommitBase)
           continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+        if (Size > Entries[I].CommitSize)
           continue;
-        }
         Found = true;
         Entry = Entries[I];
         Entries[I].CommitBase = 0;
@@ -281,29 +274,7 @@ template <typename Config> class MapAllocatorCache {
         break;
       }
     }
-    if (!Found)
-      return false;
-
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(HeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Found;
   }
 
   bool canCache(uptr Size) {
@@ -383,14 +354,6 @@ template <typename Config> class MapAllocatorCache {
     }
   }
 
-  struct CachedBlock {
-    uptr CommitBase = 0;
-    uptr CommitSize = 0;
-    uptr BlockBegin = 0;
-    MemMapT MemMap = {};
-    u64 Time = 0;
-  };
-
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
     if (!Entry.CommitBase || !Entry.Time)
       return;
@@ -476,6 +439,27 @@ template <typename Config> class MapAllocator {
     }
   }
 
+  inline void setHeader(Options Options, CachedBlock &Entry,
+                        LargeBlock::Header *H, bool &Zeroed) {
+    Zeroed = Entry.Time == 0;
+    if (useMemoryTagging<Config>(Options)) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+      // Block begins after the LargeBlock::Header
+      uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+      if (Zeroed) {
+        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                  NewBlockBegin);
+      } else if (Entry.BlockBegin < NewBlockBegin) {
+        storeTags(Entry.BlockBegin, NewBlockBegin);
+      } else {
+        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+      }
+    }
+    H->CommitBase = Entry.CommitBase;
+    H->CommitSize = Entry.CommitSize;
+    H->MemMap = Entry.MemMap;
+  }
+
   bool canCache(uptr Size) { return Cache.canCache(Size); }
 
   bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
@@ -530,7 +514,15 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
   if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
     LargeBlock::Header *H;
     bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+    CachedBlock Entry;
+    if (Cache.retrieve(RoundedSize, Entry)) {
+      const uptr AllocPos =
+          roundDown(Entry.CommitBase + Entry.CommitSize - Size, Alignment);
+      const uptr HeaderPos =
+          AllocPos - LargeBlock::getHeaderSize() - Chunk::getHeaderSize();
+      H = reinterpret_cast<LargeBlock::Header *>(
+          LargeBlock::addHeaderTag<Config>(HeaderPos));
+      setHeader(Options, Entry, H, Zeroed);
       const uptr BlockEnd = H->CommitBase + H->CommitSize;
       if (BlockEndPtr)
         *BlockEndPtr = BlockEnd;


        


More information about the llvm-commits mailing list