[compiler-rt] aeef9c0 - [scudo] CanCache should use Size + HeadersSize instead of RoundedSize

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 31 13:12:23 PDT 2023


Author: Chia-hung Duan
Date: 2023-07-31T20:10:43Z
New Revision: aeef9c0274c6419d15df5cb2bb885aa3072423d6

URL: https://github.com/llvm/llvm-project/commit/aeef9c0274c6419d15df5cb2bb885aa3072423d6
DIFF: https://github.com/llvm/llvm-project/commit/aeef9c0274c6419d15df5cb2bb885aa3072423d6.diff

LOG: [scudo] CanCache should use Size + HeadersSize instead of RoundedSize

A cached block may already have a suitably aligned address for a given
alignment, so we don't have to round up the size in advance. The
rounding should only happen when determining the availability of a
cached block.
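
For illustration, a minimal standalone sketch of the before/after size
computations (assumptions: `roundUp` mirrors scudo's helper, and the
`HeadersSize` constant is a hypothetical stand-in for
`Chunk::getHeaderSize() + LargeBlock::getHeaderSize()`):

#include <cstdint>

using uptr = std::uintptr_t;

// Round X up to the next multiple of Boundary (a power of two), as in scudo.
constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Hypothetical stand-in for Chunk::getHeaderSize() + LargeBlock::getHeaderSize().
constexpr uptr HeadersSize = 48;

// Before the patch: the cache-eligibility size was rounded up for the
// alignment first, over-estimating what an already-aligned cached block needs.
uptr roundedSize(uptr Size, uptr Alignment, uptr PageSize) {
  uptr Rounded = roundUp(roundUp(Size, Alignment) + HeadersSize, PageSize);
  if (Alignment > PageSize)
    Rounded += Alignment - PageSize;
  return Rounded;
}

// After the patch: only Size + HeadersSize is required up front; alignment is
// handled per-entry when a candidate cached block is examined in retrieve().
uptr minNeededSizeForCache(uptr Size, uptr PageSize) {
  return roundUp(Size + HeadersSize, PageSize);
}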

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D156582

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/secondary.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index d72ae9bb0c3650..5e76bc6432349a 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -90,7 +90,8 @@ template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
   bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
+                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
+                UNUSED bool *Zeroed) {
     return false;
   }
   void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
@@ -262,7 +263,7 @@ template <typename Config> class MapAllocatorCache {
       Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment,
+  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                 LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
@@ -280,8 +281,7 @@ template <typename Config> class MapAllocatorCache {
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos =
-            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+        HeaderPos = AllocPos - HeadersSize;
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
@@ -461,6 +461,10 @@ template <typename Config> class MapAllocator {
     return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
   }
 
+  static constexpr uptr getHeadersSize() {
+    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
+  }
+
   void disable() NO_THREAD_SAFETY_ANALYSIS {
     Mutex.lock();
     Cache.disable();
@@ -526,17 +530,16 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
   Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
   const uptr PageSize = getPageSizeCached();
-  uptr RoundedSize =
-      roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
-                  Chunk::getHeaderSize(),
-              PageSize);
-  if (Alignment > PageSize)
-    RoundedSize += Alignment - PageSize;
 
-  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
+  // Note that cached blocks may already have aligned addresses, so we simply
+  // pass the required size (`Size` + `getHeadersSize()`) for the cache lookup.
+  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
+
+  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
     LargeBlock::Header *H;
     bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
+                       &Zeroed)) {
       const uptr BlockEnd = H->CommitBase + H->CommitSize;
       if (BlockEndPtr)
         *BlockEndPtr = BlockEnd;
@@ -560,6 +563,11 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     }
   }
 
+  uptr RoundedSize =
+      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
+  if (Alignment > PageSize)
+    RoundedSize += Alignment - PageSize;
+
   ReservedMemoryT ReservedMemory;
   const uptr MapSize = RoundedSize + 2 * PageSize;
   ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
@@ -604,8 +612,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
     return nullptr;
   }
-  const uptr HeaderPos =
-      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+  const uptr HeaderPos = AllocPos - getHeadersSize();
   LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
       LargeBlock::addHeaderTag<Config>(HeaderPos));
   if (useMemoryTagging<Config>(Options))
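
To see why the alignment rounding can wait until lookup time, here is a
simplified sketch (not the scudo source) of the per-entry check in
`retrieve`; the real code also rejects entries that would leave too much
of the block unused, which is omitted here:

#include <cstdint>

using uptr = std::uintptr_t;

// Round X down to a multiple of Boundary (a power of two), as in scudo.
constexpr uptr roundDown(uptr X, uptr Boundary) {
  return X & ~(Boundary - 1);
}

bool entryFits(uptr CommitBase, uptr CommitSize, uptr Size, uptr Alignment,
               uptr HeadersSize) {
  // Place the user allocation as high in the block as the alignment allows.
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  // With this patch the combined header size is passed in, instead of being
  // recomputed from Chunk::getHeaderSize() + LargeBlock::getHeaderSize().
  const uptr HeaderPos = AllocPos - HeadersSize;
  // An entry too small for Size + HeadersSize makes HeaderPos wrap around
  // (unsigned arithmetic); this comparison catches that.
  if (HeaderPos > CommitBase + CommitSize)
    return false;
  return HeaderPos >= CommitBase;
}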