[compiler-rt] a7f0195 - [scudo] secondary allocator cache optimal-fit retrieval

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 8 16:40:21 PDT 2023


Author: Fernando
Date: 2023-08-08T23:37:31Z
New Revision: a7f0195b440499483839ba284b7c24c4c707a866

URL: https://github.com/llvm/llvm-project/commit/a7f0195b440499483839ba284b7c24c4c707a866
DIFF: https://github.com/llvm/llvm-project/commit/a7f0195b440499483839ba284b7c24c4c707a866.diff

LOG: [scudo] secondary allocator cache optimal-fit retrieval

Changed the cache retrieval algorithm to an "optimal-fit" strategy, which
immediately returns blocks whose size is within 110% of the requested
size. This reduces memory waste while still allowing for an early return
without traversing the entire array of cached blocks.
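
For illustration, here is a compact, self-contained sketch of the new
selection loop (plain C++; CachedBlock, roundDown, and the parameters
below are simplified stand-ins for the scudo types, not the actual
implementation):

#include <cstdint>
#include <limits>

// Simplified stand-in for scudo's cached-block metadata (illustrative).
struct CachedBlock {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
  bool Valid = false;
};

// Round X down to a power-of-two boundary.
static uintptr_t roundDown(uintptr_t X, uintptr_t Boundary) {
  return X & ~(Boundary - 1);
}

// Returns the index of the selected entry, or -1 if nothing fits.
// Size is the requested allocation size and HeadersSize the per-block
// header overhead (both illustrative parameters).
int retrieveOptimalFit(const CachedBlock *Entries, int Count,
                       uintptr_t Size, uintptr_t Alignment,
                       uintptr_t HeadersSize) {
  constexpr uintptr_t FragmentedBytesDivisor = 10; // 10% waste threshold
  int OptimalFitIndex = -1;
  uintptr_t MinDiff = std::numeric_limits<uintptr_t>::max();
  for (int I = 0; I < Count; I++) {
    if (!Entries[I].Valid)
      continue;
    const uintptr_t CommitBase = Entries[I].CommitBase;
    const uintptr_t CommitSize = Entries[I].CommitSize;
    const uintptr_t AllocPos =
        roundDown(CommitBase + CommitSize - Size, Alignment);
    const uintptr_t HeaderPos = AllocPos - HeadersSize;
    // Skip blocks that cannot hold the request (the > check also catches
    // unsigned wraparound from the subtractions above).
    if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
      continue;
    // Bytes left unused at the front of the block if this entry is chosen.
    const uintptr_t Diff = HeaderPos - CommitBase;
    // Early exit: the waste is at most 10% of the used portion.
    const uintptr_t MaxAllowedFragmentedBytes =
        (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
    if (Diff <= MaxAllowedFragmentedBytes)
      return I;
    // Otherwise remember the tightest fit seen so far.
    if (Diff < MinDiff) {
      MinDiff = Diff;
      OptimalFitIndex = I;
    }
  }
  return OptimalFitIndex;
}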

Reviewed By: cferris, Chia-hungDuan

Differential Revision: https://reviews.llvm.org/D157155

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/secondary.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 06076fdd0aa553..f2170f274ecce2 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -279,14 +279,19 @@ template <typename Config> class MapAllocatorCache {
                 LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    // After testing several options, 10% of the requested size proved to
+    // be the optimal choice for retrieving cached blocks.
+    constexpr u32 FragmentedBytesDivisor = 10;
     bool Found = false;
     CachedBlock Entry;
-    uptr HeaderPos = 0;
+    uptr EntryHeaderPos = 0;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
+      u32 OptimalFitIndex = 0;
+      uptr MinDiff = UINTPTR_MAX;
       for (u32 I = 0; I < MaxCount; I++) {
         if (!Entries[I].isValid())
           continue;
@@ -294,7 +299,7 @@ template <typename Config> class MapAllocatorCache {
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos = AllocPos - HeadersSize;
+        const uptr HeaderPos = AllocPos - HeadersSize;
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
@@ -302,18 +307,36 @@ template <typename Config> class MapAllocatorCache {
           continue;
         }
         Found = true;
-        Entry = Entries[I];
-        Entries[I].invalidate();
+        const uptr Diff = HeaderPos - CommitBase;
+        // Immediately use a cached block if its size is close enough to the
+        // requested size.
+        const uptr MaxAllowedFragmentedBytes =
+            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+        if (Diff <= MaxAllowedFragmentedBytes) {
+          OptimalFitIndex = I;
+          EntryHeaderPos = HeaderPos;
+          break;
+        }
+        // Keep track of the smallest cached block that is greater than
+        // (AllocSize + HeaderSize).
+        if (Diff > MinDiff)
+          continue;
+        OptimalFitIndex = I;
+        MinDiff = Diff;
+        EntryHeaderPos = HeaderPos;
+      }
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        Entries[OptimalFitIndex].invalidate();
         EntriesCount--;
         SuccessfulRetrieves++;
-        break;
       }
     }
     if (!Found)
       return false;
 
     *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(HeaderPos));
+        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
     *Zeroed = Entry.Time == 0;
     if (useMemoryTagging<Config>(Options))
       Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);

