[compiler-rt] f0703cb - [scudo][standalone] Correct min/max region indices

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 16 12:43:27 PST 2020


Author: Kostya Kortchinsky
Date: 2020-11-16T12:43:10-08:00
New Revision: f0703cb1b24be4f915257f91a290a3d92ac6cd4f

URL: https://github.com/llvm/llvm-project/commit/f0703cb1b24be4f915257f91a290a3d92ac6cd4f
DIFF: https://github.com/llvm/llvm-project/commit/f0703cb1b24be4f915257f91a290a3d92ac6cd4f.diff

LOG: [scudo][standalone] Correct min/max region indices

The original code to keep track of the minimum and maximum indices
of allocated 32-bit primary regions was sketchy at best.

`MinRegionIndex` & `MaxRegionIndex` were shared between all size
classes, and could (theoretically) have been updated concurrently. I
never saw this materialize anywhere, but it's still not proper.

This change makes those min/max indices class specific rather than
global: classes are locked while growing, so there is no concurrency
there. This also allows us to simplify some of the 32-bit release
code, which no longer has to walk all the regions to find the proper
min/max for a class. `iterateOverBlocks` and `unmapTestOnly` no longer
have a global min/max readily available, but they are used far less
often, so recomputing the range there is fine.

Differential Revision: https://reviews.llvm.org/D91106
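
As a rough illustration of the new scheme (a minimal standalone sketch,
not the actual scudo code: the real `SizeClassInfo` has many more fields
and uses scudo's own lock type, and `recordRegion`/`globalRange` are
hypothetical helper names), each size class tracks its own bounds under
its own lock, and the rare whole-allocator operations fold those
per-class bounds into a global range:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>
  #include <mutex>

  using uptr = std::uintptr_t;

  constexpr uptr NumClasses = 4;
  constexpr uptr NumRegions = 1024;

  struct SizeClassInfo {
    std::mutex Mutex;                  // Held whenever the class grows.
    uptr MinRegionIndex = NumRegions;  // Sentinel: no region allocated yet.
    uptr MaxRegionIndex = 0;
  };

  SizeClassInfo Classes[NumClasses];

  // A class only grows under its own mutex, so the per-class bounds
  // need no extra synchronization.
  void recordRegion(uptr ClassId, uptr RegionIndex) {
    SizeClassInfo &Sci = Classes[ClassId];
    std::lock_guard<std::mutex> L(Sci.Mutex);
    Sci.MinRegionIndex = std::min(Sci.MinRegionIndex, RegionIndex);
    Sci.MaxRegionIndex = std::max(Sci.MaxRegionIndex, RegionIndex);
  }

  // The infrequent whole-allocator operations (iterate, unmap) rebuild
  // the global range by folding the per-class bounds.
  void globalRange(uptr &Min, uptr &Max) {
    Min = NumRegions;
    Max = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      Min = std::min(Min, Classes[I].MinRegionIndex);
      Max = std::max(Max, Classes[I].MaxRegionIndex);
    }
  }

  int main() {
    recordRegion(1, 42);
    recordRegion(2, 7);
    uptr Min, Max;
    globalRange(Min, Max);
    printf("regions in [%lu, %lu]\n", (unsigned long)Min,
           (unsigned long)Max); // regions in [7, 42]
    return 0;
  }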

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 21f579e2ba62..016a96bd2dfa 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -70,7 +70,6 @@ class SizeClassAllocator32 {
       reportError("SizeClassAllocator32 is not supported on Fuchsia");
 
     PossibleRegions.initLinkerInitialized();
-    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
 
     u32 Seed;
     const u64 Time = getMonotonicTime();
@@ -81,6 +80,8 @@ class SizeClassAllocator32 {
     for (uptr I = 0; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
       Sci->RandState = getRandomU32(&Seed);
+      // Sci->MaxRegionIndex is already initialized to 0.
+      Sci->MinRegionIndex = NumRegions;
       // See comment in the 64-bit primary about releasing smaller size classes.
       Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
                         (getSizeByClassId(I) >= (PageSize / 32));
@@ -98,7 +99,15 @@ class SizeClassAllocator32 {
     while (NumberOfStashedRegions > 0)
       unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
             RegionSize);
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      if (Sci->MinRegionIndex < MinRegionIndex)
+        MinRegionIndex = Sci->MinRegionIndex;
+      if (Sci->MaxRegionIndex > MaxRegionIndex)
+        MaxRegionIndex = Sci->MaxRegionIndex;
+    }
+    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
       if (PossibleRegions[I])
         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
     PossibleRegions.unmapTestOnly();
@@ -156,6 +165,14 @@ class SizeClassAllocator32 {
   }
 
   template <typename F> void iterateOverBlocks(F Callback) {
+    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      if (Sci->MinRegionIndex < MinRegionIndex)
+        MinRegionIndex = Sci->MinRegionIndex;
+      if (Sci->MaxRegionIndex > MaxRegionIndex)
+        MaxRegionIndex = Sci->MaxRegionIndex;
+    }
     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
       if (PossibleRegions[I] &&
           (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
@@ -207,18 +224,14 @@ class SizeClassAllocator32 {
     return TotalReleasedBytes;
   }
 
-  static bool useMemoryTagging(Options Options) {
-    (void)Options;
-    return false;
-  }
+  static bool useMemoryTagging(UNUSED Options Options) { return false; }
   void disableMemoryTagging() {}
 
   const char *getRegionInfoArrayAddress() const { return nullptr; }
   static uptr getRegionInfoArraySize() { return 0; }
 
-  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
-    (void)RegionInfoData;
-    (void)Ptr;
+  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
+                                    UNUSED uptr Ptr) {
     return {};
   }
 
@@ -252,6 +265,10 @@ class SizeClassAllocator32 {
     bool CanRelease;
     u32 RandState;
     uptr AllocatedUser;
+    // Lowest & highest region index allocated for this size class, to avoid
+    // looping through the whole NumRegions.
+    uptr MinRegionIndex;
+    uptr MaxRegionIndex;
     ReleaseToOsInfo ReleaseInfo;
   };
   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
@@ -287,7 +304,7 @@ class SizeClassAllocator32 {
     return Region;
   }
 
-  uptr allocateRegion(uptr ClassId) {
+  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     uptr Region = 0;
     {
@@ -298,11 +315,12 @@ class SizeClassAllocator32 {
     if (!Region)
       Region = allocateRegionSlow();
     if (LIKELY(Region)) {
+      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
       const uptr RegionIndex = computeRegionId(Region);
-      if (RegionIndex < MinRegionIndex)
-        MinRegionIndex = RegionIndex;
-      if (RegionIndex > MaxRegionIndex)
-        MaxRegionIndex = RegionIndex;
+      if (RegionIndex < Sci->MinRegionIndex)
+        Sci->MinRegionIndex = RegionIndex;
+      if (RegionIndex > Sci->MaxRegionIndex)
+        Sci->MaxRegionIndex = RegionIndex;
       PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
     }
     return Region;
@@ -327,7 +345,7 @@ class SizeClassAllocator32 {
       Offset = Sci->CurrentRegionAllocated;
     } else {
       DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
-      Region = allocateRegion(ClassId);
+      Region = allocateRegion(Sci, ClassId);
       if (UNLIKELY(!Region))
         return nullptr;
       C->getStats().add(StatMapped, RegionSize);
@@ -411,7 +429,7 @@ class SizeClassAllocator32 {
     const uptr BlockSize = getSizeByClassId(ClassId);
     const uptr PageSize = getPageSizeCached();
 
-    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
     const uptr BytesInFreeList =
         Sci->AllocatedUser -
         (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
@@ -446,39 +464,27 @@ class SizeClassAllocator32 {
       }
     }
 
-    DCHECK_GT(MinRegionIndex, 0U);
-    uptr First = 0;
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
-      if (PossibleRegions[I] - 1U == ClassId) {
-        First = I;
-        break;
-      }
-    }
-    uptr Last = 0;
-    for (uptr I = MaxRegionIndex; I >= MinRegionIndex; I--) {
-      if (PossibleRegions[I] - 1U == ClassId) {
-        Last = I;
-        break;
-      }
-    }
+    const uptr First = Sci->MinRegionIndex;
+    const uptr Last = Sci->MaxRegionIndex;
+    DCHECK_NE(Last, 0U);
+    DCHECK_LE(First, Last);
     uptr TotalReleasedBytes = 0;
+    const uptr Base = First * RegionSize;
+    const uptr NumberOfRegions = Last - First + 1U;
+    ReleaseRecorder Recorder(Base);
     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
     };
-    if (First && Last) {
-      const uptr Base = First * RegionSize;
-      const uptr NumberOfRegions = Last - First + 1U;
-      ReleaseRecorder Recorder(Base);
-      releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
-                            BlockSize, &Recorder, SkipRegion);
-      if (Recorder.getReleasedRangesCount() > 0) {
-        Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
-        Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
-        Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
-        TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
-      }
+    releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
+                          BlockSize, &Recorder, SkipRegion);
+    if (Recorder.getReleasedRangesCount() > 0) {
+      Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
     }
     Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
     return TotalReleasedBytes;
   }
 
@@ -486,10 +492,6 @@ class SizeClassAllocator32 {
 
   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
   ByteMap PossibleRegions;
-  // Keep track of the lowest & highest regions allocated to avoid looping
-  // through the whole NumRegions.
-  uptr MinRegionIndex;
-  uptr MaxRegionIndex;
   atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.




More information about the llvm-commits mailing list