[compiler-rt] 6860d1c - [scudo] Accessing PossibleRegions requires lock

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 23 20:19:49 PST 2023


Author: Chia-hung Duan
Date: 2023-02-24T04:15:33Z
New Revision: 6860d1c6e44f3e0a0d182f58b3848f720b5ade59

URL: https://github.com/llvm/llvm-project/commit/6860d1c6e44f3e0a0d182f58b3848f720b5ade59
DIFF: https://github.com/llvm/llvm-project/commit/6860d1c6e44f3e0a0d182f58b3848f720b5ade59.diff

LOG: [scudo] Accessing PossibleRegions requires lock

It would be preferable to use a `std::shared_mutex`-style mutex here; we
will switch to one when it becomes available.

Differential Revision: https://reviews.llvm.org/D144691
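
A minimal sketch of the shared_mutex-style direction the message alludes to,
in standard C++17. The GuardedByteMap type and its members below are
hypothetical stand-ins for PossibleRegions and its guard, not Scudo code:

#include <array>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <shared_mutex>

// Hypothetical sketch only; Scudo's ByteMap/HybridMutex are not shown here.
struct GuardedByteMap {
  std::shared_mutex Mutex;               // shared_mutex-style guard
  std::array<std::uint8_t, 1024> Map{};  // stands in for PossibleRegions

  std::uint8_t get(std::size_t Index) {
    std::shared_lock<std::shared_mutex> L(Mutex); // readers share the lock
    return Map[Index];
  }

  void set(std::size_t Index, std::uint8_t Value) {
    std::unique_lock<std::shared_mutex> L(Mutex); // writers hold it exclusively
    Map[Index] = Value;
  }
};

With such a primitive, read-only uses like the SkipRegion lambda below could
take the lock in shared mode, while PossibleRegions.set() in populateFreeList
would still need it exclusively.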

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index aeed972518561..df4053a6dde4e 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -87,19 +87,27 @@ template <typename Config> class SizeClassAllocator32 {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
-  void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
-    while (NumberOfStashedRegions > 0)
-      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
-            RegionSize);
+  void unmapTestOnly() {
+    {
+      ScopedLock L(RegionsStashMutex);
+      while (NumberOfStashedRegions > 0) {
+        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+              RegionSize);
+      }
+    }
+
     uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
     for (uptr I = 0; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
+      ScopedLock L(Sci->Mutex);
       if (Sci->MinRegionIndex < MinRegionIndex)
         MinRegionIndex = Sci->MinRegionIndex;
       if (Sci->MaxRegionIndex > MaxRegionIndex)
         MaxRegionIndex = Sci->MaxRegionIndex;
       *Sci = {};
     }
+
+    ScopedLock L(ByteMapMutex);
     for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
       if (PossibleRegions[I])
         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
@@ -192,11 +200,11 @@ template <typename Config> class SizeClassAllocator32 {
     }
     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
     RegionsStashMutex.lock();
-    PossibleRegions.disable();
+    ByteMapMutex.lock();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
-    PossibleRegions.enable();
+    ByteMapMutex.unlock();
     RegionsStashMutex.unlock();
     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
     for (uptr I = 0; I < NumClasses; I++) {
@@ -219,7 +227,11 @@ template <typename Config> class SizeClassAllocator32 {
       if (Sci->MaxRegionIndex > MaxRegionIndex)
         MaxRegionIndex = Sci->MaxRegionIndex;
     }
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+
+    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
+    ByteMapMutex.assertHeld();
+
+    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
       if (PossibleRegions[I] &&
           (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
         const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
@@ -228,6 +240,7 @@ template <typename Config> class SizeClassAllocator32 {
         for (uptr Block = From; Block < To; Block += BlockSize)
           Callback(Block);
       }
+    }
   }
 
   void getStats(ScopedString *Str) {
@@ -370,6 +383,7 @@ template <typename Config> class SizeClassAllocator32 {
         Sci->MinRegionIndex = RegionIndex;
       if (RegionIndex > Sci->MaxRegionIndex)
         Sci->MaxRegionIndex = RegionIndex;
+      ScopedLock L(ByteMapMutex);
       PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
     }
     return Region;
@@ -815,6 +829,7 @@ template <typename Config> class SizeClassAllocator32 {
       return 0;
 
     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+      ScopedLock L(ByteMapMutex);
       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
     };
     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
@@ -832,9 +847,9 @@ template <typename Config> class SizeClassAllocator32 {
 
   SizeClassInfo SizeClassInfoArray[NumClasses] = {};
 
+  HybridMutex ByteMapMutex;
   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
-  // FIXME: There is no dedicated lock for `PossibleRegions`.
-  ByteMap PossibleRegions = {};
+  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
   atomic_s32 ReleaseToOsIntervalMs = {};
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
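
The net effect of the patch is that PossibleRegions is now annotated
GUARDED_BY(ByteMapMutex), so Clang's thread safety analysis (-Wthread-safety)
flags any access made without the lock. Below is a self-contained sketch of
that mechanism; the THREAD_ANNOTATION macros and the Mutex/ScopedLock classes
are simplified stand-ins for Scudo's thread_annotations.h, HybridMutex and
ScopedLock, following the attribute spellings in the Clang documentation:

#include <mutex>

// Simplified stand-ins; not Scudo's actual annotation or mutex headers.
#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)
#endif
#define CAPABILITY(x) THREAD_ANNOTATION(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))

class CAPABILITY("mutex") Mutex {
public:
  void lock() ACQUIRE() { M.lock(); }     // acquires the analysis capability
  void unlock() RELEASE() { M.unlock(); } // releases it again

private:
  std::mutex M; // the actual synchronization primitive behind the capability
};

class SCOPED_CAPABILITY ScopedLock {
public:
  explicit ScopedLock(Mutex &M) ACQUIRE(M) : Mu(M) { M.lock(); }
  ~ScopedLock() RELEASE() { Mu.unlock(); }

private:
  Mutex &Mu;
};

Mutex ByteMapMutex;
// 0 means unused, otherwise ClassId + 1, mirroring PossibleRegions above.
unsigned char PossibleRegions[1024] GUARDED_BY(ByteMapMutex) = {};

unsigned char readLocked(unsigned I) {
  ScopedLock L(ByteMapMutex); // lock held across the guarded access: no warning
  return PossibleRegions[I];
}

unsigned char readUnlocked(unsigned I) {
  // -Wthread-safety: reading variable 'PossibleRegions' requires holding
  // mutex 'ByteMapMutex' -- the class of bug this patch closes.
  return PossibleRegions[I];
}

The assertHeld() call added to iterateOverBlocks serves the same goal from the
other side: iteration only runs while the allocator is disabled (disable() has
taken ByteMapMutex), so the code asserts that precondition instead of
re-acquiring the lock.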


        

