[compiler-rt] 2f04b68 - Revert "[scudo] Support partial concurrent page release in SizeClassAllocator64"

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 11 13:34:43 PDT 2023


Author: Chia-hung Duan
Date: 2023-07-11T20:33:58Z
New Revision: 2f04b688aadef747172a8f4a7d1658dc049b1e24

URL: https://github.com/llvm/llvm-project/commit/2f04b688aadef747172a8f4a7d1658dc049b1e24
DIFF: https://github.com/llvm/llvm-project/commit/2f04b688aadef747172a8f4a7d1658dc049b1e24.diff

LOG: Revert "[scudo] Support partial concurrent page release in SizeClassAllocator64"

We should merge the two top TransferBatches so that the range marking can
be done correctly (a sketch of the idea follows below).

This reverts commit 57ae8a2a1acb1aa1a5f55c29b1b338a780d649d5.

Differential Revision: https://reviews.llvm.org/D155009
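
For context: the reverted change appended `Cur->Batches` onto `BG->Batches`
when merging two `BatchGroup`s with the same `CompactPtrGroupBase` (see the
removed `append_back` call and TODO in the diff below), which can leave two
possibly-partial leading `TransferBatch`es in one list; per the message
above, those need to be merged for the range marking to work. A minimal
sketch of that missing merge step, assuming hypothetical simplified types
(`appendAndMergeTop`, the `std::deque` container, and `MaxCached = 16` are
illustrative stand-ins, not scudo's actual `TransferBatch` API):

// Hypothetical, simplified illustration -- not scudo's real TransferBatch.
// It shows the idea from the removed TODO: fold the two leading, possibly
// partial batches into one when they fit, before appending the lists.
#include <cstdint>
#include <deque>
#include <vector>

struct TransferBatch {
  static constexpr uint32_t MaxCached = 16; // assumed capacity
  std::vector<uint64_t> Blocks;             // compact block pointers
  uint32_t getCount() const { return static_cast<uint32_t>(Blocks.size()); }
};

// Append `Src` onto `Dst`, merging the two front batches when they fit
// into a single TransferBatch (the case named by the removed TODO).
void appendAndMergeTop(std::deque<TransferBatch> &Dst,
                       std::deque<TransferBatch> &Src) {
  if (!Dst.empty() && !Src.empty() &&
      Dst.front().getCount() + Src.front().getCount() <=
          TransferBatch::MaxCached) {
    TransferBatch &Head = Dst.front();
    TransferBatch &SrcHead = Src.front();
    Head.Blocks.insert(Head.Blocks.end(), SrcHead.Blocks.begin(),
                       SrcHead.Blocks.end());
    Src.pop_front();
  }
  // The remaining batches are full, so a plain append preserves the
  // "only the head may be partial" shape.
  Dst.insert(Dst.end(), Src.begin(), Src.end());
  Src.clear();
}

With the heads folded together, at most the list head stays partial, which
is the shape the range marking relies on per the revert message.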

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary64.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index c9958ea82039ac..00bc2c4d89fb35 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -984,6 +984,8 @@ template <typename Config> class SizeClassAllocator64 {
   NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                  ReleaseToOS ReleaseType = ReleaseToOS::Normal)
       REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+    // TODO(chiahungduan): Release `FLLock` in step 3 & 4 described in the
+    // comment below.
     ScopedLock L(Region->FLLock);
 
     const uptr BlockSize = getSizeByClassId(ClassId);
@@ -1008,6 +1010,10 @@ template <typename Config> class SizeClassAllocator64 {
       return 0;
     }
 
+    // This is only used for debugging to ensure the consistency of the number
+    // of groups.
+    uptr NumberOfBatchGroups = Region->FreeListInfo.BlockList.size();
+
     // ====================================================================== //
     // 2. Determine which groups can release the pages. Use a heuristic to
     //    gather groups that are candidates for doing a release.
@@ -1023,71 +1029,39 @@ template <typename Config> class SizeClassAllocator64 {
     if (GroupsToRelease.empty())
       return 0;
 
-    // Ideally, we should use a class like `ScopedUnlock`. However, this form of
-    // unlocking is not supported by the thread-safety analysis. See
-    // https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
-    // for more details.
-    // Put it as local class so that we can mark the ctor/dtor with proper
-    // annotations associated to the target lock. Note that accessing the
-    // function variable in local class only works in thread-safety annotations.
-    // TODO: Implement general `ScopedUnlock` when it's supported.
-    class FLLockScopedUnlock {
-    public:
-      FLLockScopedUnlock(RegionInfo *Region) RELEASE(Region->FLLock)
-          : R(Region) {
-        R->FLLock.assertHeld();
-        R->FLLock.unlock();
-      }
-      ~FLLockScopedUnlock() ACQUIRE(Region->FLLock) { R->FLLock.lock(); }
-
-    private:
-      RegionInfo *R;
-    };
-
-    // Note that we have extracted the `GroupsToRelease` from region freelist.
-    // It's safe to let pushBlocks()/popBatches() access the remaining region
-    // freelist. In the steps 3 and 4, we will temporarily release the FLLock
-    // and lock it again before step 5.
-
-    uptr ReleasedBytes = 0;
-    {
-      FLLockScopedUnlock UL(Region);
-      // ==================================================================== //
-      // 3. Mark the free blocks in `GroupsToRelease` in the
-      //    `PageReleaseContext`. Then we can tell which pages are in-use by
-      //    querying `PageReleaseContext`.
-      // ==================================================================== //
-      PageReleaseContext Context = markFreeBlocks(
-          Region, BlockSize, AllocatedUserEnd, CompactPtrBase, GroupsToRelease);
-      if (UNLIKELY(!Context.hasBlockMarked())) {
-        ScopedLock L(Region->FLLock);
-        mergeGroupsToReleaseBack(Region, GroupsToRelease);
-        return 0;
-      }
+    // ====================================================================== //
+    // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
+    //    Then we can tell which pages are in-use by querying
+    //    `PageReleaseContext`.
+    // ====================================================================== //
+    PageReleaseContext Context = markFreeBlocks(
+        Region, BlockSize, AllocatedUserEnd, CompactPtrBase, GroupsToRelease);
+    if (UNLIKELY(!Context.hasBlockMarked())) {
+      mergeGroupsToReleaseBack(Region, GroupsToRelease, NumberOfBatchGroups);
+      return 0;
+    }
 
-      // ==================================================================== //
-      // 4. Release the unused physical pages back to the OS.
-      // ==================================================================== //
-      RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
-                                              Region->RegionBeg,
-                                              Context.getReleaseOffset());
-      auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
-      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-      if (Recorder.getReleasedRangesCount() > 0) {
-        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
-        Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
-        Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
-      }
-      Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
-      ReleasedBytes = Recorder.getReleasedBytes();
+    // ====================================================================== //
+    // 4. Release the unused physical pages back to the OS.
+    // ====================================================================== //
+    RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
+                                            Region->RegionBeg,
+                                            Context.getReleaseOffset());
+    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+    if (Recorder.getReleasedRangesCount() > 0) {
+      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
     }
+    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
 
     // ====================================================================== //
     // 5. Merge the `GroupsToRelease` back to the freelist.
     // ====================================================================== //
-    mergeGroupsToReleaseBack(Region, GroupsToRelease);
+    mergeGroupsToReleaseBack(Region, GroupsToRelease, NumberOfBatchGroups);
 
-    return ReleasedBytes;
+    return Recorder.getReleasedBytes();
   }
 
   bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
@@ -1324,7 +1298,7 @@ template <typename Config> class SizeClassAllocator64 {
   markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
                  const uptr AllocatedUserEnd, const uptr CompactPtrBase,
                  SinglyLinkedList<BatchGroup> &GroupsToRelease)
-      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+      REQUIRES(Region->MMLock, Region->FLLock) {
     const uptr GroupSize = (1U << GroupSizeLog);
     auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
       return decompactPtrInternal(CompactPtrBase, CompactPtr);
@@ -1397,22 +1371,9 @@ template <typename Config> class SizeClassAllocator64 {
   }
 
   void mergeGroupsToReleaseBack(RegionInfo *Region,
-                                SinglyLinkedList<BatchGroup> &GroupsToRelease)
+                                SinglyLinkedList<BatchGroup> &GroupsToRelease,
+                                const uptr NumberOfBatchGroups)
       REQUIRES(Region->MMLock, Region->FLLock) {
-    // After merging two freelists, we may have redundant `BatchGroup`s that
-    // need to be recycled. The number of unused `BatchGroup`s is expected to be
-    // small. Pick a constant which is inferred from real programs.
-    constexpr uptr MaxUnusedSize = 8;
-    CompactPtrT Blocks[MaxUnusedSize];
-    u32 Idx = 0;
-    RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
-    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
-    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
-    // should just push it back to the freelist when we merge two `BatchGroup`s.
-    // This logic hasn't been implemented because we haven't supported releasing
-    // pages in `BatchClassRegion`.
-    DCHECK_NE(BatchClassRegion, Region);
-
     // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
     // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
     // sorted.
@@ -1425,7 +1386,8 @@ template <typename Config> class SizeClassAllocator64 {
         break;
       }
 
-      DCHECK(!BG->Batches.empty());
+      DCHECK_NE(BG->CompactPtrGroupBase,
+                GroupsToRelease.front()->CompactPtrGroupBase);
 
       if (BG->CompactPtrGroupBase <
           GroupsToRelease.front()->CompactPtrGroupBase) {
@@ -1434,32 +1396,6 @@ template <typename Config> class SizeClassAllocator64 {
         continue;
       }
 
-      BatchGroup *Cur = GroupsToRelease.front();
-      GroupsToRelease.pop_front();
-
-      if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
-        BG->PushedBlocks += Cur->PushedBlocks;
-        // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
-        // collecting the `GroupsToRelease`.
-        BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
-        // Note that the first TransferBatches in both `Batches` may not be
-        // full.
-        // TODO: We may want to merge them if they can fit into one
-        // `TransferBatch`.
-        BG->Batches.append_back(&Cur->Batches);
-
-        if (Idx == MaxUnusedSize) {
-          ScopedLock L(BatchClassRegion->FLLock);
-          pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
-          Idx = 0;
-        }
-        Blocks[Idx++] =
-            compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
-        Prev = BG;
-        BG = BG->Next;
-        continue;
-      }
-
       // At here, the `BG` is the first BatchGroup with CompactPtrGroupBase
       // larger than the first element in `GroupsToRelease`. We need to insert
       // `GroupsToRelease::front()` (which is `Cur` below)  before `BG`.
@@ -1470,6 +1406,8 @@ template <typename Config> class SizeClassAllocator64 {
       //
       // Afterwards, we don't need to advance `BG` because the order between
       // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
+      BatchGroup *Cur = GroupsToRelease.front();
+      GroupsToRelease.pop_front();
       if (Prev == nullptr)
         Region->FreeListInfo.BlockList.push_front(Cur);
       else
@@ -1478,10 +1416,8 @@ template <typename Config> class SizeClassAllocator64 {
       Prev = Cur;
     }
 
-    if (Idx != 0) {
-      ScopedLock L(BatchClassRegion->FLLock);
-      pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
-    }
+    DCHECK_EQ(Region->FreeListInfo.BlockList.size(), NumberOfBatchGroups);
+    (void)NumberOfBatchGroups;
 
     if (SCUDO_DEBUG) {
       BatchGroup *Prev = Region->FreeListInfo.BlockList.front();


        

