[compiler-rt] 387452e - Revert "[scudo] Only prepare PageMap entry for partial region"

Chia-hung Duan via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 27 13:13:01 PST 2023


Author: Chia-hung Duan
Date: 2023-02-27T21:12:28Z
New Revision: 387452ec591c81def6d8869b23c2ab2f1c56f999

URL: https://github.com/llvm/llvm-project/commit/387452ec591c81def6d8869b23c2ab2f1c56f999
DIFF: https://github.com/llvm/llvm-project/commit/387452ec591c81def6d8869b23c2ab2f1c56f999.diff

LOG: Revert "[scudo] Only prepare PageMap entry for partial region"

This reverts commit 0a0b6fa4fbdf3bdeb300ddd58859f66b714b8bdf.
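
For context: the reverted commit made PageReleaseContext cover only the
sub-range of a region actually being released, by adding ReleaseSize and
ReleaseOffset parameters; this revert restores sizing the PageMap for the
whole region. Below is a minimal standalone sketch of the arithmetic on both
sides, assuming a fixed 4 KiB page size (roundUpTo, pagesCountFullRegion and
PartialRange are illustrative names for this sketch, not scudo's):

  #include <cstdint>
  using uptr = uintptr_t;

  constexpr uptr PageSize = 4096;
  constexpr uptr roundUpTo(uptr X, uptr B) { return (X + B - 1) / B * B; }

  // After this revert: PageMap counters always cover the entire region.
  uptr pagesCountFullRegion(uptr RegionSize) {
    return roundUpTo(RegionSize, PageSize) / PageSize;
  }

  // What the reverted commit did: cover only [ReleaseOffset,
  // ReleaseOffset + ReleaseSize), remembering how many leading pages to
  // skip so per-page indices could be rebased (cf. getPageIndex in the
  // release.h hunk below).
  struct PartialRange {
    uptr PagesCount;
    uptr ReleasePageOffset;
    PartialRange(uptr ReleaseSize, uptr ReleaseOffset)
        : PagesCount(roundUpTo(ReleaseSize, PageSize) / PageSize),
          ReleasePageOffset(ReleaseOffset / PageSize) {}
    // Rebased page index for an offset P within the region.
    uptr getPageIndex(uptr P) const { return P / PageSize - ReleasePageOffset; }
  };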

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/release.h
    compiler-rt/lib/scudo/standalone/tests/release_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 0edc40d7e6c36..df4053a6dde4e 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -762,8 +762,7 @@ template <typename Config> class SizeClassAllocator32 {
         compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
 
     ReleaseRecorder Recorder(Base);
-    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
-                               /*ReleaseSize=*/RegionSize);
+    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
 
     auto DecompactPtr = [](CompactPtrT CompactPtr) {
       return reinterpret_cast<uptr>(CompactPtr);

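The primary32.h hunk above is purely mechanical: with the partial-range
parameters gone, the call site drops back to the three-argument constructor.
For reference, a sketch of the two call shapes side by side (argument values
as in the hunk; the declarations are alternatives, not one compilable unit):

  // Reverted commit: primary32 always released whole regions anyway,
  // so ReleaseSize was simply RegionSize.
  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
                             /*ReleaseSize=*/RegionSize);
  // After this revert:
  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
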
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index a372104d8fdc8..ee8532f0967a4 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -794,34 +794,19 @@ template <typename Config> class SizeClassAllocator64 {
 
     const uptr GroupSize = (1U << GroupSizeLog);
     const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
+    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
+    PageReleaseContext Context(BlockSize, Region->AllocatedUser,
+                               /*NumberOfRegions=*/1U);
+
     const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
     auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
       return decompactPtrInternal(CompactPtrBase, CompactPtr);
     };
-
-    // Instead of always preparing PageMap for the entire region, we only do it
-    // for the range of releasing groups. To do that, the free-block marking
-    // process includes visiting BlockGroups twice.
-
-    // The first visit is to determine the range of BatchGroups we are going to
-    // release. And we will extract those BatchGroups out and push into
-    // `GroupToRelease`.
-    SinglyLinkedList<BatchGroup> GroupToRelease;
-    GroupToRelease.clear();
-    BatchGroup *Prev = nullptr;
-
-    // This is only used for debugging to ensure the consistency of the number
-    // of groups.
-    uptr NumberOfBatchGroups = Region->FreeList.size();
-
-    for (BatchGroup *BG = Region->FreeList.front(); BG != nullptr;) {
+    for (BatchGroup &BG : Region->FreeList) {
       const uptr PushedBytesDelta =
-          BG->PushedBlocks - BG->PushedBlocksAtLastCheckpoint;
-      if (PushedBytesDelta * BlockSize < PageSize) {
-        Prev = BG;
-        BG = BG->Next;
+          BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
+      if (PushedBytesDelta * BlockSize < PageSize)
         continue;
-      }
 
       // Group boundary does not necessarily have the same alignment as Region.
       // It may sit across a Region boundary. Which means that we may have the
@@ -829,7 +814,7 @@ template <typename Config> class SizeClassAllocator64 {
       //
       // 1. Group boundary sits before RegionBeg.
       //
-      //                (BatchGroupBase)
+      //                (BatchGroupBeg)
       // batchGroupBase  RegionBeg       BatchGroupEnd
       //        |            |                |
       //        v            v                v
@@ -839,7 +824,7 @@ template <typename Config> class SizeClassAllocator64 {
       //
       // 2. Group boundary sits after RegionBeg.
       //
-      //               (BatchGroupBase)
+      //               (BatchGroupBeg)
       //    RegionBeg  batchGroupBase               BatchGroupEnd
       //        |           |                             |
       //        v           v                             v
@@ -850,24 +835,21 @@ template <typename Config> class SizeClassAllocator64 {
       // Note that in the first case, the group range before RegionBeg is never
       // used. Therefore, while calculating the used group size, we should
       // exclude that part to get the correct size.
-      const uptr BatchGroupBase =
-          Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
-      DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
+      const uptr BatchGroupBeg =
+          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
+      DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
       const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
+          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBase
-                                          : AllocatedUserEnd - BatchGroupBase;
-      if (AllocatedGroupSize == 0) {
-        Prev = BG;
-        BG = BG->Next;
+                                          ? BatchGroupEnd - BatchGroupBeg
+                                          : AllocatedUserEnd - BatchGroupBeg;
+      if (AllocatedGroupSize == 0)
         continue;
-      }
 
       // TransferBatches are pushed in front of BG.Batches. The first one may
       // not have all caches used.
-      const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
-                             BG->Batches.front()->getCount();
+      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+                             BG.Batches.front()->getCount();
       const uptr BytesInBG = NumBlocks * BlockSize;
       // Given the randomness property, we try to release the pages only if the
       // bytes used by free blocks exceed certain proportion of group size. Note
@@ -875,92 +857,12 @@ template <typename Config> class SizeClassAllocator64 {
       // are allocated.
       if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
                               (100U - 1U - BlockSize / 16U)) {
-        Prev = BG;
-        BG = BG->Next;
         continue;
       }
 
-      // If `BG` is the first BatchGroup in the list, we only need to advance
-      // `BG` and call FreeList::pop_front(). No update is needed for `Prev`.
-      //
-      //         (BG)   (BG->Next)
-      // Prev     Cur      BG
-      //   |       |       |
-      //   v       v       v
-      //  nil     +--+    +--+
-      //          |X | -> |  | -> ...
-      //          +--+    +--+
-      //
-      // Otherwise, `Prev` will be used to extract the `Cur` from the
-      // `FreeList`.
-      //
-      //         (BG)   (BG->Next)
-      // Prev     Cur      BG
-      //   |       |       |
-      //   v       v       v
-      //  +--+    +--+    +--+
-      //  |  | -> |X | -> |  | -> ...
-      //  +--+    +--+    +--+
-      //
-      // After FreeList::extract(),
-      //
-      // Prev     Cur       BG
-      //   |       |        |
-      //   v       v        v
-      //  +--+    +--+     +--+
-      //  |  |-+  |X |  +->|  | -> ...
-      //  +--+ |  +--+  |  +--+
-      //       +--------+
-      //
-      // Note that we need to advance before pushing this BatchGroup to
-      // GroupToRelease because it's a destructive operation.
-
-      BatchGroup *Cur = BG;
-      BG = BG->Next;
-
-      if (Prev != nullptr)
-        Region->FreeList.extract(Prev, Cur);
-      else
-        Region->FreeList.pop_front();
-      GroupToRelease.push_back(Cur);
-    }
-
-    if (GroupToRelease.empty())
-      return 0;
-
-    const uptr ReleaseBase =
-        Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
-            Region->RegionBeg);
-    const uptr LastGroupEnd =
-        Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
-                GroupSize,
-            AllocatedUserEnd);
-    // The last block may straddle the group boundary. Rounding up to BlockSize
-    // to get the exact range.
-    const uptr ReleaseEnd =
-        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
-        Region->RegionBeg;
-    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
-
-    ReleaseRecorder Recorder(ReleaseBase, &Region->Data);
-    PageReleaseContext Context(
-        BlockSize, Region->AllocatedUser, /*NumberOfRegions=*/1U,
-        ReleaseRangeSize, /*ReleaseOffset=*/ReleaseBase - Region->RegionBeg);
-
-    for (BatchGroup &BG : GroupToRelease) {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
 
-      // TODO(chiahungduan): Replace GroupId with BatchGroupBase to simplify the
-      // calculation here and above (where we determine the set of groups to
-      // release).
-      const uptr BatchGroupBase =
-          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
-      const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
-      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBase
-                                          : AllocatedUserEnd - BatchGroupBase;
-      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
+      const uptr BatchGroupUsedEnd = BatchGroupBeg + AllocatedGroupSize;
       const bool BlockAlignedWithUsedEnd =
           (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
 
@@ -968,15 +870,12 @@ template <typename Config> class SizeClassAllocator64 {
       if (!BlockAlignedWithUsedEnd)
         ++MaxContainedBlocks;
 
-      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.front()->getCount();
-
       if (NumBlocks == MaxContainedBlocks) {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
             DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
 
-        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
+        Context.markRangeAsAllCounted(BatchGroupBeg, BatchGroupUsedEnd,
                                       Region->RegionBeg);
       } else {
         DCHECK_LT(NumBlocks, MaxContainedBlocks);
@@ -987,7 +886,8 @@ template <typename Config> class SizeClassAllocator64 {
       }
     }
 
-    DCHECK(Context.hasBlockMarked());
+    if (!Context.hasBlockMarked())
+      return 0;
 
     auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
@@ -999,29 +899,6 @@ template <typename Config> class SizeClassAllocator64 {
       Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
     }
     Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
-
-    // Merge GroupToRelease back to the Region::FreeList. Note that both
-    // `Region->FreeList` and `GroupToRelease` are sorted.
-    for (BatchGroup *BG = Region->FreeList.front();; BG = BG->Next) {
-      if (BG == nullptr || GroupToRelease.empty()) {
-        if (!GroupToRelease.empty())
-          Region->FreeList.append_back(&GroupToRelease);
-        break;
-      }
-
-      DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
-
-      if (BG->GroupId < GroupToRelease.front()->GroupId)
-        continue;
-
-      BatchGroup *Cur = GroupToRelease.front();
-      GroupToRelease.pop_front();
-      Region->FreeList.insert(BG, Cur);
-    }
-
-    DCHECK_EQ(Region->FreeList.size(), NumberOfBatchGroups);
-    (void)NumberOfBatchGroups;
-
     return Recorder.getReleasedBytes();
   }
 };

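The bulk of the primary64.h change is the removal of the two-pass release
scheme: pass one walked the sorted FreeList with a trailing Prev pointer and
unlinked the releasable BatchGroups into GroupToRelease, and after the pages
were released, pass two merged them back so both lists stayed sorted by
GroupId. A self-contained sketch of that unlink-and-merge pattern on a plain
singly linked list (simplified; scudo's SinglyLinkedList API differs):

  #include <cstdint>

  struct Node {
    uint64_t GroupId;
    Node *Next;
  };

  // Pass 1: unlink every node satisfying ShouldRelease from the sorted list
  // Head, appending it to Out in order. The trailing Prev pointer allows
  // O(1) unlinking, mirroring FreeList.extract(Prev, Cur) in the hunk.
  // Precondition (for this sketch): Out == nullptr on entry.
  template <class Pred>
  void extractIf(Node *&Head, Node *&Out, Pred ShouldRelease) {
    Node *Prev = nullptr;
    Node **OutTail = &Out;
    for (Node *Cur = Head; Cur != nullptr;) {
      Node *Next = Cur->Next;
      if (ShouldRelease(Cur)) {
        if (Prev != nullptr)
          Prev->Next = Next; // extract from the middle
        else
          Head = Next;       // pop_front
        Cur->Next = nullptr;
        *OutTail = Cur;      // append, preserving sorted order
        OutTail = &Cur->Next;
      } else {
        Prev = Cur;
      }
      Cur = Next;
    }
  }

  // Pass 2: merge the still-sorted extracted list back into the remainder,
  // analogous to the FreeList.insert()/append_back() loop the revert removes.
  Node *mergeSorted(Node *A, Node *B) {
    Node Dummy{0, nullptr};
    Node *Tail = &Dummy;
    while (A != nullptr && B != nullptr) {
      Node *&Smaller = (A->GroupId < B->GroupId) ? A : B;
      Tail->Next = Smaller;
      Tail = Smaller;
      Smaller = Smaller->Next;
    }
    Tail->Next = (A != nullptr) ? A : B;
    return Dummy.Next;
  }
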
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index d29d1c1f53f10..733326db7b81f 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -259,10 +259,10 @@ template <class ReleaseRecorderT> class FreePagesRangeTracker {
 };
 
 struct PageReleaseContext {
-  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions,
-                     uptr ReleaseSize, uptr ReleaseOffset = 0)
-      : BlockSize(BlockSize), RegionSize(RegionSize),
-        NumberOfRegions(NumberOfRegions) {
+  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
+      BlockSize(BlockSize),
+      RegionSize(RegionSize),
+      NumberOfRegions(NumberOfRegions) {
     PageSize = getPageSizeCached();
     if (BlockSize <= PageSize) {
       if (PageSize % BlockSize == 0) {
@@ -294,20 +294,10 @@ struct PageReleaseContext {
       }
     }
 
-    // TODO: For multiple regions, it's more complicated to support partial
-    // region marking (which includes the complexity of how to handle the last
-    // block in a region). We may consider this after markFreeBlocks() accepts
-    // only free blocks from the same region.
-    if (NumberOfRegions != 1) {
-      DCHECK_EQ(ReleaseSize, RegionSize);
-      DCHECK_EQ(ReleaseOffset, 0U);
-    }
-
-    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
+    PagesCount = roundUp(RegionSize, PageSize) / PageSize;
     PageSizeLog = getLog2(PageSize);
-    RoundedRegionSize = roundUp(RegionSize, PageSize);
+    RoundedRegionSize = PagesCount << PageSizeLog;
     RoundedSize = NumberOfRegions * RoundedRegionSize;
-    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
   }
 
   // PageMap is lazily allocated when markFreeBlocks() is invoked.
@@ -374,7 +364,7 @@ struct PageReleaseContext {
       uptr NumBlocksInFirstPage =
           (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
           BlockSize;
-      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
+      PageMap.incN(RegionIndex, FromInRegion >> PageSizeLog,
                    NumBlocksInFirstPage);
       FromInRegion = roundUp(FromInRegion + 1, PageSize);
     }
@@ -402,8 +392,8 @@ struct PageReleaseContext {
       // The last block is not aligned to `To`, we need to increment the
       // counter of `next page` by 1.
       if (LastBlockInRange + BlockSize != ToInRegion) {
-        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
-                         getPageIndex(LastBlockInRange + BlockSize - 1));
+        PageMap.incRange(RegionIndex, ToInRegion >> PageSizeLog,
+                         (LastBlockInRange + BlockSize - 1) >> PageSizeLog);
       }
     } else {
       ToInRegion = RegionSize;
@@ -412,8 +402,8 @@ struct PageReleaseContext {
     // After handling the first page and the last block, it's safe to mark any
     // page in between the range [From, To).
     if (FromInRegion < ToInRegion) {
-      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
-                                   getPageIndex(ToInRegion - 1));
+      PageMap.setAsAllCountedRange(RegionIndex, FromInRegion >> PageSizeLog,
+                                   (ToInRegion - 1) >> PageSizeLog);
     }
   }
 
@@ -422,19 +412,6 @@ struct PageReleaseContext {
                       DecompactPtrT DecompactPtr, uptr Base) {
     ensurePageMapAllocated();
 
-    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
-
-    // The last block in a region may not use the entire page, so if it's free,
-    // we mark the following "pretend" memory block(s) as free.
-    auto markLastBlock = [this, LastBlockInRegion](const uptr RegionIndex) {
-      uptr PInRegion = LastBlockInRegion + BlockSize;
-      while (PInRegion < RoundedRegionSize) {
-        PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
-                         getPageIndex(PInRegion + BlockSize - 1));
-        PInRegion += BlockSize;
-      }
-    };
-
     // Iterate over free chunks and count how many free chunks affect each
     // allocated page.
     if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
@@ -446,14 +423,14 @@ struct PageReleaseContext {
             continue;
           const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
           const uptr PInRegion = P - RegionIndex * RegionSize;
-          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
-          if (PInRegion == LastBlockInRegion)
-            markLastBlock(RegionIndex);
+          PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
         }
       }
     } else {
       // In all other cases chunks might affect more than one page.
       DCHECK_GE(RegionSize, BlockSize);
+      const uptr LastBlockInRegion =
+          ((RegionSize / BlockSize) - 1U) * BlockSize;
       for (const auto &It : FreeList) {
         for (u16 I = 0; I < It.getCount(); I++) {
           const uptr P = DecompactPtr(It.get(I)) - Base;
@@ -461,23 +438,26 @@ struct PageReleaseContext {
             continue;
           const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
           uptr PInRegion = P - RegionIndex * RegionSize;
-          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
-                           getPageIndex(PInRegion + BlockSize - 1));
-          if (PInRegion == LastBlockInRegion)
-            markLastBlock(RegionIndex);
+          PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+                            (PInRegion + BlockSize - 1) >> PageSizeLog);
+          // The last block in a region might straddle a page, so if it's
+          // free, we mark the following "pretend" memory block(s) as free.
+          if (PInRegion == LastBlockInRegion) {
+            PInRegion += BlockSize;
+            while (PInRegion < RoundedRegionSize) {
+              PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+                                (PInRegion + BlockSize - 1) >> PageSizeLog);
+              PInRegion += BlockSize;
+            }
+          }
         }
       }
     }
   }
 
-  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
-
   uptr BlockSize;
   uptr RegionSize;
   uptr NumberOfRegions;
-  // For partial region marking, some pages in front are not needed to be
-  // counted.
-  uptr ReleasePageOffset;
   uptr PageSize;
   uptr PagesCount;
   uptr PageSizeLog;
@@ -499,7 +479,6 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
   const uptr BlockSize = Context.BlockSize;
   const uptr PagesCount = Context.PagesCount;
   const uptr NumberOfRegions = Context.NumberOfRegions;
-  const uptr ReleasePageOffset = Context.ReleasePageOffset;
   const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
   const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
   RegionPageMap &PageMap = Context.PageMap;
@@ -537,10 +516,6 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
       }
       uptr PrevPageBoundary = 0;
       uptr CurrentBoundary = 0;
-      if (ReleasePageOffset > 0) {
-        PrevPageBoundary = ReleasePageOffset * PageSize;
-        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
-      }
       for (uptr J = 0; J < PagesCount; J++) {
         const uptr PageBoundary = PrevPageBoundary + PageSize;
         uptr BlocksPerPage = Pn;
@@ -572,8 +547,7 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
                       uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                       ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
                       SkipRegionT SkipRegion) {
-  PageReleaseContext Context(BlockSize, /*ReleaseSize=*/RegionSize, RegionSize,
-                             NumberOfRegions);
+  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
   Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
   releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
 }

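In release.h, the revert deletes getPageIndex() (which rebased page indices
by ReleasePageOffset) in favor of plain `P >> PageSizeLog` shifts, and moves
the "pretend block" handling for a region's trailing bytes back inline into
markFreeBlocks(). The counting itself reduces to bumping a counter for every
page a free block touches; once a page's counter reaches the number of blocks
that can live on it, the page holds no live data and may be released. A small
sketch of that range computation, using simplified illustrative types (a
plain vector stands in for the real PageMap):

  #include <algorithm>
  #include <cstdint>
  #include <vector>
  using uptr = uintptr_t;

  // Bump the counter of every page the free block at PInRegion touches.
  // Mirrors PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
  //                          (PInRegion + BlockSize - 1) >> PageSizeLog).
  // Assumes Counters is non-empty and sized to the region's page count.
  void countFreeBlock(std::vector<uptr> &Counters, uptr PInRegion,
                      uptr BlockSize, uptr PageSizeLog) {
    const uptr First = PInRegion >> PageSizeLog;
    // Clamp for the sketch's bounds safety; the real PageMap is sized so
    // that in-region blocks never exceed it.
    const uptr Last = std::min((PInRegion + BlockSize - 1) >> PageSizeLog,
                               static_cast<uptr>(Counters.size() - 1));
    for (uptr Page = First; Page <= Last; ++Page)
      ++Counters[Page];
  }

  // If the region's last block is free, the tail of the region past it can
  // hold no other block, so count "pretend" blocks there to let the final
  // page(s) reach their full count and become releasable.
  void countPretendTail(std::vector<uptr> &Counters, uptr LastBlockInRegion,
                        uptr BlockSize, uptr RoundedRegionSize,
                        uptr PageSizeLog) {
    for (uptr P = LastBlockInRegion + BlockSize; P < RoundedRegionSize;
         P += BlockSize)
      countFreeBlock(Counters, P, BlockSize, PageSizeLog);
  }
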
diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
index 2e7382147006b..3cc20c4621d2c 100644
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -138,18 +138,15 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
 
 class ReleasedPagesRecorder {
 public:
-  ReleasedPagesRecorder() = default;
-  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
   std::set<scudo::uptr> ReportedPages;
 
   void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
     const scudo::uptr PageSize = scudo::getPageSizeCached();
     for (scudo::uptr I = From; I < To; I += PageSize)
-      ReportedPages.insert(I + getBase());
+      ReportedPages.insert(I);
   }
 
-  scudo::uptr getBase() const { return Base; }
-  scudo::uptr Base = 0;
+  scudo::uptr getBase() const { return 0; }
 };
 
 // Simplified version of a TransferBatch.
@@ -222,8 +219,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
     ReleasedPagesRecorder Recorder;
     scudo::PageReleaseContext Context(BlockSize,
                                       /*RegionSize=*/MaxBlocks * BlockSize,
-                                      /*NumberOfRegions=*/1U,
-                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
+                                      /*NumberOfRegions=*/1U);
     ASSERT_FALSE(Context.hasBlockMarked());
     Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
     ASSERT_TRUE(Context.hasBlockMarked());
@@ -329,9 +325,8 @@ template <class SizeClassMap> void testPageMapMarkRange() {
       const scudo::uptr GroupEnd = GroupBeg + GroupSize;
 
       scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                        /*NumberOfRegions=*/1U,
-                                        /*ReleaseSize=*/RegionSize);
-      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U);
+                                        /*NumberOfRegions=*/1U);
+      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0);
 
       scudo::uptr FirstBlock =
           ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;
@@ -399,142 +394,13 @@ template <class SizeClassMap> void testPageMapMarkRange() {
 
     // Release the entire region. This is to ensure the last page is counted.
     scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                      /*NumberOfRegions=*/1U,
-                                      /*ReleaseSize=*/RegionSize);
+                                      /*NumberOfRegions=*/1U);
     Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0);
     for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
       EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
   } // Iterate each size class
 }
 
-template <class SizeClassMap> void testReleasePartialRegion() {
-  typedef FreeBatch<SizeClassMap> Batch;
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr ReleaseBase = PageSize;
-  const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
-
-  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
-    // In the following, we want to ensure the region includes at least 2 pages
-    // and we will release all the pages except the first one. The handling of
-    // the last block is tricky, so we always test the case that includes the
-    // last block.
-    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
-    const scudo::uptr RegionSize =
-        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) +
-        BlockSize;
-    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
-
-    scudo::SinglyLinkedList<Batch> FreeList;
-    FreeList.clear();
-
-    // Skip the blocks in the first page and add the remaining.
-    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
-         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
-      for (scudo::uptr Page = Block / PageSize;
-           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-        ASSERT_LT(Page, Pages.size());
-        ++Pages[Page];
-      }
-    }
-
-    // This follows the logic how we count the last page. It should be
-    // consistent with how markFreeBlocks() handles the last block.
-    if (RoundedRegionSize % BlockSize != 0)
-      ++Pages.back();
-
-    Batch *CurrentBatch = nullptr;
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
-         Block < RegionSize; Block += BlockSize) {
-      if (CurrentBatch == nullptr ||
-          CurrentBatch->getCount() == Batch::MaxCount) {
-        CurrentBatch = new Batch;
-        CurrentBatch->clear();
-        FreeList.push_back(CurrentBatch);
-      }
-      CurrentBatch->add(Block);
-    }
-
-    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
-      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
-      ReleasedPagesRecorder Recorder(ReleaseBase);
-      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-      const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize);
-
-      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
-        if (P < FirstBlock) {
-          // If FirstBlock is not aligned with page boundary, the first touched
-          // page will not be released either.
-          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
-                      Recorder.ReportedPages.end());
-        } else {
-          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
-                      Recorder.ReportedPages.end());
-        }
-      }
-    };
-
-    // Test marking by visiting each block.
-    {
-      auto DecompactPtr = [](scudo::uptr P) { return P; };
-      scudo::PageReleaseContext Context(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
-      Context.markFreeBlocks(FreeList, DecompactPtr, /*Base=*/0U);
-      for (const Batch &It : FreeList) {
-        for (scudo::u16 I = 0; I < It.getCount(); I++) {
-          scudo::uptr Block = It.get(I);
-          for (scudo::uptr Page = Block / PageSize;
-               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
-            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
-                                                       Page - BasePageOffset));
-          }
-        }
-      }
-
-      VerifyReleaseToOs(Context);
-    }
-
-    // Test range marking.
-    {
-      scudo::PageReleaseContext Context(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
-      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U);
-      for (scudo::uptr Page = ReleaseBase / PageSize;
-           Page < RoundedRegionSize / PageSize; ++Page) {
-        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
-            Pages[Page]) {
-          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
-                                                   Page - BasePageOffset));
-        }
-      }
-
-      VerifyReleaseToOs(Context);
-    }
-
-    // Check the buffer size of PageMap.
-    {
-      scudo::PageReleaseContext Full(BlockSize, RegionSize,
-                                     /*NumberOfRegions=*/1U,
-                                     /*ReleaseSize=*/RegionSize);
-      Full.ensurePageMapAllocated();
-      scudo::PageReleaseContext Partial(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
-      Partial.ensurePageMapAllocated();
-
-      EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());
-    }
-
-    while (!FreeList.empty()) {
-      CurrentBatch = FreeList.front();
-      FreeList.pop_front();
-      delete CurrentBatch;
-    }
-  } // Iterate each size class
-}
-
 TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
   testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
 }
@@ -553,10 +419,3 @@ TEST(ScudoReleaseTest, PageMapMarkRange) {
   testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
   testPageMapMarkRange<scudo::SvelteSizeClassMap>();
 }
-
-TEST(ScudoReleaseTest, ReleasePartialRegion) {
-  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
-  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
-  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
-  testReleasePartialRegion<scudo::SvelteSizeClassMap>();
-}
