[compiler-rt] c6ef6bb - Revert "Revert "[scudo] Only prepare PageMap entry for partial region""

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 28 08:57:22 PST 2023


Author: Chia-hung Duan
Date: 2023-02-28T16:55:48Z
New Revision: c6ef6bbd8d964028ee6c2f03441604d7a7ba5375

URL: https://github.com/llvm/llvm-project/commit/c6ef6bbd8d964028ee6c2f03441604d7a7ba5375
DIFF: https://github.com/llvm/llvm-project/commit/c6ef6bbd8d964028ee6c2f03441604d7a7ba5375.diff

LOG: Revert "Revert "[scudo] Only prepare PageMap entry for partial region""

Fixed the bug in merging BatchGroups back into the FreeList. Added DCHECKs
to verify the ordering of BatchGroups.

This reverts commit 387452ec591c81def6d8869b23c2ab2f1c56f999.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D144920
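
Both `Region->FreeList` and `GroupToRelease` are kept sorted by GroupId, so
the fixed merge step (the loop near the end of releaseToOSMaybe() in the
primary64.h hunk below) is a standard sorted-list splice. A minimal
standalone sketch of the same Prev/Cur walk, using a hypothetical `Node`
type instead of scudo's `BatchGroup`/`SinglyLinkedList`:

    #include <cassert>
    #include <cstdint>

    struct Node {
      uint64_t GroupId;
      Node *Next;
    };

    // Merge the sorted list `Released` back into the sorted list `Free`,
    // returning the new head: walk the free list with a trailing `Prev`
    // pointer and splice each released node in front of the first node
    // whose GroupId is larger.
    Node *mergeBack(Node *Free, Node *Released) {
      Node *Head = Free;
      Node *Prev = nullptr;
      for (Node *BG = Free; Released != nullptr;) {
        if (BG == nullptr) {
          // The free list is exhausted; append the remaining groups.
          if (Prev != nullptr)
            Prev->Next = Released;
          else
            Head = Released;
          break;
        }
        // Released groups were extracted from this list, so no id repeats.
        assert(BG->GroupId != Released->GroupId);
        if (BG->GroupId < Released->GroupId) {
          Prev = BG;
          BG = BG->Next;
          continue;
        }
        // Insert the head of `Released` before `BG`. Do not advance `BG`:
        // the next released group may still belong before it.
        Node *Cur = Released;
        Released = Released->Next;
        Cur->Next = BG;
        if (Prev != nullptr)
          Prev->Next = Cur;
        else
          Head = Cur;
        Prev = Cur;
      }
      return Head;
    }

    int main() {
      Node C{30, nullptr}, B{20, &C}, A{10, &B}; // Free: 10 -> 20 -> 30
      Node R2{25, nullptr}, R1{5, &R2};          // Released: 5 -> 25
      Node *Head = mergeBack(&A, &R1);
      for (Node *N = Head; N->Next != nullptr; N = N->Next)
        assert(N->GroupId < N->Next->GroupId);   // 5,10,20,25,30: sorted.
      return 0;
    }

The key invariant, which the new DCHECKs assert, is that the result stays
strictly ordered by GroupId and that no GroupId appears in both lists.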

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/release.h
    compiler-rt/lib/scudo/standalone/tests/release_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index df4053a6dde4e..0edc40d7e6c36 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -762,7 +762,8 @@ template <typename Config> class SizeClassAllocator32 {
         compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
 
     ReleaseRecorder Recorder(Base);
-    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
+                               /*ReleaseSize=*/RegionSize);
 
     auto DecompactPtr = [](CompactPtrT CompactPtr) {
       return reinterpret_cast<uptr>(CompactPtr);

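For reference, the updated PageReleaseContext constructor (see the release.h
hunk further down) takes a new ReleaseSize parameter and an optional
ReleaseOffset. The primary32.h change above keeps whole-region behavior by
passing ReleaseSize equal to RegionSize. A sketch of the two call shapes,
assuming scudo-internal context where PageReleaseContext and these values
are in scope:

    // Whole-region release: the PageMap covers every page, as before.
    PageReleaseContext Full(BlockSize, RegionSize, /*NumberOfRegions=*/1U,
                            /*ReleaseSize=*/RegionSize);

    // Partial release: the PageMap only needs counters for pages in
    // [ReleaseOffset, ReleaseOffset + ReleaseSize) within the region.
    PageReleaseContext Partial(BlockSize, RegionSize, /*NumberOfRegions=*/1U,
                               /*ReleaseSize=*/RegionSize - ReleaseOffset,
                               ReleaseOffset);
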
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index c0e50218fb1d9..4caf8eb1751a8 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -794,19 +794,34 @@ template <typename Config> class SizeClassAllocator64 {
 
     const uptr GroupSize = (1U << GroupSizeLog);
     const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
-    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
-    PageReleaseContext Context(BlockSize, Region->AllocatedUser,
-                               /*NumberOfRegions=*/1U);
-
     const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
     auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
       return decompactPtrInternal(CompactPtrBase, CompactPtr);
     };
-    for (BatchGroup &BG : Region->FreeList) {
+
+    // Instead of always preparing the PageMap for the entire region, we only
+    // do it for the range of groups being released. To do that, the
+    // free-block marking process visits BatchGroups twice.
+
+    // The first visit determines the range of BatchGroups to release. Those
+    // BatchGroups are extracted from the FreeList and pushed into
+    // `GroupToRelease`.
+    SinglyLinkedList<BatchGroup> GroupToRelease;
+    GroupToRelease.clear();
+
+    // This is only used for debugging, to verify that the number of groups
+    // stays consistent.
+    uptr NumberOfBatchGroups = Region->FreeList.size();
+
+    for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;
+         BG != nullptr;) {
       const uptr PushedBytesDelta =
-          BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
-      if (PushedBytesDelta * BlockSize < PageSize)
+          BG->PushedBlocks - BG->PushedBlocksAtLastCheckpoint;
+      if (PushedBytesDelta * BlockSize < PageSize) {
+        Prev = BG;
+        BG = BG->Next;
         continue;
+      }
 
       // Group boundary does not necessarily have the same alignment as Region.
       // It may sit across a Region boundary. Which means that we may have the
@@ -814,7 +829,7 @@ template <typename Config> class SizeClassAllocator64 {
       //
       // 1. Group boundary sits before RegionBeg.
       //
-      //                (BatchGroupBeg)
+      //                (BatchGroupBase)
       // batchGroupBase  RegionBeg       BatchGroupEnd
       //        |            |                |
       //        v            v                v
@@ -824,7 +839,7 @@ template <typename Config> class SizeClassAllocator64 {
       //
       // 2. Group boundary sits after RegionBeg.
       //
-      //               (BatchGroupBeg)
+      //               (BatchGroupBase)
       //    RegionBeg  batchGroupBase               BatchGroupEnd
       //        |           |                             |
       //        v           v                             v
@@ -835,21 +850,24 @@ template <typename Config> class SizeClassAllocator64 {
       // Note that in the first case, the group range before RegionBeg is never
       // used. Therefore, while calculating the used group size, we should
       // exclude that part to get the correct size.
-      const uptr BatchGroupBeg =
-          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
-      DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
+      const uptr BatchGroupBase =
+          Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
+      DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
       const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+          batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBeg
-                                          : AllocatedUserEnd - BatchGroupBeg;
-      if (AllocatedGroupSize == 0)
+                                          ? BatchGroupEnd - BatchGroupBase
+                                          : AllocatedUserEnd - BatchGroupBase;
+      if (AllocatedGroupSize == 0) {
+        Prev = BG;
+        BG = BG->Next;
         continue;
+      }
 
       // TransferBatches are pushed in front of BG.Batches. The first one may
       // not have all caches used.
-      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.front()->getCount();
+      const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
+                             BG->Batches.front()->getCount();
       const uptr BytesInBG = NumBlocks * BlockSize;
       // Given the randomness property, we try to release the pages only if
       // the bytes used by free blocks exceed a certain proportion of the
       // group size. Note that this heuristic only applies when all the
       // spaces in a BatchGroup are allocated.
       if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
                               (100U - 1U - BlockSize / 16U)) {
+        Prev = BG;
+        BG = BG->Next;
         continue;
       }
 
+      // If `BG` is the first BatchGroup in the list, we only need to advance
+      // `BG` and call FreeList::pop_front(). No update is needed for `Prev`.
+      //
+      //         (BG)   (BG->Next)
+      // Prev     Cur      BG
+      //   |       |       |
+      //   v       v       v
+      //  nil     +--+    +--+
+      //          |X | -> |  | -> ...
+      //          +--+    +--+
+      //
+      // Otherwise, `Prev` will be used to extract `Cur` from the
+      // `FreeList`.
+      //
+      //         (BG)   (BG->Next)
+      // Prev     Cur      BG
+      //   |       |       |
+      //   v       v       v
+      //  +--+    +--+    +--+
+      //  |  | -> |X | -> |  | -> ...
+      //  +--+    +--+    +--+
+      //
+      // After FreeList::extract(),
+      //
+      // Prev     Cur       BG
+      //   |       |        |
+      //   v       v        v
+      //  +--+    +--+     +--+
+      //  |  |-+  |X |  +->|  | -> ...
+      //  +--+ |  +--+  |  +--+
+      //       +--------+
+      //
+      // Note that we need to advance `BG` before pushing this BatchGroup to
+      // `GroupToRelease` because extraction is a destructive operation.
+
+      BatchGroup *Cur = BG;
+      BG = BG->Next;
+
+      if (Prev != nullptr)
+        Region->FreeList.extract(Prev, Cur);
+      else
+        Region->FreeList.pop_front();
+      GroupToRelease.push_back(Cur);
+    }
+
+    if (GroupToRelease.empty())
+      return 0;
+
+    const uptr ReleaseBase =
+        Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
+            Region->RegionBeg);
+    const uptr LastGroupEnd =
+        Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
+                GroupSize,
+            AllocatedUserEnd);
+    // The last block may straddle the group boundary. Round up to BlockSize to
+    // get the exact range.
+    const uptr ReleaseEnd =
+        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
+        Region->RegionBeg;
+    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
+
+    ReleaseRecorder Recorder(ReleaseBase, &Region->Data);
+    PageReleaseContext Context(
+        BlockSize, Region->AllocatedUser, /*NumberOfRegions=*/1U,
+        ReleaseRangeSize, /*ReleaseOffset=*/ReleaseBase - Region->RegionBeg);
+
+    for (BatchGroup &BG : GroupToRelease) {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
 
-      const uptr BatchGroupUsedEnd = BatchGroupBeg + AllocatedGroupSize;
+      // TODO(chiahungduan): Replace GroupId with BatchGroupBase to simplify the
+      // calculation here and above (where we determine the set of groups to
+      // release).
+      const uptr BatchGroupBase =
+          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
+      const uptr BatchGroupEnd =
+          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+                                          ? BatchGroupEnd - BatchGroupBase
+                                          : AllocatedUserEnd - BatchGroupBase;
+      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
       const bool BlockAlignedWithUsedEnd =
           (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
 
@@ -870,12 +968,15 @@ template <typename Config> class SizeClassAllocator64 {
       if (!BlockAlignedWithUsedEnd)
         ++MaxContainedBlocks;
 
+      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+                             BG.Batches.front()->getCount();
+
       if (NumBlocks == MaxContainedBlocks) {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
             DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
 
-        Context.markRangeAsAllCounted(BatchGroupBeg, BatchGroupUsedEnd,
+        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
                                       Region->RegionBeg);
       } else {
         DCHECK_LT(NumBlocks, MaxContainedBlocks);
@@ -886,8 +987,7 @@ template <typename Config> class SizeClassAllocator64 {
       }
     }
 
-    if (!Context.hasBlockMarked())
-      return 0;
+    DCHECK(Context.hasBlockMarked());
 
     auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
@@ -899,6 +999,55 @@ template <typename Config> class SizeClassAllocator64 {
       Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
     }
     Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+    // Merge GroupToRelease back to the Region::FreeList. Note that both
+    // `Region->FreeList` and `GroupToRelease` are sorted.
+    for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;;) {
+      if (BG == nullptr || GroupToRelease.empty()) {
+        if (!GroupToRelease.empty())
+          Region->FreeList.append_back(&GroupToRelease);
+        break;
+      }
+
+      DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
+
+      if (BG->GroupId < GroupToRelease.front()->GroupId) {
+        Prev = BG;
+        BG = BG->Next;
+        continue;
+      }
+
+      // At this point, `BG` is the first BatchGroup with a GroupId larger
+      // than that of the first element in `GroupToRelease`. We need to insert
+      // `GroupToRelease::front()` (which is `Cur` below) before `BG`.
+      //
+      //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
+      //      FreeList.
+      //   2. Otherwise, use `insert()` which inserts an element next to `Prev`.
+      //
+      // Afterwards, we don't need to advance `BG` because the order between
+      // `BG` and the new `GroupToRelease::front()` hasn't been checked.
+      BatchGroup *Cur = GroupToRelease.front();
+      GroupToRelease.pop_front();
+      if (Prev == nullptr)
+        Region->FreeList.push_front(Cur);
+      else
+        Region->FreeList.insert(Prev, Cur);
+      DCHECK_EQ(Cur->Next, BG);
+      Prev = Cur;
+    }
+
+    DCHECK_EQ(Region->FreeList.size(), NumberOfBatchGroups);
+    (void)NumberOfBatchGroups;
+
+    if (SCUDO_DEBUG) {
+      BatchGroup *Prev = Region->FreeList.front();
+      for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
+           Prev = Cur, Cur = Cur->Next) {
+        CHECK_LT(Prev->GroupId, Cur->GroupId);
+      }
+    }
+
     return Recorder.getReleasedBytes();
   }
 };

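To make the ReleaseBase/ReleaseEnd computation in the hunk above concrete,
here is a standalone sketch with purely illustrative numbers; the helper
mirrors scudo's roundUpSlow() (round up to a possibly non-power-of-two
multiple), and none of this is the allocator's actual code:

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    // Round X up to a multiple of Boundary; Boundary need not be 2^n.
    static uptr roundUpSlow(uptr X, uptr Boundary) {
      return ((X + Boundary - 1) / Boundary) * Boundary;
    }

    int main() {
      // Illustrative values only.
      const uptr RegionBeg = 0x10000;
      const uptr GroupSize = 0x10000; // 64 KiB groups.
      const uptr BlockSize = 80;      // Does not divide GroupSize evenly.
      const uptr AllocatedUserEnd = RegionBeg + 3 * GroupSize + 0x1000;

      // Suppose the groups selected for release are the 2nd and 3rd ones.
      const uptr FirstGroupBase = RegionBeg + GroupSize;
      const uptr LastGroupBase = RegionBeg + 2 * GroupSize;

      const uptr ReleaseBase = FirstGroupBase; // Already >= RegionBeg.
      const uptr LastGroupEnd = LastGroupBase + GroupSize < AllocatedUserEnd
                                    ? LastGroupBase + GroupSize
                                    : AllocatedUserEnd;
      // The last block may straddle the group end; round the in-region
      // offset up to a BlockSize multiple so that block is fully covered.
      const uptr ReleaseEnd =
          roundUpSlow(LastGroupEnd - RegionBeg, BlockSize) + RegionBeg;
      const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;

      assert(ReleaseEnd >= LastGroupEnd);
      assert(ReleaseRangeSize < 3 * GroupSize); // Less than the full region.
      return 0;
    }

The PageMap is then sized from ReleaseRangeSize rather than from the whole
region, which is the point of the patch.
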
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 733326db7b81f..d29d1c1f53f10 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -259,10 +259,10 @@ template <class ReleaseRecorderT> class FreePagesRangeTracker {
 };
 
 struct PageReleaseContext {
-  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
-      BlockSize(BlockSize),
-      RegionSize(RegionSize),
-      NumberOfRegions(NumberOfRegions) {
+  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions,
+                     uptr ReleaseSize, uptr ReleaseOffset = 0)
+      : BlockSize(BlockSize), RegionSize(RegionSize),
+        NumberOfRegions(NumberOfRegions) {
     PageSize = getPageSizeCached();
     if (BlockSize <= PageSize) {
       if (PageSize % BlockSize == 0) {
@@ -294,10 +294,20 @@ struct PageReleaseContext {
       }
     }
 
-    PagesCount = roundUp(RegionSize, PageSize) / PageSize;
+    // TODO: For multiple regions, it's more complicated to support partial
+    // region marking (which includes the complexity of how to handle the last
+    // block in a region). We may consider this after markFreeBlocks() accepts
+    // only free blocks from the same region.
+    if (NumberOfRegions != 1) {
+      DCHECK_EQ(ReleaseSize, RegionSize);
+      DCHECK_EQ(ReleaseOffset, 0U);
+    }
+
+    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
     PageSizeLog = getLog2(PageSize);
-    RoundedRegionSize = PagesCount << PageSizeLog;
+    RoundedRegionSize = roundUp(RegionSize, PageSize);
     RoundedSize = NumberOfRegions * RoundedRegionSize;
+    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
   }
 
   // PageMap is lazily allocated when markFreeBlocks() is invoked.
@@ -364,7 +374,7 @@ struct PageReleaseContext {
       uptr NumBlocksInFirstPage =
           (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
           BlockSize;
-      PageMap.incN(RegionIndex, FromInRegion >> PageSizeLog,
+      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                    NumBlocksInFirstPage);
       FromInRegion = roundUp(FromInRegion + 1, PageSize);
     }
@@ -392,8 +402,8 @@ struct PageReleaseContext {
       // If the last block is not aligned to `To`, we need to increment the
       // counter of the next page by 1.
       if (LastBlockInRange + BlockSize != ToInRegion) {
-        PageMap.incRange(RegionIndex, ToInRegion >> PageSizeLog,
-                         (LastBlockInRange + BlockSize - 1) >> PageSizeLog);
+        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
+                         getPageIndex(LastBlockInRange + BlockSize - 1));
       }
     } else {
       ToInRegion = RegionSize;
@@ -402,8 +412,8 @@ struct PageReleaseContext {
     // After handling the first page and the last block, it's safe to mark any
     // page in between the range [From, To).
     if (FromInRegion < ToInRegion) {
-      PageMap.setAsAllCountedRange(RegionIndex, FromInRegion >> PageSizeLog,
-                                   (ToInRegion - 1) >> PageSizeLog);
+      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
+                                   getPageIndex(ToInRegion - 1));
     }
   }
 
@@ -412,6 +422,19 @@ struct PageReleaseContext {
                       DecompactPtrT DecompactPtr, uptr Base) {
     ensurePageMapAllocated();
 
+    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
+
+    // The last block in a region may not use the entire page, so if it's free,
+    // we mark the following "pretend" memory block(s) as free.
+    auto markLastBlock = [this, LastBlockInRegion](const uptr RegionIndex) {
+      uptr PInRegion = LastBlockInRegion + BlockSize;
+      while (PInRegion < RoundedRegionSize) {
+        PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+                         getPageIndex(PInRegion + BlockSize - 1));
+        PInRegion += BlockSize;
+      }
+    };
+
     // Iterate over free chunks and count how many free chunks affect each
     // allocated page.
     if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
@@ -423,14 +446,14 @@ struct PageReleaseContext {
             continue;
           const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
           const uptr PInRegion = P - RegionIndex * RegionSize;
-          PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
+          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
+          if (PInRegion == LastBlockInRegion)
+            markLastBlock(RegionIndex);
         }
       }
     } else {
       // In all other cases chunks might affect more than one page.
       DCHECK_GE(RegionSize, BlockSize);
-      const uptr LastBlockInRegion =
-          ((RegionSize / BlockSize) - 1U) * BlockSize;
       for (const auto &It : FreeList) {
         for (u16 I = 0; I < It.getCount(); I++) {
           const uptr P = DecompactPtr(It.get(I)) - Base;
@@ -438,26 +461,23 @@ struct PageReleaseContext {
             continue;
           const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
           uptr PInRegion = P - RegionIndex * RegionSize;
-          PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
-                            (PInRegion + BlockSize - 1) >> PageSizeLog);
-          // The last block in a region might straddle a page, so if it's
-          // free, we mark the following "pretend" memory block(s) as free.
-          if (PInRegion == LastBlockInRegion) {
-            PInRegion += BlockSize;
-            while (PInRegion < RoundedRegionSize) {
-              PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
-                                (PInRegion + BlockSize - 1) >> PageSizeLog);
-              PInRegion += BlockSize;
-            }
-          }
+          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+                           getPageIndex(PInRegion + BlockSize - 1));
+          if (PInRegion == LastBlockInRegion)
+            markLastBlock(RegionIndex);
         }
       }
     }
   }
 
+  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
+
   uptr BlockSize;
   uptr RegionSize;
   uptr NumberOfRegions;
+  // For partial region marking, the pages before the release offset do not
+  // need to be counted.
+  uptr ReleasePageOffset;
   uptr PageSize;
   uptr PagesCount;
   uptr PageSizeLog;
@@ -479,6 +499,7 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
   const uptr BlockSize = Context.BlockSize;
   const uptr PagesCount = Context.PagesCount;
   const uptr NumberOfRegions = Context.NumberOfRegions;
+  const uptr ReleasePageOffset = Context.ReleasePageOffset;
   const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
   const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
   RegionPageMap &PageMap = Context.PageMap;
@@ -516,6 +537,10 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
       }
       uptr PrevPageBoundary = 0;
       uptr CurrentBoundary = 0;
+      if (ReleasePageOffset > 0) {
+        PrevPageBoundary = ReleasePageOffset * PageSize;
+        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
+      }
       for (uptr J = 0; J < PagesCount; J++) {
         const uptr PageBoundary = PrevPageBoundary + PageSize;
         uptr BlocksPerPage = Pn;
@@ -547,7 +572,8 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
                       uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                       ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
                       SkipRegionT SkipRegion) {
-  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
+                             /*ReleaseSize=*/RegionSize);
   Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
   releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
 }

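The net effect of ReleasePageOffset is that PageMap indices are rebased to
the start of the release range, so pages in front of it need no counters at
all. A small standalone illustration with assumed values; the lambda
reproduces the getPageIndex() expression from the patch:

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    int main() {
      const uptr PageSize = 4096;
      const uptr PageSizeLog = 12;             // log2(PageSize).
      const uptr ReleaseOffset = 2 * PageSize; // Release starts at page 2.
      const uptr ReleasePageOffset = ReleaseOffset >> PageSizeLog; // == 2.

      // getPageIndex() from the patch: rebase in-region offsets so the
      // first page of the release range maps to PageMap index 0.
      auto getPageIndex = [&](uptr P) {
        return (P >> PageSizeLog) - ReleasePageOffset;
      };

      assert(getPageIndex(ReleaseOffset) == 0);      // First released page.
      assert(getPageIndex(3 * PageSize + 100) == 1); // Within the next page.
      return 0;
    }
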
diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
index 3cc20c4621d2c..2e7382147006b 100644
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -138,15 +138,18 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
 
 class ReleasedPagesRecorder {
 public:
+  ReleasedPagesRecorder() = default;
+  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
   std::set<scudo::uptr> ReportedPages;
 
   void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
     const scudo::uptr PageSize = scudo::getPageSizeCached();
     for (scudo::uptr I = From; I < To; I += PageSize)
-      ReportedPages.insert(I);
+      ReportedPages.insert(I + getBase());
   }
 
-  scudo::uptr getBase() const { return 0; }
+  scudo::uptr getBase() const { return Base; }
+  scudo::uptr Base = 0;
 };
 
 // Simplified version of a TransferBatch.
@@ -219,7 +222,8 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
     ReleasedPagesRecorder Recorder;
     scudo::PageReleaseContext Context(BlockSize,
                                       /*RegionSize=*/MaxBlocks * BlockSize,
-                                      /*NumberOfRegions=*/1U);
+                                      /*NumberOfRegions=*/1U,
+                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
     ASSERT_FALSE(Context.hasBlockMarked());
     Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
     ASSERT_TRUE(Context.hasBlockMarked());
@@ -325,8 +329,9 @@ template <class SizeClassMap> void testPageMapMarkRange() {
       const scudo::uptr GroupEnd = GroupBeg + GroupSize;
 
       scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                        /*NumberOfRegions=*/1U);
-      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0);
+                                        /*NumberOfRegions=*/1U,
+                                        /*ReleaseSize=*/RegionSize);
+      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U);
 
       scudo::uptr FirstBlock =
           ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;
@@ -394,13 +399,142 @@ template <class SizeClassMap> void testPageMapMarkRange() {
 
     // Release the entire region. This is to ensure the last page is counted.
     scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                      /*NumberOfRegions=*/1U);
+                                      /*NumberOfRegions=*/1U,
+                                      /*ReleaseSize=*/RegionSize);
     Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0);
     for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
       EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
   } // Iterate each size class
 }
 
+template <class SizeClassMap> void testReleasePartialRegion() {
+  typedef FreeBatch<SizeClassMap> Batch;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr ReleaseBase = PageSize;
+  const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
+
+  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+    // In the following, we want to ensure the region includes at least 2 pages
+    // and we will release all the pages except the first one. The handling of
+    // the last block is tricky, so we always test the case that includes the
+    // last block.
+    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr RegionSize =
+        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) +
+        BlockSize;
+    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
+
+    scudo::SinglyLinkedList<Batch> FreeList;
+    FreeList.clear();
+
+    // Skip the blocks in the first page and add the remaining.
+    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
+    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
+      for (scudo::uptr Page = Block / PageSize;
+           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
+        ASSERT_LT(Page, Pages.size());
+        ++Pages[Page];
+      }
+    }
+
+    // This follows the logic of how we count the last page. It should be
+    // consistent with how markFreeBlocks() handles the last block.
+    if (RoundedRegionSize % BlockSize != 0)
+      ++Pages.back();
+
+    Batch *CurrentBatch = nullptr;
+    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+         Block < RegionSize; Block += BlockSize) {
+      if (CurrentBatch == nullptr ||
+          CurrentBatch->getCount() == Batch::MaxCount) {
+        CurrentBatch = new Batch;
+        CurrentBatch->clear();
+        FreeList.push_back(CurrentBatch);
+      }
+      CurrentBatch->add(Block);
+    }
+
+    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
+      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
+      ReleasedPagesRecorder Recorder(ReleaseBase);
+      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+      const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize);
+
+      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
+        if (P < FirstBlock) {
+          // If FirstBlock is not aligned with a page boundary, the first
+          // touched page will not be released either.
+          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
+                      Recorder.ReportedPages.end());
+        } else {
+          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
+                      Recorder.ReportedPages.end());
+        }
+      }
+    };
+
+    // Test marking by visiting each block.
+    {
+      auto DecompactPtr = [](scudo::uptr P) { return P; };
+      scudo::PageReleaseContext Context(
+          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+      Context.markFreeBlocks(FreeList, DecompactPtr, /*Base=*/0U);
+      for (const Batch &It : FreeList) {
+        for (scudo::u16 I = 0; I < It.getCount(); I++) {
+          scudo::uptr Block = It.get(I);
+          for (scudo::uptr Page = Block / PageSize;
+               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
+            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
+                                                       Page - BasePageOffset));
+          }
+        }
+      }
+
+      VerifyReleaseToOs(Context);
+    }
+
+    // Test range marking.
+    {
+      scudo::PageReleaseContext Context(
+          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U);
+      for (scudo::uptr Page = ReleaseBase / PageSize;
+           Page < RoundedRegionSize / PageSize; ++Page) {
+        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
+            Pages[Page]) {
+          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
+                                                   Page - BasePageOffset));
+        }
+      }
+
+      VerifyReleaseToOs(Context);
+    }
+
+    // Check the buffer size of PageMap.
+    {
+      scudo::PageReleaseContext Full(BlockSize, RegionSize,
+                                     /*NumberOfRegions=*/1U,
+                                     /*ReleaseSize=*/RegionSize);
+      Full.ensurePageMapAllocated();
+      scudo::PageReleaseContext Partial(
+          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+      Partial.ensurePageMapAllocated();
+
+      EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());
+    }
+
+    while (!FreeList.empty()) {
+      CurrentBatch = FreeList.front();
+      FreeList.pop_front();
+      delete CurrentBatch;
+    }
+  } // Iterate each size class
+}
+
 TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
   testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
 }
@@ -419,3 +553,10 @@ TEST(ScudoReleaseTest, PageMapMarkRange) {
   testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
   testPageMapMarkRange<scudo::SvelteSizeClassMap>();
 }
+
+TEST(ScudoReleaseTest, ReleasePartialRegion) {
+  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
+  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
+  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
+  testReleasePartialRegion<scudo::SvelteSizeClassMap>();
+}

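As a quick sanity check on the region-size arithmetic in
testReleasePartialRegion() above, here is a standalone sketch; the two
rounding helpers are assumed to match scudo's roundUp()/roundUpSlow()
semantics, and the block size is purely illustrative:

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    // roundUp assumes a power-of-two boundary; roundUpSlow does not.
    static uptr roundUp(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }
    static uptr roundUpSlow(uptr X, uptr B) { return ((X + B - 1) / B) * B; }

    int main() {
      const uptr PageSize = 4096;
      const uptr BlockSize = 176; // Illustrative size class, not 2^n.

      // Same formula as the test: at least two pages of whole blocks, plus
      // one more block that may straddle the rounded region end.
      const uptr RegionSize =
          roundUpSlow(roundUp(BlockSize, PageSize) * 2, BlockSize) + BlockSize;
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);

      assert(RegionSize >= 2 * PageSize);      // Region spans >= 2 pages.
      assert(RegionSize % BlockSize == 0);     // A whole number of blocks.
      assert(RoundedRegionSize >= RegionSize); // Last page may be partial.
      return 0;
    }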

        

