[compiler-rt] 657d297 - [scudo] Simplify markFreeBlocks

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 7 13:56:02 PST 2023


Author: Chia-hung Duan
Date: 2023-03-07T21:55:04Z
New Revision: 657d297a92aed929d37c8d7b148a6e848bc182df

URL: https://github.com/llvm/llvm-project/commit/657d297a92aed929d37c8d7b148a6e848bc182df
DIFF: https://github.com/llvm/llvm-project/commit/657d297a92aed929d37c8d7b148a6e848bc182df.diff

LOG: [scudo] Simplify markFreeBlocks

With memory group, we always mark the free blocks from the same region.
Therefore, we don't need to calculate the offset from base and determine
the region index. Also improve the way we deal with the last block in
the region so that the loop body is simpler.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D143303

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/release.h
    compiler-rt/lib/scudo/standalone/tests/release_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index ed3f77dc7f64b..1f06214753982 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -775,7 +775,7 @@ template <typename Config> class SizeClassAllocator32 {
         compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
 
     ReleaseRecorder Recorder(Base);
-    PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
+    PageReleaseContext Context(BlockSize, NumberOfRegions,
                                /*ReleaseSize=*/RegionSize);
 
     auto DecompactPtr = [](CompactPtrT CompactPtr) {
@@ -787,10 +787,9 @@ template <typename Config> class SizeClassAllocator32 {
       if (PushedBytesDelta * BlockSize < PageSize)
         continue;
 
+      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
       uptr AllocatedGroupSize =
-          decompactGroupBase(BG.CompactPtrGroupBase) == CurGroupBase
-              ? Sci->CurrentRegionAllocated
-              : GroupSize;
+          GroupBase == CurGroupBase ? Sci->CurrentRegionAllocated : GroupSize;
       if (AllocatedGroupSize == 0)
         continue;
 
@@ -810,34 +809,25 @@ template <typename Config> class SizeClassAllocator32 {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
 
       const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
-      // The first condition to do range marking is that all the blocks in the
-      // range need to be from the same region. In SizeClassAllocator32, this is
-      // true when GroupSize and RegionSize are the same. Another tricky case,
-      // while range marking, the last block in a region needs the logic to mark
-      // the last page. However, in SizeClassAllocator32, the RegionSize
-      // recorded in PageReleaseContext may be different from
-      // `CurrentRegionAllocated` of the current region. This exception excludes
-      // the chance of doing range marking for the current region.
-      const bool CanDoRangeMark =
-          GroupSize == RegionSize &&
-          decompactGroupBase(BG.CompactPtrGroupBase) != CurGroupBase;
-
-      if (CanDoRangeMark && NumBlocks == MaxContainedBlocks) {
+      const uptr RegionIndex = (GroupBase - Base) / RegionSize;
+
+      if (NumBlocks == MaxContainedBlocks) {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
             DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
 
-        const uptr From = decompactGroupBase(BG.CompactPtrGroupBase);
-        const uptr To = From + AllocatedGroupSize;
-        Context.markRangeAsAllCounted(From, To, Base);
+        const uptr To = GroupBase + AllocatedGroupSize;
+        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
+                                      AllocatedGroupSize);
       } else {
-        if (CanDoRangeMark)
-          DCHECK_LT(NumBlocks, MaxContainedBlocks);
+        DCHECK_LT(NumBlocks, MaxContainedBlocks);
 
         // Note that we don't always visit blocks in each BatchGroup so that we
         // may miss the chance of releasing certain pages that cross
         // BatchGroups.
-        Context.markFreeBlocks(BG.Batches, DecompactPtr, Base);
+        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
+                                       RegionIndex, AllocatedGroupSize,
+                                       /*MayContainLastBlockInRegion=*/true);
       }
     }
 

diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index ad24afcfefbd1..3df8749898484 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -935,9 +935,8 @@ template <typename Config> class SizeClassAllocator64 {
     const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
 
     ReleaseRecorder Recorder(Region->RegionBeg, ReleaseOffset, &Region->Data);
-    PageReleaseContext Context(
-        BlockSize, Region->AllocatedUser, /*NumberOfRegions=*/1U,
-        ReleaseRangeSize, ReleaseOffset);
+    PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+                               ReleaseRangeSize, ReleaseOffset);
 
     for (BatchGroup &BG : GroupToRelease) {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
@@ -949,6 +948,8 @@ template <typename Config> class SizeClassAllocator64 {
                                           ? GroupSize
                                           : AllocatedUserEnd - BatchGroupBase;
       const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
+      const bool MayContainLastBlockInRegion =
+          BatchGroupUsedEnd == AllocatedUserEnd;
       const bool BlockAlignedWithUsedEnd =
           (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
 
@@ -965,13 +966,16 @@ template <typename Config> class SizeClassAllocator64 {
             DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
 
         Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
-                                      Region->RegionBeg);
+                                      Region->RegionBeg, /*RegionIndex=*/0,
+                                      Region->AllocatedUser);
       } else {
         DCHECK_LT(NumBlocks, MaxContainedBlocks);
         // Note that we don't always visit blocks in each BatchGroup so that we
         // may miss the chance of releasing certain pages that cross
         // BatchGroups.
-        Context.markFreeBlocks(BG.Batches, DecompactPtr, Region->RegionBeg);
+        Context.markFreeBlocksInRegion(
+            BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
+            Region->AllocatedUser, MayContainLastBlockInRegion);
       }
     }
 

diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 245b65622c861..b0151a4d812be 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -266,10 +266,9 @@ template <class ReleaseRecorderT> class FreePagesRangeTracker {
 };
 
 struct PageReleaseContext {
-  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions,
-                     uptr ReleaseSize, uptr ReleaseOffset = 0)
-      : BlockSize(BlockSize), RegionSize(RegionSize),
-        NumberOfRegions(NumberOfRegions) {
+  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
+                     uptr ReleaseOffset = 0)
+      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
     PageSize = getPageSizeCached();
     if (BlockSize <= PageSize) {
       if (PageSize % BlockSize == 0) {
@@ -305,15 +304,11 @@ struct PageReleaseContext {
     // region marking (which includes the complexity of how to handle the last
     // block in a region). We may consider this after markFreeBlocks() accepts
     // only free blocks from the same region.
-    if (NumberOfRegions != 1) {
-      DCHECK_EQ(ReleaseSize, RegionSize);
+    if (NumberOfRegions != 1)
       DCHECK_EQ(ReleaseOffset, 0U);
-    }
 
     PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
     PageSizeLog = getLog2(PageSize);
-    RoundedRegionSize = roundUp(RegionSize, PageSize);
-    RoundedSize = NumberOfRegions * RoundedRegionSize;
     ReleasePageOffset = ReleaseOffset >> PageSizeLog;
   }
 
@@ -333,25 +328,17 @@ struct PageReleaseContext {
   // the blocks, we will just mark the page as all counted. Note the `From` and
   // `To` has to be page aligned but with one exception, if `To` is equal to the
   // RegionSize, it's not necessary to be aligned with page size.
-  void markRangeAsAllCounted(uptr From, uptr To, uptr Base) {
+  void markRangeAsAllCounted(uptr From, uptr To, uptr Base,
+                             const uptr RegionIndex, const uptr RegionSize) {
     DCHECK_LT(From, To);
+    DCHECK_LE(To, Base + RegionSize);
     DCHECK_EQ(From % PageSize, 0U);
+    DCHECK_LE(To - From, RegionSize);
 
     ensurePageMapAllocated();
 
-    const uptr FromOffset = From - Base;
-    const uptr ToOffset = To - Base;
-
-    const uptr RegionIndex =
-        NumberOfRegions == 1U ? 0 : FromOffset / RegionSize;
-    if (SCUDO_DEBUG) {
-      const uptr ToRegionIndex =
-          NumberOfRegions == 1U ? 0 : (ToOffset - 1) / RegionSize;
-      CHECK_EQ(RegionIndex, ToRegionIndex);
-    }
-
-    uptr FromInRegion = FromOffset - RegionIndex * RegionSize;
-    uptr ToInRegion = ToOffset - RegionIndex * RegionSize;
+    uptr FromInRegion = From - Base;
+    uptr ToInRegion = To - Base;
     uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);
 
     // The straddling block sits across entire range.
@@ -424,23 +411,26 @@ struct PageReleaseContext {
     }
   }
 
-  template<class TransferBatchT, typename DecompactPtrT>
-  void markFreeBlocks(const IntrusiveList<TransferBatchT> &FreeList,
-                      DecompactPtrT DecompactPtr, uptr Base) {
+  template <class TransferBatchT, typename DecompactPtrT>
+  void markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
+                              DecompactPtrT DecompactPtr, const uptr Base,
+                              const uptr RegionIndex, const uptr RegionSize,
+                              bool MayContainLastBlockInRegion) {
     ensurePageMapAllocated();
 
-    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
-
-    // The last block in a region may not use the entire page, so if it's free,
-    // we mark the following "pretend" memory block(s) as free.
-    auto markLastBlock = [this, LastBlockInRegion](const uptr RegionIndex) {
+    if (MayContainLastBlockInRegion) {
+      const uptr LastBlockInRegion =
+          ((RegionSize / BlockSize) - 1U) * BlockSize;
+      // The last block in a region may not use the entire page, we mark the
+      // following "pretend" memory block(s) as free in advance.
+      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
       uptr PInRegion = LastBlockInRegion + BlockSize;
       while (PInRegion < RoundedRegionSize) {
         PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                          getPageIndex(PInRegion + BlockSize - 1));
         PInRegion += BlockSize;
       }
-    };
+    }
 
     // Iterate over free chunks and count how many free chunks affect each
     // allocated page.
@@ -448,14 +438,9 @@ struct PageReleaseContext {
       // Each chunk affects one page only.
       for (const auto &It : FreeList) {
         for (u16 I = 0; I < It.getCount(); I++) {
-          const uptr P = DecompactPtr(It.get(I)) - Base;
-          if (P >= RoundedSize)
-            continue;
-          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
-          const uptr PInRegion = P - RegionIndex * RegionSize;
+          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+          DCHECK_LT(PInRegion, RegionSize);
           PageMap.inc(RegionIndex, getPageIndex(PInRegion));
-          if (PInRegion == LastBlockInRegion)
-            markLastBlock(RegionIndex);
         }
       }
     } else {
@@ -463,15 +448,9 @@ struct PageReleaseContext {
       DCHECK_GE(RegionSize, BlockSize);
       for (const auto &It : FreeList) {
         for (u16 I = 0; I < It.getCount(); I++) {
-          const uptr P = DecompactPtr(It.get(I)) - Base;
-          if (P >= RoundedSize)
-            continue;
-          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
-          uptr PInRegion = P - RegionIndex * RegionSize;
+          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
           PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                            getPageIndex(PInRegion + BlockSize - 1));
-          if (PInRegion == LastBlockInRegion)
-            markLastBlock(RegionIndex);
         }
       }
     }
@@ -480,7 +459,6 @@ struct PageReleaseContext {
   uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
 
   uptr BlockSize;
-  uptr RegionSize;
   uptr NumberOfRegions;
   // For partial region marking, some pages in front are not needed to be
   // counted.
@@ -488,8 +466,6 @@ struct PageReleaseContext {
   uptr PageSize;
   uptr PagesCount;
   uptr PageSizeLog;
-  uptr RoundedRegionSize;
-  uptr RoundedSize;
   uptr FullPagesBlockCountMax;
   bool SameBlockCountPerPage;
   RegionPageMap PageMap;
@@ -570,21 +546,6 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
   RangeTracker.finish();
 }
 
-// An overload releaseFreeMemoryToOS which doesn't require the page usage
-// information after releasing.
-template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
-          typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
-                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
-                      ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
-                      SkipRegionT SkipRegion) {
-  PageReleaseContext Context(BlockSize, /*ReleaseSize=*/RegionSize, RegionSize,
-                             NumberOfRegions);
-  Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
-  releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-}
-
 } // namespace scudo
 
 #endif // SCUDO_RELEASE_H_

diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
index 2e7382147006b..e549d0dca2967 100644
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -220,12 +220,12 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
     auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
     auto DecompactPtr = [](scudo::uptr P) { return P; };
     ReleasedPagesRecorder Recorder;
-    scudo::PageReleaseContext Context(BlockSize,
-                                      /*RegionSize=*/MaxBlocks * BlockSize,
-                                      /*NumberOfRegions=*/1U,
+    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                       /*ReleaseSize=*/MaxBlocks * BlockSize);
     ASSERT_FALSE(Context.hasBlockMarked());
-    Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
+    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
+                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
+                                   /*MayContainLastBlockInRegion=*/true);
     ASSERT_TRUE(Context.hasBlockMarked());
     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
     scudo::RegionPageMap &PageMap = Context.PageMap;
@@ -328,10 +328,10 @@ template <class SizeClassMap> void testPageMapMarkRange() {
       const scudo::uptr GroupBeg = GroupId * GroupSize;
       const scudo::uptr GroupEnd = GroupBeg + GroupSize;
 
-      scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                        /*NumberOfRegions=*/1U,
+      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                         /*ReleaseSize=*/RegionSize);
-      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U);
+      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
+                                    /*RegionIndex=*/0, RegionSize);
 
       scudo::uptr FirstBlock =
           ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;
@@ -398,10 +398,10 @@ template <class SizeClassMap> void testPageMapMarkRange() {
     } // Iterate each Group
 
     // Release the entire region. This is to ensure the last page is counted.
-    scudo::PageReleaseContext Context(BlockSize, RegionSize,
-                                      /*NumberOfRegions=*/1U,
+    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                       /*ReleaseSize=*/RegionSize);
-    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0);
+    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
+                                  /*RegionIndex=*/0, RegionSize);
     for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
       EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
   } // Iterate each size class
@@ -439,7 +439,7 @@ template <class SizeClassMap> void testReleasePartialRegion() {
     }
 
     // This follows the logic how we count the last page. It should be
-    // consistent with how markFreeBlocks() handles the last block.
+    // consistent with how markFreeBlocksInRegion() handles the last block.
     if (RoundedRegionSize % BlockSize != 0)
       ++Pages.back();
 
@@ -477,10 +477,12 @@ template <class SizeClassMap> void testReleasePartialRegion() {
     // Test marking by visiting each block.
     {
       auto DecompactPtr = [](scudo::uptr P) { return P; };
-      scudo::PageReleaseContext Context(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
-      Context.markFreeBlocks(FreeList, DecompactPtr, /*Base=*/0U);
+      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+                                        /*ReleaseSize=*/RegionSize - PageSize,
+                                        ReleaseBase);
+      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
+                                     /*RegionIndex=*/0, RegionSize,
+                                     /*MayContainLastBlockInRegion=*/true);
       for (const Batch &It : FreeList) {
         for (scudo::u16 I = 0; I < It.getCount(); I++) {
           scudo::uptr Block = It.get(I);
@@ -497,10 +499,11 @@ template <class SizeClassMap> void testReleasePartialRegion() {
 
     // Test range marking.
     {
-      scudo::PageReleaseContext Context(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
-      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U);
+      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+                                        /*ReleaseSize=*/RegionSize - PageSize,
+                                        ReleaseBase);
+      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
+                                    /*RegionIndex=*/0, RegionSize);
       for (scudo::uptr Page = ReleaseBase / PageSize;
            Page < RoundedRegionSize / PageSize; ++Page) {
         if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
@@ -515,13 +518,12 @@ template <class SizeClassMap> void testReleasePartialRegion() {
 
     // Check the buffer size of PageMap.
     {
-      scudo::PageReleaseContext Full(BlockSize, RegionSize,
-                                     /*NumberOfRegions=*/1U,
+      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/RegionSize);
       Full.ensurePageMapAllocated();
-      scudo::PageReleaseContext Partial(
-          BlockSize, RegionSize, /*NumberOfRegions=*/1U,
-          /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
+                                        /*ReleaseSize=*/RegionSize - PageSize,
+                                        ReleaseBase);
       Partial.ensurePageMapAllocated();
 
       EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());


        


More information about the llvm-commits mailing list