[compiler-rt] 56dafd0 - [scudo] Add PageReleaseContext to convey page usage status.

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 13 16:35:20 PDT 2022


Author: Chia-hung Duan
Date: 2022-10-13T23:35:06Z
New Revision: 56dafd024ca7f586a31be3ee858e6b3588a1e131

URL: https://github.com/llvm/llvm-project/commit/56dafd024ca7f586a31be3ee858e6b3588a1e131
DIFF: https://github.com/llvm/llvm-project/commit/56dafd024ca7f586a31be3ee858e6b3588a1e131.diff

LOG: [scudo] Add PageReleaseContext to convey page usage status.

PageReleaseContext contains all the information needed for determining if
a page can be released. Splitting out the context makes it easier to
support heterogeneous free lists in the future. Also rename
PackedCounterArray to RegionPageMap.

Reviewed By: cryptoad, cferris

Differential Revision: https://reviews.llvm.org/D133895
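
For callers, the refactor turns the old single-call release into a
two-step flow: build a PageReleaseContext, mark the free blocks, then
release. The sketch below shows how that composes; it mirrors the
convenience overload added at the bottom of release.h, but the wrapper
function itself is illustrative (the real callers live in primary32.h
and primary64.h):

    // Sketch only: shows how the new two-step API composes. The wrapper and
    // its template parameters are hypothetical; the scudo calls are the ones
    // added by this commit. Assumes scudo's release.h (and its dependencies)
    // are included.
    template <class TransferBatchT, class ReleaseRecorderT,
              typename DecompactPtrT, typename SkipRegionT>
    void releaseSketch(const scudo::IntrusiveList<TransferBatchT> &FreeList,
                       scudo::uptr RegionSize, scudo::uptr NumberOfRegions,
                       scudo::uptr BlockSize, ReleaseRecorderT &Recorder,
                       DecompactPtrT DecompactPtr, SkipRegionT SkipRegion) {
      // 1) Compute the page geometry and allocate the RegionPageMap.
      scudo::PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
      // 2) Count how many free blocks land on each page.
      Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
      // 3) Release pages whose counters reach the per-page block count. The
      //    context outlives the call, so Context.PageMap.isAllCounted() can
      //    still be queried afterwards.
      scudo::releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    }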

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/release.cpp
    compiler-rt/lib/scudo/standalone/release.h
    compiler-rt/lib/scudo/standalone/tests/release_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 9bc97ec539822..d9905a0571747 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -478,7 +478,7 @@ template <typename Config> class SizeClassAllocator32 {
       return reinterpret_cast<uptr>(CompactPtr);
     };
     releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
-                          &Recorder, DecompactPtr, SkipRegion);
+                          Recorder, DecompactPtr, SkipRegion);
     if (Recorder.getReleasedRangesCount() > 0) {
       Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
       Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();

diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index e03c2b56a9b79..d52a1d9a6c4e3 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -474,7 +474,7 @@ template <typename Config> class SizeClassAllocator64 {
     };
     auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
     releaseFreeMemoryToOS(Region->FreeList, Region->AllocatedUser, 1U,
-                          BlockSize, &Recorder, DecompactPtr, SkipRegion);
+                          BlockSize, Recorder, DecompactPtr, SkipRegion);
 
     if (Recorder.getReleasedRangesCount() > 0) {
       Region->ReleaseInfo.PushedBlocksAtLastRelease =

diff --git a/compiler-rt/lib/scudo/standalone/release.cpp b/compiler-rt/lib/scudo/standalone/release.cpp
index 5d7c6c5fc110b..3f40dbec6d7a8 100644
--- a/compiler-rt/lib/scudo/standalone/release.cpp
+++ b/compiler-rt/lib/scudo/standalone/release.cpp
@@ -10,7 +10,7 @@
 
 namespace scudo {
 
-HybridMutex PackedCounterArray::Mutex = {};
-uptr PackedCounterArray::StaticBuffer[PackedCounterArray::StaticBufferCount];
+HybridMutex RegionPageMap::Mutex = {};
+uptr RegionPageMap::StaticBuffer[RegionPageMap::StaticBufferCount];
 
 } // namespace scudo

diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index fdb62b4412643..12cc8d2c7173b 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -41,22 +41,49 @@ class ReleaseRecorder {
   MapPlatformData *Data = nullptr;
 };
 
-// A packed array of Counters. Each counter occupies 2^N bits, enough to store
-// counter's MaxValue. Ctor will try to use a static buffer first, and if that
-// fails (the buffer is too small or already locked), will allocate the
+// A Region page map is used to record the usage of pages in the regions. It
+// implements a packed array of Counters. Each counter occupies 2^N bits, enough
+// to store counter's MaxValue. Ctor will try to use a static buffer first, and
+// if that fails (the buffer is too small or already locked), will allocate the
 // required Buffer via map(). The caller is expected to check whether the
 // initialization was successful by checking isAllocated() result. For
 // performance sake, none of the accessors check the validity of the arguments,
 // It is assumed that Index is always in [0, N) range and the value is not
 // incremented past MaxValue.
-class PackedCounterArray {
+class RegionPageMap {
 public:
-  PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
-                     uptr MaxValue)
-      : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
-    DCHECK_GT(Regions, 0);
-    DCHECK_GT(NumCounters, 0);
+  RegionPageMap()
+      : Regions(0),
+        NumCounters(0),
+        CounterSizeBitsLog(0),
+        CounterMask(0),
+        PackingRatioLog(0),
+        BitOffsetMask(0),
+        SizePerRegion(0),
+        BufferSize(0),
+        Buffer(nullptr) {}
+  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
+    reset(NumberOfRegions, CountersPerRegion, MaxValue);
+  }
+  ~RegionPageMap() {
+    if (!isAllocated())
+      return;
+    if (Buffer == &StaticBuffer[0])
+      Mutex.unlock();
+    else
+      unmap(reinterpret_cast<void *>(Buffer),
+            roundUpTo(BufferSize, getPageSizeCached()));
+    Buffer = nullptr;
+  }
+
+  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
+    DCHECK_GT(NumberOfRegion, 0);
+    DCHECK_GT(CountersPerRegion, 0);
     DCHECK_GT(MaxValue, 0);
+
+    Regions = NumberOfRegion;
+    NumCounters = CountersPerRegion;
+
     constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
     // Rounding counter storage size up to the power of two allows for using
     // bit shifts calculating particular counter's Index and offset.
@@ -85,15 +112,6 @@ class PackedCounterArray {
               "scudo:counters", MAP_ALLOWNOMEM, &MapData));
     }
   }
-  ~PackedCounterArray() {
-    if (!isAllocated())
-      return;
-    if (Buffer == &StaticBuffer[0])
-      Mutex.unlock();
-    else
-      unmap(reinterpret_cast<void *>(Buffer),
-            roundUpTo(BufferSize, getPageSizeCached()), 0, &MapData);
-  }
 
   bool isAllocated() const { return !!Buffer; }
 
@@ -112,6 +130,7 @@ class PackedCounterArray {
     const uptr Index = I >> PackingRatioLog;
     const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
     DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+    DCHECK_EQ(isAllCounted(Region, I), false);
     Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                               << BitOffset;
   }
@@ -123,13 +142,28 @@ class PackedCounterArray {
       inc(Region, I);
   }
 
+  // Set the counter to the max value. Note that the max number of blocks in a
+  // page may vary. To provide an easier way to tell if all the blocks are
+  // counted for different pages, set to the same max value to denote the
+  // all-counted status.
+  void setAsAllCounted(uptr Region, uptr I) const {
+    DCHECK_LE(get(Region, I), CounterMask);
+    const uptr Index = I >> PackingRatioLog;
+    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+    Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
+  }
+  bool isAllCounted(uptr Region, uptr I) const {
+    return get(Region, I) == CounterMask;
+  }
+
   uptr getBufferSize() const { return BufferSize; }
 
   static const uptr StaticBufferCount = 2048U;
 
 private:
-  const uptr Regions;
-  const uptr NumCounters;
+  uptr Regions;
+  uptr NumCounters;
   uptr CounterSizeBitsLog;
   uptr CounterMask;
   uptr PackingRatioLog;
@@ -146,11 +180,11 @@ class PackedCounterArray {
 
 template <class ReleaseRecorderT> class FreePagesRangeTracker {
 public:
-  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
       : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
 
-  void processNextPage(bool Freed) {
-    if (Freed) {
+  void processNextPage(bool Released) {
+    if (Released) {
       if (!InRange) {
         CurrentRangeStatePage = CurrentPage;
         InRange = true;
@@ -171,113 +205,138 @@ template <class ReleaseRecorderT> class FreePagesRangeTracker {
 private:
   void closeOpenedRange() {
     if (InRange) {
-      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
-                                     (CurrentPage << PageSizeLog));
+      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+                                    (CurrentPage << PageSizeLog));
       InRange = false;
     }
   }
 
-  ReleaseRecorderT *const Recorder;
+  ReleaseRecorderT &Recorder;
   const uptr PageSizeLog;
   bool InRange = false;
   uptr CurrentPage = 0;
   uptr CurrentRangeStatePage = 0;
 };
 
-template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
-          typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
-                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
-                      ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
-                      SkipRegionT SkipRegion) {
-  const uptr PageSize = getPageSizeCached();
-
-  // Figure out the number of chunks per page and whether we can take a fast
-  // path (the number of chunks per page is the same for all pages).
-  uptr FullPagesBlockCountMax;
-  bool SameBlockCountPerPage;
-  if (BlockSize <= PageSize) {
-    if (PageSize % BlockSize == 0) {
-      // Same number of chunks per page, no cross overs.
-      FullPagesBlockCountMax = PageSize / BlockSize;
-      SameBlockCountPerPage = true;
-    } else if (BlockSize % (PageSize % BlockSize) == 0) {
-      // Some chunks are crossing page boundaries, which means that the page
-      // contains one or two partial chunks, but all pages contain the same
-      // number of chunks.
-      FullPagesBlockCountMax = PageSize / BlockSize + 1;
-      SameBlockCountPerPage = true;
-    } else {
-      // Some chunks are crossing page boundaries, which means that the page
-      // contains one or two partial chunks.
-      FullPagesBlockCountMax = PageSize / BlockSize + 2;
-      SameBlockCountPerPage = false;
-    }
-  } else {
-    if (BlockSize % PageSize == 0) {
-      // One chunk covers multiple pages, no cross overs.
-      FullPagesBlockCountMax = 1;
-      SameBlockCountPerPage = true;
+struct PageReleaseContext {
+  PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
+      BlockSize(BlockSize),
+      RegionSize(RegionSize),
+      NumberOfRegions(NumberOfRegions) {
+    PageSize = getPageSizeCached();
+    if (BlockSize <= PageSize) {
+      if (PageSize % BlockSize == 0) {
+        // Same number of chunks per page, no cross overs.
+        FullPagesBlockCountMax = PageSize / BlockSize;
+        SameBlockCountPerPage = true;
+      } else if (BlockSize % (PageSize % BlockSize) == 0) {
+        // Some chunks are crossing page boundaries, which means that the page
+        // contains one or two partial chunks, but all pages contain the same
+        // number of chunks.
+        FullPagesBlockCountMax = PageSize / BlockSize + 1;
+        SameBlockCountPerPage = true;
+      } else {
+        // Some chunks are crossing page boundaries, which means that the page
+        // contains one or two partial chunks.
+        FullPagesBlockCountMax = PageSize / BlockSize + 2;
+        SameBlockCountPerPage = false;
+      }
     } else {
-      // One chunk covers multiple pages, Some chunks are crossing page
-      // boundaries. Some pages contain one chunk, some contain two.
-      FullPagesBlockCountMax = 2;
-      SameBlockCountPerPage = false;
+      if (BlockSize % PageSize == 0) {
+        // One chunk covers multiple pages, no cross overs.
+        FullPagesBlockCountMax = 1;
+        SameBlockCountPerPage = true;
+      } else {
+        // One chunk covers multiple pages, some chunks are crossing page
+        // boundaries. Some pages contain one chunk, some contain two.
+        FullPagesBlockCountMax = 2;
+        SameBlockCountPerPage = false;
+      }
     }
+
+    PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+    PageSizeLog = getLog2(PageSize);
+    RoundedRegionSize = PagesCount << PageSizeLog;
+    RoundedSize = NumberOfRegions * RoundedRegionSize;
+
+    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
+    DCHECK(PageMap.isAllocated());
   }
 
-  const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
-  PackedCounterArray Counters(NumberOfRegions, PagesCount,
-                              FullPagesBlockCountMax);
-  if (!Counters.isAllocated())
-    return;
-
-  const uptr PageSizeLog = getLog2(PageSize);
-  const uptr RoundedRegionSize = PagesCount << PageSizeLog;
-  const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;
-
-  // Iterate over free chunks and count how many free chunks affect each
-  // allocated page.
-  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
-    // Each chunk affects one page only.
-    for (const auto &It : FreeList) {
-      for (u16 I = 0; I < It.getCount(); I++) {
-        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
-        if (P >= RoundedSize)
-          continue;
-        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
-        const uptr PInRegion = P - RegionIndex * RegionSize;
-        Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
+  template<class TransferBatchT, typename DecompactPtrT>
+  void markFreeBlocks(const IntrusiveList<TransferBatchT> &FreeList,
+                      DecompactPtrT DecompactPtr, uptr Base) {
+    // Iterate over free chunks and count how many free chunks affect each
+    // allocated page.
+    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+      // Each chunk affects one page only.
+      for (const auto &It : FreeList) {
+        for (u16 I = 0; I < It.getCount(); I++) {
+          const uptr P = DecompactPtr(It.get(I)) - Base;
+          if (P >= RoundedSize)
+            continue;
+          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+          const uptr PInRegion = P - RegionIndex * RegionSize;
+          PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
+        }
       }
-    }
-  } else {
-    // In all other cases chunks might affect more than one page.
-    DCHECK_GE(RegionSize, BlockSize);
-    const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
-    for (const auto &It : FreeList) {
-      for (u16 I = 0; I < It.getCount(); I++) {
-        const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
-        if (P >= RoundedSize)
-          continue;
-        const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
-        uptr PInRegion = P - RegionIndex * RegionSize;
-        Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
-                          (PInRegion + BlockSize - 1) >> PageSizeLog);
-        // The last block in a region might straddle a page, so if it's
-        // free, we mark the following "pretend" memory block(s) as free.
-        if (PInRegion == LastBlockInRegion) {
-          PInRegion += BlockSize;
-          while (PInRegion < RoundedRegionSize) {
-            Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
-                              (PInRegion + BlockSize - 1) >> PageSizeLog);
+    } else {
+      // In all other cases chunks might affect more than one page.
+      DCHECK_GE(RegionSize, BlockSize);
+      const uptr LastBlockInRegion =
+          ((RegionSize / BlockSize) - 1U) * BlockSize;
+      for (const auto &It : FreeList) {
+        for (u16 I = 0; I < It.getCount(); I++) {
+          const uptr P = DecompactPtr(It.get(I)) - Base;
+          if (P >= RoundedSize)
+            continue;
+          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+          uptr PInRegion = P - RegionIndex * RegionSize;
+          PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+                            (PInRegion + BlockSize - 1) >> PageSizeLog);
+          // The last block in a region might straddle a page, so if it's
+          // free, we mark the following "pretend" memory block(s) as free.
+          if (PInRegion == LastBlockInRegion) {
             PInRegion += BlockSize;
+            while (PInRegion < RoundedRegionSize) {
+              PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+                                (PInRegion + BlockSize - 1) >> PageSizeLog);
+              PInRegion += BlockSize;
+            }
           }
         }
       }
     }
   }
 
+  uptr BlockSize;
+  uptr RegionSize;
+  uptr NumberOfRegions;
+  uptr PageSize;
+  uptr PagesCount;
+  uptr PageSizeLog;
+  uptr RoundedRegionSize;
+  uptr RoundedSize;
+  uptr FullPagesBlockCountMax;
+  bool SameBlockCountPerPage;
+  RegionPageMap PageMap;
+};
+
+// Try to release the pages which don't have any in-use blocks, i.e., all the
+// blocks on them are free. The `PageMap` records the number of free blocks on
+// each page.
+template <class ReleaseRecorderT, typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(PageReleaseContext &Context,
+                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
+  const uptr PageSize = Context.PageSize;
+  const uptr BlockSize = Context.BlockSize;
+  const uptr PagesCount = Context.PagesCount;
+  const uptr NumberOfRegions = Context.NumberOfRegions;
+  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
+  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
+  RegionPageMap &PageMap = Context.PageMap;
+
   // Iterate over pages detecting ranges of pages with chunk Counters equal
   // to the expected number of chunks for the particular page.
   FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
@@ -288,9 +347,12 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
         RangeTracker.skipPages(PagesCount);
         continue;
       }
-      for (uptr J = 0; J < PagesCount; J++)
-        RangeTracker.processNextPage(Counters.get(I, J) ==
-                                     FullPagesBlockCountMax);
+      for (uptr J = 0; J < PagesCount; J++) {
+        const bool CanRelease = PageMap.get(I, J) == FullPagesBlockCountMax;
+        if (CanRelease)
+          PageMap.setAsAllCounted(I, J);
+        RangeTracker.processNextPage(CanRelease);
+      }
     }
   } else {
     // Slow path, go through the pages keeping count how many chunks affect
@@ -322,13 +384,30 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
           }
         }
         PrevPageBoundary = PageBoundary;
-        RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
+        const bool CanRelease = PageMap.get(I, J) == BlocksPerPage;
+        if (CanRelease)
+          PageMap.setAsAllCounted(I, J);
+        RangeTracker.processNextPage(CanRelease);
       }
     }
   }
   RangeTracker.finish();
 }
 
+// An overload of releaseFreeMemoryToOS which doesn't require the page usage
+// information after releasing.
+template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
+          typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
+                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
+                      ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
+                      SkipRegionT SkipRegion) {
+  PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+  Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
+  releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+}
+
 } // namespace scudo
 
 #endif // SCUDO_RELEASE_H_

diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
index 3b62b639e46b2..0c3c043d1e087 100644
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -18,19 +18,19 @@
 #include <random>
 #include <set>
 
-TEST(ScudoReleaseTest, PackedCounterArray) {
+TEST(ScudoReleaseTest, RegionPageMap) {
   for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
     // Various valid counter's max values packed into one word.
-    scudo::PackedCounterArray Counters2N(1U, 1U, 1UL << I);
-    EXPECT_EQ(sizeof(scudo::uptr), Counters2N.getBufferSize());
+    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
+    EXPECT_EQ(sizeof(scudo::uptr), PageMap2N.getBufferSize());
     // Check the "all bit set" values too.
-    scudo::PackedCounterArray Counters2N1_1(1U, 1U, ~0UL >> I);
-    EXPECT_EQ(sizeof(scudo::uptr), Counters2N1_1.getBufferSize());
+    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
+    EXPECT_EQ(sizeof(scudo::uptr), PageMap2N1_1.getBufferSize());
     // Verify the packing ratio, the counter is Expected to be packed into the
     // closest power of 2 bits.
-    scudo::PackedCounterArray Counters(1U, SCUDO_WORDSIZE, 1UL << I);
+    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
     EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
-              Counters.getBufferSize());
+              PageMap.getBufferSize());
   }
 
   // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
@@ -38,20 +38,20 @@ TEST(ScudoReleaseTest, PackedCounterArray) {
     // Make sure counters request one memory page for the buffer.
     const scudo::uptr NumCounters =
         (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
-    scudo::PackedCounterArray Counters(1U, NumCounters,
+    scudo::RegionPageMap PageMap(1U, NumCounters,
                                        1UL << ((1UL << I) - 1));
-    Counters.inc(0U, 0U);
+    PageMap.inc(0U, 0U);
     for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
-      EXPECT_EQ(0UL, Counters.get(0U, C));
-      Counters.inc(0U, C);
-      EXPECT_EQ(1UL, Counters.get(0U, C - 1));
+      EXPECT_EQ(0UL, PageMap.get(0U, C));
+      PageMap.inc(0U, C);
+      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
     }
-    EXPECT_EQ(0UL, Counters.get(0U, NumCounters - 1));
-    Counters.inc(0U, NumCounters - 1);
+    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
+    PageMap.inc(0U, NumCounters - 1);
     if (I > 0) {
-      Counters.incRange(0u, 0U, NumCounters - 1);
+      PageMap.incRange(0u, 0U, NumCounters - 1);
       for (scudo::uptr C = 0; C < NumCounters; C++)
-        EXPECT_EQ(2UL, Counters.get(0U, C));
+        EXPECT_EQ(2UL, PageMap.get(0U, C));
     }
   }
 }
@@ -102,7 +102,7 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
 
   for (auto TestCase : TestCases) {
     StringRangeRecorder Recorder;
-    RangeTracker Tracker(&Recorder);
+    RangeTracker Tracker(Recorder);
     for (scudo::uptr I = 0; TestCase[I] != 0; I++)
       Tracker.processNextPage(TestCase[I] == 'x');
     Tracker.finish();
@@ -152,6 +152,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
   typedef FreeBatch<SizeClassMap> Batch;
   const scudo::uptr PagesCount = 1024;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
   std::mt19937 R;
   scudo::u32 RandState = 42;
 
@@ -195,8 +196,12 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
     auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
     auto DecompactPtr = [](scudo::uptr P) { return P; };
     ReleasedPagesRecorder Recorder;
-    releaseFreeMemoryToOS(FreeList, MaxBlocks * BlockSize, 1U, BlockSize,
-                          &Recorder, DecompactPtr, SkipRegion);
+    scudo::PageReleaseContext Context(BlockSize,
+                                      /*RegionSize=*/MaxBlocks * BlockSize,
+                                      /*NumberOfRegions=*/1U);
+    Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
+    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+    scudo::RegionPageMap &PageMap = Context.PageMap;
 
     // Verify that there are no released pages touched by used chunks and all
     // ranges of free chunks big enough to contain the entire memory pages had
@@ -223,6 +228,8 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
           const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                     Recorder.ReportedPages.end();
           EXPECT_EQ(false, PageReleased);
+          EXPECT_EQ(false,
+                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
         }
 
         if (InFreeRange) {
@@ -234,6 +241,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
             const bool PageReleased =
                 Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
             EXPECT_EQ(true, PageReleased);
+            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
             VerifiedReleasedPages++;
             P += PageSize;
           }
@@ -251,6 +259,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
         const bool PageReleased =
             Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
         EXPECT_EQ(true, PageReleased);
+        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
         VerifiedReleasedPages++;
         P += PageSize;
       }
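
The bit-packing arithmetic that RegionPageMap inherits from
PackedCounterArray is easy to lose in the diff: each counter occupies
2^N bits at a computed word index and bit offset. Below is a minimal,
self-contained sketch of inc()/get()/setAsAllCounted() with illustrative
constants; scudo derives the real values from MaxValue in reset(), and a
64-bit word size is assumed here.

    // Standalone sketch of the counter packing (illustrative constants,
    // not scudo's real setup; assumes a 64-bit uptr).
    #include <cassert>
    #include <cstdint>

    int main() {
      using uptr = uintptr_t;
      const uptr CounterSizeBitsLog = 2;  // 4 bits per counter (MaxValue <= 15)
      const uptr CounterMask = 0xf;       // low 4 bits of a counter
      const uptr PackingRatioLog = 4;     // 64 / 4 = 16 counters per word
      const uptr BitOffsetMask = (1 << PackingRatioLog) - 1;
      uptr Buffer[1] = {0};
      const uptr I = 5;                   // counter index within the region
      // inc(): locate the word and the counter's bit offset, then add one.
      const uptr Index = I >> PackingRatioLog;
      const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
      Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
      // get(): shift back down and mask off neighboring counters.
      assert(((Buffer[Index] >> BitOffset) & CounterMask) == 1);
      // setAsAllCounted(): saturate the counter to CounterMask, the shared
      // "all blocks on this page are counted" marker.
      Buffer[Index] |= CounterMask << BitOffset;
      assert(((Buffer[Index] >> BitOffset) & CounterMask) == CounterMask);
      return 0;
    }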

More information about the llvm-commits mailing list