[compiler-rt] 9dc5b32 - Revert "Revert "[scudo] Fix the calculating of memory group usage""

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 28 23:27:44 PDT 2022


Author: Chia-hung Duan
Date: 2022-10-29T06:26:50Z
New Revision: 9dc5b322d591ff01499a06c95f0caaf14b59f2f3

URL: https://github.com/llvm/llvm-project/commit/9dc5b322d591ff01499a06c95f0caaf14b59f2f3
DIFF: https://github.com/llvm/llvm-project/commit/9dc5b322d591ff01499a06c95f0caaf14b59f2f3.diff

LOG: Revert "Revert "[scudo] Fix the calculating of memory group usage""

This reverts commit 69fe7abb393ba7d6ee9c8ff1429316845b5bad37.

Fixed the argument order when calling batchGroupBase()

Differential Revision: https://reviews.llvm.org/D136995
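
For context on the relanded fix: as the diff below shows, batchGroupBase() takes (Base, GroupId), but the pre-fix call site passed (BG.GroupId, CompactPtrBase). Both parameters are uptr, so the swap compiles without a diagnostic and silently computes a bogus group base. A minimal standalone sketch of the mistake (GroupSizeLog and the addresses are made-up placeholders):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
constexpr uptr GroupSizeLog = 18; // placeholder; real configs pick their own

// Same shape as the allocator's helper: (GroupId << GroupSizeLog) + Base.
static uptr batchGroupBase(uptr Base, uptr GroupId) {
  return (GroupId << GroupSizeLog) + Base;
}

int main() {
  const uptr CompactPtrBase = 0x100000; // made-up base address
  const uptr GroupId = 3;
  // Correct order: Base first, then GroupId.
  printf("fixed:   0x%llx\n",
         (unsigned long long)batchGroupBase(CompactPtrBase, GroupId));
  // The pre-fix call swapped the arguments: the base gets shifted and the
  // group id gets added. No compiler warning catches this, since both
  // parameters have the same type.
  printf("swapped: 0x%llx\n",
         (unsigned long long)batchGroupBase(GroupId, CompactPtrBase));
}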

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 6e791a127df66..a3d908cee9e52 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -708,8 +708,10 @@ template <typename Config> class SizeClassAllocator32 {
       if (AllocatedGroupSize == 0)
         continue;
 
+      // TransferBatches are pushed in front of BG.Batches. The first one may
+      // not have all caches used.
       const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.back()->getCount();
+                             BG.Batches.front()->getCount();
       const uptr BytesInBG = NumBlocks * BlockSize;
       // Given the randomness property, we try to release the pages only if the
       // bytes used by free blocks exceed certain proportion of allocated
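
Why the fix reads front() instead of back(): TransferBatches are pushed at the front of BG.Batches, so every batch except the first holds exactly MaxCachedPerBatch blocks and only the front batch can be partially filled. A minimal sketch of the counting logic, with a std::deque standing in for the intrusive batch list and made-up values:

#include <cassert>
#include <cstddef>
#include <deque>

struct TransferBatch {
  size_t Count = 0;
  size_t getCount() const { return Count; }
};

int main() {
  const size_t MaxCachedPerBatch = 13; // placeholder capacity
  std::deque<TransferBatch> Batches;

  // Push order mirrors the allocator: new batches go to the front, so only
  // the front batch can be partially filled.
  Batches.push_front({MaxCachedPerBatch});
  Batches.push_front({MaxCachedPerBatch});
  Batches.push_front({5}); // newest batch, only partially filled

  // Fixed formula: every batch but the first is full, plus the partial count
  // of the front batch. Reading back() here, as the pre-fix code did, would
  // count 13 instead of 5 and overestimate the bytes held by free blocks.
  const size_t NumBlocks =
      (Batches.size() - 1) * MaxCachedPerBatch + Batches.front().getCount();
  assert(NumBlocks == 2 * MaxCachedPerBatch + 5);
}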

diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index a3684c9d45864..d06a047fa6596 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -369,7 +369,7 @@ template <typename Config> class SizeClassAllocator64 {
   }
 
   static uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    return CompactPtr >> (GroupSizeLog - CompactPtrScale);
+    return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
   }
   static uptr batchGroupBase(uptr Base, uptr GroupId) {
     return (GroupId << GroupSizeLog) + Base;
@@ -702,16 +702,49 @@ template <typename Config> class SizeClassAllocator64 {
           BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
       if (PushedBytesDelta * BlockSize < PageSize)
         continue;
+
+      // Group boundary does not necessarily have the same alignment as Region.
+      // It may sit across a Region boundary. Which means that we may have the
+      // following two cases,
+      //
+      // 1. Group boundary sits before RegionBeg.
+      //
+      //                (BatchGroupBeg)
+      // batchGroupBase  RegionBeg       BatchGroupEnd
+      //        |            |                |
+      //        v            v                v
+      //        +------------+----------------+
+      //         \                           /
+      //          ------   GroupSize   ------
+      //
+      // 2. Group boundary sits after RegionBeg.
+      //
+      //               (BatchGroupBeg)
+      //    RegionBeg  batchGroupBase               BatchGroupEnd
+      //        |           |                             |
+      //        v           v                             v
+      //        +-----------+-----------------------------+
+      //                     \                           /
+      //                      ------   GroupSize   ------
+      //
+      // Note that in the first case, the group range before RegionBeg is never
+      // used. Therefore, while calculating the used group size, we should
+      // exclude that part to get the correct size.
+      const uptr BatchGroupBeg =
+          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
+      DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
       const uptr BatchGroupEnd =
-          batchGroupBase(BG.GroupId, CompactPtrBase) + GroupSize;
+          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? GroupSize
-                                          : AllocatedUserEnd - BatchGroupEnd;
+                                          ? BatchGroupEnd - BatchGroupBeg
+                                          : AllocatedUserEnd - BatchGroupBeg;
       if (AllocatedGroupSize == 0)
         continue;
 
+      // TransferBatches are pushed in front of BG.Batches. The first one may
+      // not have all caches used.
       const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.back()->getCount();
+                             BG.Batches.front()->getCount();
       const uptr BytesInBG = NumBlocks * BlockSize;
       // Given the randomness property, we try to release the pages only if the
       // bytes used by free blocks exceed certain proportion of group size. Note
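
The effect of the new clamping in isolation: BatchGroupBeg is the group base clamped up to RegionBeg, so the unused range before the region (case 1 in the comment above) no longer inflates AllocatedGroupSize, and the old underflowing subtraction AllocatedUserEnd - BatchGroupEnd is gone. A standalone sketch of the arithmetic with made-up addresses:

#include <algorithm>
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

int main() {
  const uptr GroupSize = 1 << 18;  // 0x40000, placeholder group size
  const uptr GroupBase = 0x40000;  // batchGroupBase(...) result (made up)
  const uptr RegionBeg = 0x50000;  // region begins inside this group (case 1)
  const uptr AllocatedUserEnd = 0x90000;

  // Clamp the group start to the region start: [GroupBase, RegionBeg) was
  // never handed out, so it must not count toward the group's usage.
  const uptr BatchGroupBeg = std::max(GroupBase, RegionBeg);
  const uptr BatchGroupEnd = GroupBase + GroupSize;
  const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                      ? BatchGroupEnd - BatchGroupBeg
                                      : AllocatedUserEnd - BatchGroupBeg;

  // 0x30000 here; the pre-fix code used the full GroupSize (0x40000) and
  // over-counted by RegionBeg - GroupBase.
  assert(AllocatedGroupSize == GroupSize - (RegionBeg - GroupBase));
}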
