[compiler-rt] 5beaa73 - [scudo] Group poppedBlocks/pushedBlocks into BlocksInfo (NFC)

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 15 15:39:49 PDT 2023


Author: Chia-hung Duan
Date: 2023-06-15T22:38:48Z
New Revision: 5beaa7339fc5441d33bfb505703e56bbda3801e8

URL: https://github.com/llvm/llvm-project/commit/5beaa7339fc5441d33bfb505703e56bbda3801e8
DIFF: https://github.com/llvm/llvm-project/commit/5beaa7339fc5441d33bfb505703e56bbda3801e8.diff

LOG: [scudo] Group poppedBlocks/pushedBlocks into BlocksInfo (NFC)

Create a new BlocksInfo to contain a list of blocks together with the
poppedBlocks and pushedBlocks counters. This is preparation for adding a
new lock for operations on the freelist.

Differential Revision: https://reviews.llvm.org/D149143
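
For readers skimming the diff, the shape of the change is identical in
primary32.h and primary64.h: the per-class (or per-region) counters and the
freelist head move into a single struct. The before/after below is extracted
from the diff itself (uptr, SinglyLinkedList, BatchGroup and GUARDED_BY are
scudo's own; only the side-by-side presentation is editorial):

// Before: the counters and the freelist were separate members.
struct SizeClassStats {  // named RegionStats in primary64.h
  uptr PoppedBlocks;
  uptr PushedBlocks;
};
// SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
// SizeClassStats Stats GUARDED_BY(Mutex);

// After: everything a freelist operation touches sits behind one member,
// so a future dedicated freelist lock can guard FreeListInfo alone.
struct BlocksInfo {
  SinglyLinkedList<BatchGroup> BlockList = {};
  uptr PoppedBlocks = 0;
  uptr PushedBlocks = 0;
};
// BlocksInfo FreeListInfo GUARDED_BY(Mutex);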

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index f67a0498ada87..2ef26a55bf7c6 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -154,7 +154,7 @@ template <typename Config> class SizeClassAllocator32 {
       // if `populateFreeList` succeeded, we are supposed to get free blocks.
       DCHECK_NE(B, nullptr);
     }
-    Sci->Stats.PoppedBlocks += B->getCount();
+    Sci->FreeListInfo.PoppedBlocks += B->getCount();
     return B;
   }
 
@@ -175,7 +175,7 @@ template <typename Config> class SizeClassAllocator32 {
       if (Size == 1 && !populateFreeList(C, ClassId, Sci))
         return;
       pushBlocksImpl(C, ClassId, Sci, Array, Size);
-      Sci->Stats.PushedBlocks += Size;
+      Sci->FreeListInfo.PushedBlocks += Size;
       return;
     }
 
@@ -201,7 +201,7 @@ template <typename Config> class SizeClassAllocator32 {
     ScopedLock L(Sci->Mutex);
     pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
 
-    Sci->Stats.PushedBlocks += Size;
+    Sci->FreeListInfo.PushedBlocks += Size;
     if (ClassId != SizeClassMap::BatchClassId)
       releaseToOSMaybe(Sci, ClassId);
   }
@@ -267,8 +267,8 @@ template <typename Config> class SizeClassAllocator32 {
       SizeClassInfo *Sci = getSizeClassInfo(I);
       ScopedLock L(Sci->Mutex);
       TotalMapped += Sci->AllocatedUser;
-      PoppedBlocks += Sci->Stats.PoppedBlocks;
-      PushedBlocks += Sci->Stats.PushedBlocks;
+      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
+      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
     }
     Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                 "remains %zu\n",
@@ -322,11 +322,6 @@ template <typename Config> class SizeClassAllocator32 {
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
   typedef FlatByteMap<NumRegions> ByteMap;
 
-  struct SizeClassStats {
-    uptr PoppedBlocks;
-    uptr PushedBlocks;
-  };
-
   struct ReleaseToOsInfo {
     uptr BytesInFreeListAtLastCheckpoint;
     uptr RangesReleased;
@@ -334,12 +329,17 @@ template <typename Config> class SizeClassAllocator32 {
     u64 LastReleaseAtNs;
   };
 
+  struct BlocksInfo {
+    SinglyLinkedList<BatchGroup> BlockList = {};
+    uptr PoppedBlocks = 0;
+    uptr PushedBlocks = 0;
+  };
+
   struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
     HybridMutex Mutex;
-    SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
+    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
     uptr CurrentRegion GUARDED_BY(Mutex);
     uptr CurrentRegionAllocated GUARDED_BY(Mutex);
-    SizeClassStats Stats GUARDED_BY(Mutex);
     u32 RandState;
     uptr AllocatedUser GUARDED_BY(Mutex);
     // Lowest & highest region index allocated for this size class, to avoid
@@ -416,13 +416,13 @@ template <typename Config> class SizeClassAllocator32 {
 
   // Push the blocks to their batch group. The layout will be like,
   //
-  // FreeList - > BG -> BG -> BG
-  //              |     |     |
-  //              v     v     v
-  //              TB    TB    TB
-  //              |
-  //              v
-  //              TB
+  // FreeListInfo.BlockList - > BG -> BG -> BG
+  //                            |     |     |
+  //                            v     v     v
+  //                            TB    TB    TB
+  //                            |
+  //                            v
+  //                            TB
   //
   // Each BlockGroup(BG) will associate with unique group id and the free blocks
   // are managed by a list of TransferBatch(TB). To reduce the time of inserting
@@ -533,13 +533,13 @@ template <typename Config> class SizeClassAllocator32 {
       BG->PushedBlocks += Size;
     };
 
-    BatchGroup *Cur = Sci->FreeList.front();
+    BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();
 
     if (ClassId == SizeClassMap::BatchClassId) {
       if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
         Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
-        Sci->FreeList.push_front(Cur);
+        Sci->FreeListInfo.BlockList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
       return;
@@ -559,9 +559,9 @@ template <typename Config> class SizeClassAllocator32 {
         compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
       Cur = CreateGroup(compactPtrGroupBase(Array[0]));
       if (Prev == nullptr)
-        Sci->FreeList.push_front(Cur);
+        Sci->FreeListInfo.BlockList.push_front(Cur);
       else
-        Sci->FreeList.insert(Prev, Cur);
+        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
     }
 
     // All the blocks are from the same group, just push without checking group
@@ -592,7 +592,7 @@ template <typename Config> class SizeClassAllocator32 {
             compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
           Cur = CreateGroup(compactPtrGroupBase(Array[I]));
           DCHECK_NE(Prev, nullptr);
-          Sci->FreeList.insert(Prev, Cur);
+          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
         }
 
         Count = 1;
@@ -610,10 +610,11 @@ template <typename Config> class SizeClassAllocator32 {
   // The region mutex needs to be held while calling this method.
   TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
       REQUIRES(Sci->Mutex) {
-    if (Sci->FreeList.empty())
+    if (Sci->FreeListInfo.BlockList.empty())
       return nullptr;
 
-    SinglyLinkedList<TransferBatch> &Batches = Sci->FreeList.front()->Batches;
+    SinglyLinkedList<TransferBatch> &Batches =
+        Sci->FreeListInfo.BlockList.front()->Batches;
     DCHECK(!Batches.empty());
 
     TransferBatch *B = Batches.front();
@@ -622,8 +623,8 @@ template <typename Config> class SizeClassAllocator32 {
     DCHECK_GT(B->getCount(), 0U);
 
     if (Batches.empty()) {
-      BatchGroup *BG = Sci->FreeList.front();
-      Sci->FreeList.pop_front();
+      BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+      Sci->FreeListInfo.BlockList.pop_front();
 
       // We don't keep BatchGroup with zero blocks to avoid empty-checking while
       // allocating. Note that block used by constructing BatchGroup is recorded
@@ -728,13 +729,15 @@ template <typename Config> class SizeClassAllocator32 {
       REQUIRES(Sci->Mutex) {
     if (Sci->AllocatedUser == 0)
       return;
-    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+    const uptr InUse =
+        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
     const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
     Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                 "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
-                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
-                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
+                Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
+                InUse, AvailableChunks, Rss >> 10,
+                Sci->ReleaseInfo.RangesReleased);
   }
 
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
@@ -743,10 +746,11 @@ template <typename Config> class SizeClassAllocator32 {
     const uptr BlockSize = getSizeByClassId(ClassId);
     const uptr PageSize = getPageSizeCached();
 
-    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
     const uptr BytesInFreeList =
         Sci->AllocatedUser -
-        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
+        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
+            BlockSize;
 
     if (UNLIKELY(BytesInFreeList == 0))
       return 0;
@@ -823,7 +827,7 @@ template <typename Config> class SizeClassAllocator32 {
     auto DecompactPtr = [](CompactPtrT CompactPtr) {
       return reinterpret_cast<uptr>(CompactPtr);
     };
-    for (BatchGroup &BG : Sci->FreeList) {
+    for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
       const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
       // The `GroupSize` may not be divided by `BlockSize`, which means there is
       // an unused space at the end of Region. Exclude that space to avoid

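
Both allocators compute their statistics from the two counters now grouped in
BlocksInfo, as the getStats and releaseToOSMaybe hunks above show. A minimal
standalone sketch of that accounting (the Counters struct and the free
functions here are hypothetical illustrations, not scudo APIs):

#include <cstdint>
using uptr = uintptr_t; // scudo's pointer-sized unsigned integer

struct Counters {
  uptr PoppedBlocks = 0; // blocks handed out from the freelist
  uptr PushedBlocks = 0; // blocks returned to the freelist
};

// PoppedBlocks can never lag PushedBlocks (the code DCHECKs this), so
// the difference is the number of blocks currently in use.
uptr inUseBlocks(const Counters &C) {
  return C.PoppedBlocks - C.PushedBlocks;
}

// Whatever part of the user-allocated bytes is not in use must still be
// sitting in the freelist.
uptr bytesInFreeList(const Counters &C, uptr AllocatedUser, uptr BlockSize) {
  return AllocatedUser - inUseBlocks(C) * BlockSize;
}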
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 7eaaae37c61c2..9dc98d9afd8c1 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -156,7 +156,7 @@ template <typename Config> class SizeClassAllocator64 {
       ScopedLock L(Region->Mutex);
       TransferBatch *B = popBatchImpl(C, ClassId, Region);
       if (LIKELY(B)) {
-        Region->Stats.PoppedBlocks += B->getCount();
+        Region->FreeListInfo.PoppedBlocks += B->getCount();
         return B;
       }
 
@@ -168,7 +168,7 @@ template <typename Config> class SizeClassAllocator64 {
         B = popBatchImpl(C, ClassId, Region);
         // if `populateFreeList` succeeded, we are supposed to get free blocks.
         DCHECK_NE(B, nullptr);
-        Region->Stats.PoppedBlocks += B->getCount();
+        Region->FreeListInfo.PoppedBlocks += B->getCount();
         return B;
       }
     }
@@ -202,7 +202,8 @@ template <typename Config> class SizeClassAllocator64 {
         // cause a recursive allocation). However, The number of free blocks may
         // be less than two. Therefore, populate the free list before inserting
         // the blocks.
-        const bool NeedToRefill = Size == 1U && Region->FreeList.empty();
+        const bool NeedToRefill =
+            Size == 1U && Region->FreeListInfo.BlockList.empty();
         // If BatchClass has been exhausted, the program should have been
         // aborted.
         DCHECK(!Region->Exhausted);
@@ -213,7 +214,7 @@ template <typename Config> class SizeClassAllocator64 {
           PrintStats = true;
         } else {
           pushBlocksImpl(C, SizeClassMap::BatchClassId, Region, Array, Size);
-          Region->Stats.PushedBlocks += Size;
+          Region->FreeListInfo.PushedBlocks += Size;
         }
       }
 
@@ -255,7 +256,7 @@ template <typename Config> class SizeClassAllocator64 {
     ScopedLock L(Region->Mutex);
     pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
 
-    Region->Stats.PushedBlocks += Size;
+    Region->FreeListInfo.PushedBlocks += Size;
     if (ClassId != SizeClassMap::BatchClassId)
       releaseToOSMaybe(Region, ClassId);
   }
@@ -306,8 +307,8 @@ template <typename Config> class SizeClassAllocator64 {
       ScopedLock L(Region->Mutex);
       if (Region->MappedUser)
         TotalMapped += Region->MappedUser;
-      PoppedBlocks += Region->Stats.PoppedBlocks;
-      PushedBlocks += Region->Stats.PushedBlocks;
+      PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
+      PushedBlocks += Region->FreeListInfo.PushedBlocks;
     }
     Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
                 "allocations; remains %zu\n",
@@ -429,11 +430,6 @@ template <typename Config> class SizeClassAllocator64 {
   // Fill at most this number of batches from the newly map'd memory.
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
 
-  struct RegionStats {
-    uptr PoppedBlocks;
-    uptr PushedBlocks;
-  };
-
   struct ReleaseToOsInfo {
     uptr BytesInFreeListAtLastCheckpoint;
     uptr RangesReleased;
@@ -441,12 +437,17 @@ template <typename Config> class SizeClassAllocator64 {
     u64 LastReleaseAtNs;
   };
 
+  struct BlocksInfo {
+    SinglyLinkedList<BatchGroup> BlockList = {};
+    uptr PoppedBlocks = 0;
+    uptr PushedBlocks = 0;
+  };
+
   struct UnpaddedRegionInfo {
     HybridMutex Mutex;
-    SinglyLinkedList<BatchGroup> FreeList GUARDED_BY(Mutex);
+    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
     // This is initialized before thread creation.
     uptr RegionBeg = 0;
-    RegionStats Stats GUARDED_BY(Mutex) = {};
     u32 RandState GUARDED_BY(Mutex) = 0;
     // Bytes mapped for user memory.
     uptr MappedUser GUARDED_BY(Mutex) = 0;
@@ -514,13 +515,13 @@ template <typename Config> class SizeClassAllocator64 {
 
   // Push the blocks to their batch group. The layout will be like,
   //
-  // FreeList - > BG -> BG -> BG
-  //              |     |     |
-  //              v     v     v
-  //              TB    TB    TB
-  //              |
-  //              v
-  //              TB
+  // FreeListInfo.BlockList - > BG -> BG -> BG
+  //                            |     |     |
+  //                            v     v     v
+  //                            TB    TB    TB
+  //                            |
+  //                            v
+  //                            TB
   //
   // Each BlockGroup(BG) will associate with unique group id and the free blocks
   // are managed by a list of TransferBatch(TB). To reduce the time of inserting
@@ -631,13 +632,13 @@ template <typename Config> class SizeClassAllocator64 {
       BG->PushedBlocks += Size;
     };
 
-    BatchGroup *Cur = Region->FreeList.front();
+    BatchGroup *Cur = Region->FreeListInfo.BlockList.front();
 
     if (ClassId == SizeClassMap::BatchClassId) {
       if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
         Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
-        Region->FreeList.push_front(Cur);
+        Region->FreeListInfo.BlockList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
       return;
@@ -657,9 +658,9 @@ template <typename Config> class SizeClassAllocator64 {
         compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
       Cur = CreateGroup(compactPtrGroup(Array[0]));
       if (Prev == nullptr)
-        Region->FreeList.push_front(Cur);
+        Region->FreeListInfo.BlockList.push_front(Cur);
       else
-        Region->FreeList.insert(Prev, Cur);
+        Region->FreeListInfo.BlockList.insert(Prev, Cur);
     }
 
     // All the blocks are from the same group, just push without checking group
@@ -690,7 +691,7 @@ template <typename Config> class SizeClassAllocator64 {
             compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
           Cur = CreateGroup(compactPtrGroup(Array[I]));
           DCHECK_NE(Prev, nullptr);
-          Region->FreeList.insert(Prev, Cur);
+          Region->FreeListInfo.BlockList.insert(Prev, Cur);
         }
 
         Count = 1;
@@ -708,11 +709,11 @@ template <typename Config> class SizeClassAllocator64 {
   // The region mutex needs to be held while calling this method.
   TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
       REQUIRES(Region->Mutex) {
-    if (Region->FreeList.empty())
+    if (Region->FreeListInfo.BlockList.empty())
       return nullptr;
 
     SinglyLinkedList<TransferBatch> &Batches =
-        Region->FreeList.front()->Batches;
+        Region->FreeListInfo.BlockList.front()->Batches;
     DCHECK(!Batches.empty());
 
     TransferBatch *B = Batches.front();
@@ -721,8 +722,8 @@ template <typename Config> class SizeClassAllocator64 {
     DCHECK_GT(B->getCount(), 0U);
 
     if (Batches.empty()) {
-      BatchGroup *BG = Region->FreeList.front();
-      Region->FreeList.pop_front();
+      BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+      Region->FreeListInfo.BlockList.pop_front();
 
       // We don't keep BatchGroup with zero blocks to avoid empty-checking while
       // allocating. Note that block used by constructing BatchGroup is recorded
@@ -821,15 +822,17 @@ template <typename Config> class SizeClassAllocator64 {
       REQUIRES(Region->Mutex) {
     if (Region->MappedUser == 0)
       return;
-    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+    const uptr InUse =
+        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
     const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
     Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                 "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                 "released: %6zuK region: 0x%zx (0x%zx)\n",
                 Region->Exhausted ? "F" : " ", ClassId,
                 getSizeByClassId(ClassId), Region->MappedUser >> 10,
-                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
-                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
+                Region->FreeListInfo.PoppedBlocks,
+                Region->FreeListInfo.PushedBlocks, InUse, TotalChunks,
+                Rss >> 10, Region->ReleaseInfo.RangesReleased,
                 Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                 getRegionBaseByClassId(ClassId));
   }
@@ -840,10 +843,12 @@ template <typename Config> class SizeClassAllocator64 {
     const uptr BlockSize = getSizeByClassId(ClassId);
     const uptr PageSize = getPageSizeCached();
 
-    DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+              Region->FreeListInfo.PushedBlocks);
     const uptr BytesInFreeList =
-        Region->AllocatedUser -
-        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
+        Region->AllocatedUser - (Region->FreeListInfo.PoppedBlocks -
+                                 Region->FreeListInfo.PushedBlocks) *
+                                    BlockSize;
 
     if (UNLIKELY(BytesInFreeList == 0))
       return 0;
@@ -924,7 +929,7 @@ template <typename Config> class SizeClassAllocator64 {
 
     // This is only used for debugging to ensure the consistency of the number
     // of groups.
-    uptr NumberOfBatchGroups = Region->FreeList.size();
+    uptr NumberOfBatchGroups = Region->FreeListInfo.BlockList.size();
 
     // We are examining each group and will take the minimum distance to the
     // release threshold as the next Region::TryReleaseThreshold(). Note that if
@@ -933,7 +938,8 @@ template <typename Config> class SizeClassAllocator64 {
     // the comment on `SmallerBlockReleasePageDelta` for more details.
     uptr MinDistToThreshold = GroupSize;
 
-    for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;
+    for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
+                    *Prev = nullptr;
          BG != nullptr;) {
       // Group boundary is always GroupSize-aligned from CompactPtr base. The
       // layout of memory groups is like,
@@ -1025,7 +1031,8 @@ template <typename Config> class SizeClassAllocator64 {
       }
 
       // If `BG` is the first BatchGroup in the list, we only need to advance
-      // `BG` and call FreeList::pop_front(). No update is needed for `Prev`.
+      // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
+      // for `Prev`.
       //
       //         (BG)   (BG->Next)
       // Prev     Cur      BG
@@ -1036,7 +1043,7 @@ template <typename Config> class SizeClassAllocator64 {
       //          +--+    +--+
       //
       // Otherwise, `Prev` will be used to extract the `Cur` from the
-      // `FreeList`.
+      // `FreeListInfo.BlockList`.
       //
       //         (BG)   (BG->Next)
       // Prev     Cur      BG
@@ -1046,7 +1053,7 @@ template <typename Config> class SizeClassAllocator64 {
       //  |  | -> |X | -> |  | -> ...
       //  +--+    +--+    +--+
       //
-      // After FreeList::extract(),
+      // After FreeListInfo.BlockList::extract(),
       //
       // Prev     Cur       BG
       //   |       |        |
@@ -1070,9 +1077,9 @@ template <typename Config> class SizeClassAllocator64 {
       Cur->BytesInBGAtLastCheckpoint = BytesInBG;
 
       if (Prev != nullptr)
-        Region->FreeList.extract(Prev, Cur);
+        Region->FreeListInfo.BlockList.extract(Prev, Cur);
       else
-        Region->FreeList.pop_front();
+        Region->FreeListInfo.BlockList.pop_front();
       GroupToRelease.push_back(Cur);
     }
 
@@ -1164,12 +1171,15 @@ template <typename Config> class SizeClassAllocator64 {
     }
     Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
 
-    // Merge GroupToRelease back to the Region::FreeList. Note that both
-    // `Region->FreeList` and `GroupToRelease` are sorted.
-    for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;;) {
+    // Merge GroupToRelease back to the Region::FreeListInfo.BlockList. Note
+    // that both `Region->FreeListInfo.BlockList` and `GroupToRelease` are
+    // sorted.
+    for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
+                    *Prev = nullptr;
+         ;) {
       if (BG == nullptr || GroupToRelease.empty()) {
         if (!GroupToRelease.empty())
-          Region->FreeList.append_back(&GroupToRelease);
+          Region->FreeListInfo.BlockList.append_back(&GroupToRelease);
         break;
       }
 
@@ -1188,7 +1198,7 @@ template <typename Config> class SizeClassAllocator64 {
       // `GroupToRelease::front()` (which is `Cur` below)  before `BG`.
       //
       //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
-      //      FreeList.
+      //      FreeListInfo.BlockList.
       //   2. Otherwise, use `insert()` which inserts an element next to `Prev`.
       //
       // Afterwards, we don't need to advance `BG` because the order between
@@ -1196,18 +1206,18 @@ template <typename Config> class SizeClassAllocator64 {
       BatchGroup *Cur = GroupToRelease.front();
       GroupToRelease.pop_front();
       if (Prev == nullptr)
-        Region->FreeList.push_front(Cur);
+        Region->FreeListInfo.BlockList.push_front(Cur);
       else
-        Region->FreeList.insert(Prev, Cur);
+        Region->FreeListInfo.BlockList.insert(Prev, Cur);
       DCHECK_EQ(Cur->Next, BG);
       Prev = Cur;
     }
 
-    DCHECK_EQ(Region->FreeList.size(), NumberOfBatchGroups);
+    DCHECK_EQ(Region->FreeListInfo.BlockList.size(), NumberOfBatchGroups);
     (void)NumberOfBatchGroups;
 
     if (SCUDO_DEBUG) {
-      BatchGroup *Prev = Region->FreeList.front();
+      BatchGroup *Prev = Region->FreeListInfo.BlockList.front();
       for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
            Prev = Cur, Cur = Cur->Next) {
         CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);

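
Nothing above changes behavior (the commit is NFC); popBatchImpl still walks
the same two-level structure, only through FreeListInfo.BlockList. A rough
standalone sketch of that pop path, with std::list and simplified types
standing in for scudo's intrusive SinglyLinkedList (an illustration, not the
real implementation):

#include <list>

struct TransferBatch { int Count = 0; };                 // simplified stand-in
struct BatchGroup { std::list<TransferBatch *> Batches; };

TransferBatch *popBatchSketch(std::list<BatchGroup *> &BlockList) {
  if (BlockList.empty())
    return nullptr;
  // Invariant kept by the real code: a BatchGroup on the list always
  // has at least one TransferBatch.
  std::list<TransferBatch *> &Batches = BlockList.front()->Batches;
  TransferBatch *B = Batches.front();
  Batches.pop_front();
  // Empty BatchGroups are dropped so the allocation path never has to
  // skip over them (mirrors the pop_front in the diff).
  if (Batches.empty())
    BlockList.pop_front();
  return B;
}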