[compiler-rt] 99aebb6 - [NFC][sanitizer] Don't store region_base_ in MemoryMapper

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 13 15:59:46 PDT 2021


Author: Vitaly Buka
Date: 2021-07-13T15:59:36-07:00
New Revision: 99aebb62fb4f2a39c7f03579facf3a1e176b245d

URL: https://github.com/llvm/llvm-project/commit/99aebb62fb4f2a39c7f03579facf3a1e176b245d
DIFF: https://github.com/llvm/llvm-project/commit/99aebb62fb4f2a39c7f03579facf3a1e176b245d.diff

LOG: [NFC][sanitizer] Don't store region_base_ in MemoryMapper

Part of D105778
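
For readers skimming the diff below: the MemoryMapper constructor no longer
takes a class_id (and no longer caches region_base_); instead, class_id is
threaded through FreePagesRangeTracker and ReleaseFreeMemoryToOS down to
ReleasePageRangeToOS, which looks the region base up per call. A minimal
compilable sketch of the new shape follows; FakeAllocator is a hypothetical
stand-in for the real SizeClassAllocator64, which additionally scales compact
pointers by kCompactPtrScale.

  #include <cstdint>
  #include <cstdio>

  using uptr = uintptr_t;

  // Hypothetical stand-in modeling only the two allocator methods that
  // MemoryMapper needs; not the real SizeClassAllocator64.
  struct FakeAllocator {
    using CompactPtrT = uint32_t;
    uptr GetRegionBeginBySizeClass(uptr class_id) const {
      return 0x100000 * class_id;  // arbitrary per-class region base
    }
    uptr CompactPtrToPointer(uptr base, CompactPtrT ptr) const {
      return base + ptr;  // the real allocator also applies kCompactPtrScale
    }
  };

  template <class Allocator>
  class MemoryMapper {
   public:
    using CompactPtrT = typename Allocator::CompactPtrT;

    // After this change the constructor stores only the allocator; before,
    // it also took class_id and cached region_base_.
    explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}

    uptr GetReleasedRangesCount() const { return released_ranges_count_; }

    // class_id now arrives per call, so the region base is recomputed here
    // instead of being fixed for the mapper's lifetime.
    void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
      const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
      const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
      const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
      // The real code calls ReleaseMemoryPagesToOS(from_page, to_page) here.
      released_ranges_count_++;
      released_bytes_ += to_page - from_page;
    }

   private:
    const Allocator &allocator_;
    uptr released_ranges_count_ = 0;
    uptr released_bytes_ = 0;
  };

  int main() {
    FakeAllocator allocator;
    MemoryMapper<FakeAllocator> mapper(allocator);  // no class_id at construction
    mapper.ReleasePageRangeToOS(/*class_id=*/1, /*from=*/0x1000, /*to=*/0x3000);
    std::printf("released ranges: %llu\n",
                (unsigned long long)mapper.GetReleasedRangesCount());
  }

The effect is that a single MemoryMapper is no longer tied to one size class,
which the follow-up work in D105778 appears to build on.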

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
    compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 03f26f0ba837..1eedf32e7366 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -47,9 +47,7 @@ class MemoryMapper {
  public:
   typedef typename Allocator::CompactPtrT CompactPtrT;
 
-  MemoryMapper(const Allocator &allocator, uptr class_id)
-      : allocator_(allocator),
-        region_base_(allocator.GetRegionBeginBySizeClass(class_id)) {}
+  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
 
   uptr GetReleasedRangesCount() const { return released_ranges_count_; }
 
@@ -68,9 +66,10 @@ class MemoryMapper {
   }
 
   // Releases [from, to) range of pages back to OS.
-  void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
-    const uptr from_page = allocator_.CompactPtrToPointer(region_base_, from);
-    const uptr to_page = allocator_.CompactPtrToPointer(region_base_, to);
+  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
+    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
     ReleaseMemoryPagesToOS(from_page, to_page);
     released_ranges_count_++;
     released_bytes_ += to_page - from_page;
@@ -78,7 +77,6 @@ class MemoryMapper {
 
  private:
   const Allocator &allocator_;
-  const uptr region_base_ = 0;
   uptr released_ranges_count_ = 0;
   uptr released_bytes_ = 0;
 };
@@ -480,12 +478,10 @@ class SizeClassAllocator64 {
   template <class MemoryMapperT>
   class FreePagesRangeTracker {
    public:
-    explicit FreePagesRangeTracker(MemoryMapperT *mapper)
+    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
         : memory_mapper(mapper),
-          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
-          in_the_range(false),
-          current_page(0),
-          current_range_start_page(0) {}
+          class_id(class_id),
+          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
 
     void NextPage(bool freed) {
       if (freed) {
@@ -507,17 +503,18 @@ class SizeClassAllocator64 {
     void CloseOpenedRange() {
       if (in_the_range) {
         memory_mapper->ReleasePageRangeToOS(
-            current_range_start_page << page_size_scaled_log,
+            class_id, current_range_start_page << page_size_scaled_log,
             current_page << page_size_scaled_log);
         in_the_range = false;
       }
     }
 
-    MemoryMapperT *const memory_mapper;
-    const uptr page_size_scaled_log;
-    bool in_the_range;
-    uptr current_page;
-    uptr current_range_start_page;
+    MemoryMapperT *const memory_mapper = nullptr;
+    const uptr class_id = 0;
+    const uptr page_size_scaled_log = 0;
+    bool in_the_range = false;
+    uptr current_page = 0;
+    uptr current_range_start_page = 0;
   };
 
   // Iterates over the free_array to identify memory pages containing freed
@@ -528,7 +525,8 @@ class SizeClassAllocator64 {
   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                     uptr free_array_count, uptr chunk_size,
                                     uptr allocated_pages_count,
-                                    MemoryMapper *memory_mapper) {
+                                    MemoryMapper *memory_mapper,
+                                    uptr class_id) {
     const uptr page_size = GetPageSizeCached();
 
     // Figure out the number of chunks per page and whether we can take a fast
@@ -590,7 +588,7 @@ class SizeClassAllocator64 {
 
     // Iterate over pages detecting ranges of pages with chunk counters equal
     // to the expected number of chunks for the particular page.
-    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper);
+    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
     if (same_chunk_count_per_page) {
       // Fast path, every page has the same number of chunks affecting it.
       for (uptr i = 0; i < counters.GetCount(); i++)
@@ -892,12 +890,12 @@ class SizeClassAllocator64 {
       }
     }
 
-    MemoryMapper<ThisT> memory_mapper(*this, class_id);
+    MemoryMapper<ThisT> memory_mapper(*this);
 
     ReleaseFreeMemoryToOS(
         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
         RoundUpTo(region->allocated_user, page_size) / page_size,
-        &memory_mapper);
+        &memory_mapper, class_id);
 
     if (memory_mapper.GetReleasedRangesCount() > 0) {
       region->rtoi.n_freed_at_last_release = region->stats.n_freed;

diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
index c980c9dd9a41..a5076da5aa18 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -1243,7 +1243,7 @@ class RangeRecorder {
             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
         last_page_reported(0) {}
 
-  void ReleasePageRangeToOS(u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
     from >>= page_size_scaled_log;
     to >>= page_size_scaled_log;
     ASSERT_LT(from, to);
@@ -1283,7 +1283,7 @@ TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
 
   for (auto test_case : test_cases) {
     RangeRecorder range_recorder;
-    RangeTracker tracker(&range_recorder);
+    RangeTracker tracker(&range_recorder, 1);
     for (int i = 0; test_case[i] != 0; i++)
       tracker.NextPage(test_case[i] == 'x');
     tracker.Done();
@@ -1309,7 +1309,7 @@ class ReleasedPagesTrackingMemoryMapper {
     free(buffer);
   }
 
-  void ReleasePageRangeToOS(u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
     uptr page_size_scaled =
         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
     for (u32 i = from; i < to; i += page_size_scaled)
@@ -1353,7 +1353,7 @@ void TestReleaseFreeMemoryToOS() {
 
     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                      chunk_size, kAllocatedPagesCount,
-                                     &memory_mapper);
+                                     &memory_mapper, class_id);
 
     // Verify that there are no released pages touched by used chunks and all
     // ranges of free chunks big enough to contain the entire memory pages had
