[compiler-rt] 832ba20 - sanitizer_common: optimize memory drain

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 13 16:28:59 PDT 2021


Author: Dmitry Vyukov
Date: 2021-07-13T16:28:48-07:00
New Revision: 832ba20710ee09b00161ea72cf80c9af800fda63

URL: https://github.com/llvm/llvm-project/commit/832ba20710ee09b00161ea72cf80c9af800fda63
DIFF: https://github.com/llvm/llvm-project/commit/832ba20710ee09b00161ea72cf80c9af800fda63.diff

LOG: sanitizer_common: optimize memory drain

Currently we allocate a MemoryMapper per size class.
MemoryMapper mmaps and munmaps an internal buffer.
This results in 50 mmap/munmap calls under the global
allocator mutex. Reuse the MemoryMapper and its buffer
for all size classes instead. This radically reduces the
number of mmap/munmap calls. Smaller size classes tend
to have more objects allocated, so it's highly likely
that the buffer allocated for the first size class will
be large enough for all subsequent size classes.
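
The following is an illustrative sketch only, not the sanitizer_common
code; the class and function names are made up. It shows the pattern the
change uses: hoist a buffer-owning mapper out of the per-size-class loop,
so the scratch buffer is mmap'ed at most once, cleared for reuse on later
iterations, and munmap'ed once at the end.

#include <cstddef>
#include <cstring>
#include <sys/mman.h>

// Hypothetical stand-in for MemoryMapper: owns one mmap'd scratch buffer
// and remaps it only when a caller asks for more than it already has.
class ScratchMapper {
 public:
  ~ScratchMapper() {
    if (buffer_)
      munmap(buffer_, size_);
  }

  // Returns a zeroed buffer of at least `size` (> 0) bytes. A larger
  // request remaps; a smaller or equal one clears and reuses the mapping.
  void *GetBuffer(size_t size) {
    if (size_ < size) {
      if (buffer_)
        munmap(buffer_, size_);
      buffer_ = nullptr;
      size_ = 0;
      void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        return nullptr;  // the real code dies on a fatal error instead
      buffer_ = p;
      size_ = size;
    } else {
      std::memset(buffer_, 0, size);
    }
    return buffer_;
  }

 private:
  void *buffer_ = nullptr;
  size_t size_ = 0;
};

// Before: one mapper (and thus one mmap/munmap pair) per size class.
// After: one mapper for the whole drain, created outside the loop.
void DrainAllClasses(size_t num_classes) {
  ScratchMapper mapper;  // mapped lazily, unmapped once at scope exit
  for (size_t class_id = 1; class_id < num_classes; ++class_id) {
    // Placeholder for the real per-class counter buffer size.
    const size_t counters_size = 4096;
    void *counters = mapper.GetBuffer(counters_size);
    (void)counters;  // ... count free pages and release them to the OS ...
  }
}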

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D105778

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 5f436e753818..e495c56f0377 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -63,9 +63,10 @@ struct SizeClassAllocator64LocalCache {
   }
 
   void Drain(SizeClassAllocator *allocator) {
+    MemoryMapperT memory_mapper(*allocator);
     for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      while (c->count > 0) Drain(c, allocator, i, c->count);
+      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
     }
   }
 
@@ -105,17 +106,19 @@ struct SizeClassAllocator64LocalCache {
     c->count = num_requested_chunks;
     return true;
   }
+
   NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                              uptr class_id) {
-    Drain(c, allocator, class_id, c->max_count / 2);
+    MemoryMapperT memory_mapper(*allocator);
+    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
   }
 
-  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
-                      uptr count) {
+  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+             SizeClassAllocator *allocator, uptr class_id, uptr count) {
     CHECK_GE(c->count, count);
     const uptr first_idx_to_drain = c->count - count;
     c->count -= count;
-    allocator->ReturnToAllocator(&stats_, class_id,
+    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                  &c->chunks[first_idx_to_drain], count);
   }
 };

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 1eedf32e7366..1e540a180d4e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -49,20 +49,33 @@ class MemoryMapper {
 
   explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
 
-  uptr GetReleasedRangesCount() const { return released_ranges_count_; }
+  ~MemoryMapper() {
+    if (buffer_)
+      UnmapOrDie(buffer_, buffer_size_);
+  }
 
-  uptr GetReleasedBytes() const { return released_bytes_; }
+  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+    ranges = released_ranges_count_;
+    released_ranges_count_ = 0;
+    bytes = released_bytes_;
+    released_bytes_ = 0;
+    return ranges != 0;
+  }
 
   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
     // TODO(alekseyshl): The idea to explore is to check if we have enough
     // space between num_freed_chunks*sizeof(CompactPtrT) and
     // mapped_free_array to fit buffer_size bytes and use that space instead
     // of mapping a temporary one.
-    return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
-  }
-
-  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
-    UnmapOrDie(buffer, buffer_size);
+    if (buffer_size_ < buffer_size) {
+      if (buffer_)
+        UnmapOrDie(buffer_, buffer_size_);
+      buffer_ = MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
+      buffer_size_ = buffer_size;
+    } else {
+      internal_memset(buffer_, 0, buffer_size);
+    }
+    return buffer_;
   }
 
   // Releases [from, to) range of pages back to OS.
@@ -79,6 +92,8 @@ class MemoryMapper {
   const Allocator &allocator_;
   uptr released_ranges_count_ = 0;
   uptr released_bytes_ = 0;
+  void *buffer_ = nullptr;
+  uptr buffer_size_ = 0;
 };
 
 template <class Params>
@@ -160,9 +175,10 @@ class SizeClassAllocator64 {
   }
 
   void ForceReleaseToOS() {
+    MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
-      MaybeReleaseToOS(class_id, true /*force*/);
+      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }
 
@@ -171,7 +187,8 @@ class SizeClassAllocator64 {
       alignment <= SizeClassMap::kMaxSize;
   }
 
-  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+                                  AllocatorStats *stat, uptr class_id,
                                   const CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -194,7 +211,7 @@ class SizeClassAllocator64 {
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id, false /*force*/);
+    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -429,11 +446,6 @@ class SizeClassAllocator64 {
       buffer = reinterpret_cast<u64*>(
           memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
     }
-    ~PackedCounterArray() {
-      if (buffer) {
-        memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
-      }
-    }
 
     bool IsAllocated() const {
       return !!buffer;
@@ -866,7 +878,8 @@ class SizeClassAllocator64 {
   //
   // TODO(morehouse): Support a callback on memory release so HWASan can release
   // aliases as well.
-  void MaybeReleaseToOS(uptr class_id, bool force) {
+  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+                        bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -890,17 +903,16 @@ class SizeClassAllocator64 {
       }
     }
 
-    MemoryMapper<ThisT> memory_mapper(*this);
-
     ReleaseFreeMemoryToOS(
         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
-        RoundUpTo(region->allocated_user, page_size) / page_size,
-        &memory_mapper, class_id);
+        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+        class_id);
 
-    if (memory_mapper.GetReleasedRangesCount() > 0) {
+    uptr ranges, bytes;
+    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
       region->rtoi.n_freed_at_last_release = region->stats.n_freed;
-      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
-      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+      region->rtoi.num_releases += ranges;
+      region->rtoi.last_released_bytes = bytes;
     }
     region->rtoi.last_release_at_ns = MonotonicNanoTime();
   }
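
A note on the last hunk, with an illustrative sketch (hypothetical names,
not the actual interface): with a short-lived mapper per size class, the
released-ranges/bytes counters could simply be read before the mapper was
destroyed. A mapper shared across size classes keeps accumulating, so the
getters are replaced with GetAndResetStats, which reports and clears the
counters after each per-class release.

#include <cstddef>

// Hypothetical counterpart of the new GetAndResetStats(): report what was
// released since the previous query and clear the counters, so releases
// from earlier size classes are never re-attributed to later ones.
struct ReleaseStats {
  size_t released_ranges = 0;
  size_t released_bytes = 0;

  // Returns true if at least one range was released since the last reset.
  bool GetAndReset(size_t &ranges, size_t &bytes) {
    ranges = released_ranges;
    bytes = released_bytes;
    released_ranges = released_bytes = 0;
    return ranges != 0;
  }
};

// Per-class bookkeeping records a release only if this class actually
// freed something, mirroring the last hunk above.
void RecordRelease(ReleaseStats &stats, size_t &num_releases,
                   size_t &last_released_bytes) {
  size_t ranges, bytes;
  if (stats.GetAndReset(ranges, bytes)) {
    num_releases += ranges;
    last_released_bytes = bytes;
  }
}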

