[compiler-rt] bca0cf7 - [sanitizer] Support dynamic premapped R/W range in primary allocator.

Matt Morehouse via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 22 14:45:46 PDT 2021


Author: Matt Morehouse
Date: 2021-03-22T14:44:52-07:00
New Revision: bca0cf768b6021124f5e5315be333c2f45f14fca

URL: https://github.com/llvm/llvm-project/commit/bca0cf768b6021124f5e5315be333c2f45f14fca
DIFF: https://github.com/llvm/llvm-project/commit/bca0cf768b6021124f5e5315be333c2f45f14fca.diff

LOG: [sanitizer] Support dynamic premapped R/W range in primary allocator.

The main use case for this change is HWASan aliasing mode, which premaps
the alias space adjacent to the dynamic shadow. The primary allocator can
now place its heap in that premapped alias space instead of mapping a
separate region.
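
A minimal sketch of the new calling convention, mirroring what the unit
tests below do (Allocator64Dynamic and kAllocatorSize are the tests'
stand-ins for a dynamic primary and its space size; real clients such as
HWASan supply their own premapped range):

    // Over-map 2x and round up so the heap start is kAllocatorSize-aligned;
    // the primary requires the premapped range to be R/W already.
    void *base = MmapNoReserveOrDie(2 * kAllocatorSize, "premapped heap");
    uptr heap_start = RoundUpTo(reinterpret_cast<uptr>(base), kAllocatorSize);

    CombinedAllocator<Allocator64Dynamic> *a =
        new CombinedAllocator<Allocator64Dynamic>;
    a->Init(kReleaseToOSIntervalNever, heap_start);
    // The primary's heap now lives inside [heap_start, heap_start +
    // kAllocatorSize); the secondary allocator is unaffected.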

Reviewed By: vitalybuka, eugenis

Differential Revision: https://reviews.llvm.org/D98293

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
    compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 33f89d6d4992..eb836bc47876 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -35,9 +35,9 @@ class CombinedAllocator {
     secondary_.InitLinkerInitialized();
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     stats_.Init();
-    primary_.Init(release_to_os_interval_ms);
+    primary_.Init(release_to_os_interval_ms, heap_start);
     secondary_.Init();
   }
 

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index b90dabbf7769..fb5394cd39c4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -119,7 +119,8 @@ class SizeClassAllocator32 {
   typedef SizeClassAllocator32<Params> ThisT;
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+    CHECK(!heap_start);
     possible_regions.Init();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 26753b6c8aeb..db30e138154a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -69,25 +69,45 @@ class SizeClassAllocator64 {
     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+  // at heap_start and places the heap there.  This mode requires kSpaceBeg ==
+  // ~(uptr)0.
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
-    if (kUsingConstantSpaceBeg) {
-      CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
-      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
-                                             PrimaryAllocatorName, kSpaceBeg));
+    PremappedHeap = heap_start != 0;
+    if (PremappedHeap) {
+      CHECK(!kUsingConstantSpaceBeg);
+      NonConstSpaceBeg = heap_start;
+      uptr RegionInfoSize = AdditionalSize();
+      RegionInfoSpace =
+          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+      CHECK_NE(RegionInfoSpace, ~(uptr)0);
+      CHECK_EQ(RegionInfoSpace,
+               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+                                      "SizeClassAllocator: region info"));
+      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
     } else {
-      // Combined allocator expects that an 2^N allocation is always aligned to
-      // 2^N. For this to work, the start of the space needs to be aligned as
-      // high as the largest size class (which also needs to be a power of 2).
-      NonConstSpaceBeg = address_range.InitAligned(
-          TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
-      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      if (kUsingConstantSpaceBeg) {
+        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+        CHECK_EQ(kSpaceBeg,
+                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+                                    kSpaceBeg));
+      } else {
+        // Combined allocator expects that a 2^N allocation is always aligned
+        // to 2^N. For this to work, the start of the space needs to be aligned
+        // as high as the largest size class (which also needs to be a power of
+        // 2).
+        NonConstSpaceBeg = address_range.InitAligned(
+            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      }
+      RegionInfoSpace = SpaceEnd();
+      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+                           "SizeClassAllocator: region info");
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
-                         "SizeClassAllocator: region info");
     // Check that the RegionInfo array is aligned on the CacheLine size.
-    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
   }
 
   s32 ReleaseToOSIntervalMs() const {
@@ -596,6 +616,11 @@ class SizeClassAllocator64 {
 
   atomic_sint32_t release_to_os_interval_ms_;
 
+  uptr RegionInfoSpace;
+
+  // True if the user has already mapped the entire heap R/W.
+  bool PremappedHeap;
+
   struct Stats {
     uptr n_allocated;
     uptr n_freed;
@@ -625,7 +650,7 @@ class SizeClassAllocator64 {
 
   RegionInfo *GetRegionInfo(uptr class_id) const {
     DCHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
     return &regions[class_id];
   }
 
@@ -650,6 +675,9 @@ class SizeClassAllocator64 {
   }
 
   bool MapWithCallback(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap)
+      return beg >= NonConstSpaceBeg &&
+             beg + size <= NonConstSpaceBeg + kSpaceSize;
     uptr mapped = address_range.Map(beg, size, name);
     if (UNLIKELY(!mapped))
       return false;
@@ -659,11 +687,18 @@ class SizeClassAllocator64 {
   }
 
   void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap) {
+      CHECK_GE(beg, NonConstSpaceBeg);
+      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+      return;
+    }
     CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
     MapUnmapCallback().OnMap(beg, size);
   }
 
   void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    if (PremappedHeap)
+      return;
     MapUnmapCallback().OnUnmap(beg, size);
     address_range.Unmap(beg, size);
   }
@@ -832,6 +867,9 @@ class SizeClassAllocator64 {
 
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
+  //
+  // TODO(morehouse): Support a callback on memory release so HWASan can release
+  // aliases as well.
   void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
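
In sum, the 64-bit primary now has two setup paths: the existing dynamic
mode reserves heap and RegionInfo array together and maps pages on demand,
while premapped mode maps only the RegionInfo array and treats the heap
pages as caller-owned. A standalone approximation of the premapped mapping
helpers (simplified from the diff above; uptr as in sanitizer_common):

    // "Mapping" inside a premapped heap needs no syscall: it succeeds iff
    // the request already lies within the caller-provided R/W range.
    bool PremappedMap(uptr beg, uptr size, uptr space_beg, uptr space_size) {
      return beg >= space_beg && beg + size <= space_beg + space_size;
    }
    // Unmapping is deliberately a no-op in this mode; the caller, not the
    // allocator, owns the pages (e.g. HWASan's alias space must stay live).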

diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
index 590e477678ea..7c95b785987e 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -196,9 +196,9 @@ TEST(SanitizerCommon, DenseSizeClassMap) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocator() {
+void TestSizeClassAllocator(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -265,6 +265,25 @@ void TestSizeClassAllocator() {
 }
 
 #if SANITIZER_CAN_USE_ALLOCATOR64
+
+// Maps a kAllocatorSize-aligned region of kAllocatorSize usable bytes on
+// construction and unmaps the full reservation on destruction.
+class ScopedPremappedHeap {
+ public:
+  ScopedPremappedHeap() {
+    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
+    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
+  }
+
+  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }
+
+  uptr Addr() { return AlignedAddr; }
+
+ private:
+  void *BasePtr;
+  uptr AlignedAddr;
+};
+
 // These tests can fail on Windows if memory is somewhat full and lit happens
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS
@@ -276,6 +295,11 @@ TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
   TestSizeClassAllocator<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 //FIXME(kostyak): find values so that those work on Android as well.
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
@@ -320,9 +344,9 @@ TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
 }
 
 template <class Allocator>
-void SizeClassAllocatorMetadataStress() {
+void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -361,6 +385,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();
@@ -374,9 +403,10 @@ TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
 }
 
 template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
+                                           uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -408,6 +438,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
       1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
+}
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
@@ -624,10 +659,10 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
 }
 
 template <class PrimaryAllocator>
-void TestCombinedAllocator() {
+void TestCombinedAllocator(uptr premapped_heap = 0) {
   typedef CombinedAllocator<PrimaryAllocator> Allocator;
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   std::mt19937 r;
 
   typename Allocator::AllocatorCache cache;
@@ -698,6 +733,11 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
   TestCombinedAllocator<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact>();
@@ -714,12 +754,12 @@ TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorLocalCache() {
+void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
   using AllocatorCache = typename Allocator::AllocatorCache;
   AllocatorCache cache;
   Allocator *a = new Allocator();
 
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
 
@@ -759,6 +799,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
@@ -891,9 +936,9 @@ void IterationTestCallback(uptr chunk, void *arg) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorIteration() {
+void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -942,6 +987,10 @@ TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
 }
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
+}
 #endif
 #endif
 
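
Why ScopedPremappedHeap over-maps by 2x: rounding the base up to the next
kAllocatorSize boundary consumes at most kAllocatorSize - 1 bytes, so the
aligned window always fits inside a doubled reservation (which is also why
the destructor must return the full 2 * kAllocatorSize). A self-contained
check of that invariant (plain C++, independent of the sanitizer tree):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kSize = 1 << 20;  // stand-in for kAllocatorSize
      for (uintptr_t base = 1; base < 4 * kSize; base += 4095) {
        // RoundUpTo for power-of-two kSize.
        uintptr_t aligned = (base + kSize - 1) & ~(kSize - 1);
        assert(aligned % kSize == 0 && aligned >= base);
        // The aligned kSize window never escapes the 2x reservation.
        assert(aligned + kSize <= base + 2 * kSize);
      }
      return 0;
    }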