[compiler-rt] r305972 - [Sanitizers] 32 bit allocator respects allocator_may_return_null flag

Alex Shlyapnikov via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 21 17:02:37 PDT 2017


Author: alekseyshl
Date: Wed Jun 21 19:02:37 2017
New Revision: 305972

URL: http://llvm.org/viewvc/llvm-project?rev=305972&view=rev
Log:
[Sanitizers] 32 bit allocator respects allocator_may_return_null flag

Summary:
Make SizeClassAllocator32 return nullptr when it encounters OOM, which
allows the entire sanitizer allocator to follow the
allocator_may_return_null=1 policy, even for small allocations
(LargeMmapAllocator is already fixed by D34243).
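
For reference, allocator_may_return_null=1 means that OOM should surface
as a nullptr result from the allocation entry points instead of a fatal
report. A minimal standalone sketch of that policy (the names here are
illustrative, not the actual compiler-rt code):

  #include <cstdio>
  #include <cstdlib>

  // Illustrative stand-in for the runtime flag; the real one is parsed
  // from the sanitizer options at startup.
  static bool allocator_may_return_null = true;

  static void *OnOOM() {
    if (allocator_may_return_null)
      return nullptr;              // surface the failure to the caller
    fprintf(stderr, "ERROR: allocator is out of memory\n");
    abort();                       // default policy: die with a report
  }

  static void *AllocateWithPolicy(size_t size) {
    void *p = malloc(size);        // stands in for the primary/secondary path
    return p ? p : OnOOM();
  }

With allocator_may_return_null=0 the process still dies on OOM, so the
default behavior is unchanged.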

Will add a test for OOM in the primary allocator later, once
SizeClassAllocator64 can handle OOM gracefully too.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34433

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h Wed Jun 21 19:02:37 2017
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
       Deallocate(allocator, batch_class_id, b);
   }
 
-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    if (UNLIKELY(!b))
+      return false;
     CHECK_GT(b->Count(), 0);
     b->CopyToArray(c->batch);
     c->count = b->Count();
     DestroyBatch(class_id, allocator, b);
+    return true;
   }
 
   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
     uptr first_idx_to_drain = c->count - cnt;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non-recoverable.
+    // TODO(alekseys): Figure out how to do it without allocating a new batch.
+    if (UNLIKELY(!b))
+      DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                     &c->batch[first_idx_to_drain], cnt);
     c->count -= cnt;
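
The contract change above in one picture: Refill() now reports failure
instead of dying, and Allocate() turns that into a nullptr result. A
self-contained toy model of that control flow (a simplification, not the
real per-class cache):

  #include <cstdlib>

  struct ToyCache {
    static const int kBatchSize = 4;
    void *batch[kBatchSize];
    int count = 0;

    // Stands in for AllocateBatch(): fetch a batch of chunks from the
    // backing store; return false instead of dying when that fails.
    bool Refill() {
      for (int i = 0; i < kBatchSize; i++) {
        void *p = malloc(64);
        if (!p) {                      // OOM: roll back and report failure
          while (count > 0) free(batch[--count]);
          return false;
        }
        batch[count++] = p;
      }
      return true;
    }

    // Mirrors the patched Allocate(): an empty cache plus a failed refill
    // yields nullptr rather than a crash.
    void *Allocate() {
      if (count == 0 && !Refill())
        return nullptr;
      return batch[--count];
    }
  };

  int main() {
    ToyCache c;
    void *p = c.Allocate();  // a fresh chunk, or nullptr under OOM
    free(p);                 // free(nullptr) is a no-op
    // (the remaining cached chunks leak; acceptable for a demo)
  }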

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h Wed Jun 21 19:02:37 2017
@@ -24,7 +24,8 @@ template<class SizeClassAllocator> struc
 // be returned by MmapOrDie().
 //
 // Region:
-//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+//                                                             kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty())
-      PopulateFreeList(stat, c, sci, class_id);
+    if (sci->free_list.empty() &&
+        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+      return nullptr;
     CHECK(!sci->free_list.empty());
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {
 
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                      "SizeClassAllocator32"));
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    if (UNLIKELY(!res))
+      return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
     return &size_class_info_array[class_id];
   }
 
-  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                         SizeClassInfo *sci, uptr class_id) {
     uptr size = ClassIdToSize(class_id);
     uptr reg = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!reg))
+      return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        if (!b)
+          return false;
         b->Clear();
       }
       b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
     }
+    return true;
   }
 
   ByteMap possible_regions;
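
For a sense of the sizes PopulateFreeList() deals with: each region is
carved into kRegionSize / (size + kMetadataSize) chunks, which are then
linked into transfer batches. A quick illustration of that arithmetic
(the constants below are assumptions for the demo; the real values are
platform-dependent):

  #include <cstdio>
  #include <initializer_list>

  int main() {
    const unsigned long kRegionSize = 1UL << 20;  // assumed 1 MiB regions
    const unsigned long kMetadataSize = 0;        // assumed: no metadata
    for (unsigned long size : {16UL, 64UL, 256UL}) {
      unsigned long n_chunks = kRegionSize / (size + kMetadataSize);
      printf("class size %5lu -> %lu chunks per region\n", size, n_chunks);
    }
  }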

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h Wed Jun 21 19:02:37 2017
@@ -95,7 +95,9 @@ void *MmapFixedOrDie(uptr fixed_addr, up
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Dies on all but out-of-memory errors; returns nullptr on OOM.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type);
 // Disallow access to a memory range.  Use MmapFixedNoAccess to allocate
 // inaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);
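
The new semantics in one place: every failure remains fatal except
running out of memory, which now surfaces as nullptr. A hedged,
standalone approximation using plain POSIX calls (not the actual
compiler-rt implementation):

  #include <cerrno>
  #include <cstdio>
  #include <cstdlib>
  #include <sys/mman.h>

  void *MmapOrDieOnFatalErrorSketch(size_t size, const char *mem_type) {
    void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
      if (errno == ENOMEM)
        return nullptr;  // OOM: let the caller apply its own policy
      fprintf(stderr, "mmap failed for %s (errno=%d)\n", mem_type, errno);
      abort();           // anything else is a fatal, unexpected error
    }
    return p;
  }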

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc Wed Jun 21 19:02:37 2017
@@ -164,11 +164,14 @@ void *MmapOrDieOnFatalError(uptr size, c
 // We want to map a chunk of address space aligned to 'alignment'.
 // We do it by mapping a bit more and then unmapping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
-  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+  if (!map_res)
+    return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
   if (res & (alignment - 1))  // Not aligned.
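
The POSIX path keeps the existing over-map-and-trim trick: map
size + alignment bytes, round the start up to the alignment, and unmap
the unused head and tail. A standalone sketch of that technique
(simplified from the real code; alignment is assumed to be a power of
two):

  #include <cstdint>
  #include <sys/mman.h>

  void *MmapAlignedSketch(size_t size, size_t alignment) {
    size_t map_size = size + alignment;            // over-allocate
    void *map = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
      return nullptr;                              // includes the OOM case
    uintptr_t start = (uintptr_t)map;
    uintptr_t aligned = (start + alignment - 1) & ~(uintptr_t)(alignment - 1);
    if (aligned != start)                          // trim the unaligned head
      munmap((void *)start, aligned - start);
    uintptr_t end = start + map_size;
    if (aligned + size != end)                     // trim the unused tail
      munmap((void *)(aligned + size), end - (aligned + size));
    return (void *)aligned;
  }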

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc Wed Jun 21 19:02:37 2017
@@ -131,18 +131,24 @@ void UnmapOrDie(void *addr, uptr size) {
   }
 }
 
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+                                     const char *mmap_type) {
+  error_t last_error = GetLastError();
+  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+    return nullptr;
+  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (rv == 0) {
-    error_t last_error = GetLastError();
-    if (last_error != ERROR_NOT_ENOUGH_MEMORY)
-      ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
-  }
+  if (rv == 0)
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
   return rv;
 }
 
 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
 
@@ -152,7 +158,7 @@ void *MmapAlignedOrDie(uptr size, uptr a
   uptr mapped_addr =
       (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (!mapped_addr)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   // If we got it right on the first try, return. Otherwise, unmap it and go to
   // the slow path.
@@ -172,8 +178,7 @@ void *MmapAlignedOrDie(uptr size, uptr a
     mapped_addr =
         (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
     if (!mapped_addr)
-      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
-                              GetLastError());
+      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
     // Find the aligned address.
     uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
@@ -191,7 +196,7 @@ void *MmapAlignedOrDie(uptr size, uptr a
 
   // Fail if we can't make this work quickly.
   if (retries == kMaxRetries && mapped_addr == 0)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   return (void *)mapped_addr;
 }
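
Windows cannot cheaply trim a VirtualAlloc'd block, hence the retry loop
in the diff: probe with a larger PAGE_NOACCESS reservation to find an
aligned address, release it, then try to claim that exact address, which
another thread may grab in the meantime. A simplified sketch of one
retry round (illustrative, not the full compiler-rt logic):

  #include <windows.h>

  void *MmapAlignedWinSketch(SIZE_T size, SIZE_T alignment) {
    // Reserve a larger range only to discover a suitable aligned address.
    char *probe = (char *)VirtualAlloc(nullptr, size + alignment,
                                       MEM_RESERVE, PAGE_NOACCESS);
    if (!probe)
      return nullptr;                    // treated as OOM in this sketch
    UINT_PTR aligned =
        ((UINT_PTR)probe + alignment - 1) & ~(UINT_PTR)(alignment - 1);
    VirtualFree(probe, 0, MEM_RELEASE);  // give the range back...
    // ...and race to re-reserve the aligned address; if another thread
    // maps it first this returns null and the real code retries.
    return VirtualAlloc((LPVOID)aligned, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_READWRITE);
  }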

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc?rev=305972&r1=305971&r2=305972&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc Wed Jun 21 19:02:37 2017
@@ -72,12 +72,12 @@ TEST(SanitizerCommon, SortTest) {
   EXPECT_TRUE(IsSorted(array, 2));
 }
 
-TEST(SanitizerCommon, MmapAlignedOrDie) {
+TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
   uptr PageSize = GetPageSizeCached();
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(
+        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
             size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);
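
The commit message defers an OOM test; purely hypothetically (this is
not part of the commit), such a test could request a mapping the address
space cannot satisfy and assert on the nullptr result:

  // Hypothetical sketch only; names mirror the existing test style.
  TEST(SanitizerCommon, MmapAlignedOrDieOnFatalErrorOOM) {
    // An absurdly large power-of-two request should fail with OOM and,
    // under the new semantics, return nullptr rather than dying.
    uptr res = (uptr)MmapAlignedOrDieOnFatalError(
        (uptr)1 << 47, GetPageSizeCached(), "MmapAlignedOOMTest");
    EXPECT_EQ(0U, res);
  }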



