[compiler-rt] r322784 - [Sanitizers] Make common allocator agnostic to failure handling modes.

Alex Shlyapnikov via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 17 15:20:36 PST 2018


Author: alekseyshl
Date: Wed Jan 17 15:20:36 2018
New Revision: 322784

URL: http://llvm.org/viewvc/llvm-project?rev=322784&view=rev
Log:
[Sanitizers] Make common allocator agnostic to failure handling modes.

Summary:
Make common allocator agnostic to failure handling modes and move the
decision up to the particular sanitizer's allocator, where the context
is available (call stack, parameters, return nullptr/crash mode etc.)

It simplifies the common allocator and allows the particular sanitizer's
allocator to generate more specific and detailed error reports (which
will be implemented later).

The behavior is largely the same, except one case, the violation of the
common allocator's check for "size + alignment" overflow is now reported
as OOM instead of "bad request". It feels like a worthy tradeoff and
"size + alignment" is huge in this case anyway (thus, can be interpreted
as not enough memory to satisfy the request). There's also a Report()
statement added there.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits, #sanitizers

Differential Revision: https://reviews.llvm.org/D42198

Modified:
    compiler-rt/trunk/lib/asan/asan_allocator.cc
    compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc
    compiler-rt/trunk/lib/lsan/lsan_allocator.cc
    compiler-rt/trunk/lib/msan/msan_allocator.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc

Modified: compiler-rt/trunk/lib/asan/asan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc Wed Jan 17 15:20:36 2018
@@ -398,7 +398,7 @@ struct Allocator {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
     if (RssLimitExceeded())
-      return AsanAllocator::FailureHandler::OnOOM();
+      return ReturnNullOrDieOnFailure::OnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -433,7 +433,7 @@ struct Allocator {
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return AsanAllocator::FailureHandler::OnBadRequest();
+      return ReturnNullOrDieOnFailure::OnBadRequest();
     }
 
     AsanThread *t = GetCurrentThread();
@@ -446,8 +446,8 @@ struct Allocator {
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated = allocator.Allocate(cache, needed_size, 8);
     }
-    if (!allocated)
-      return nullptr;
+    if (UNLIKELY(!allocated))
+      return ReturnNullOrDieOnFailure::OnOOM();
 
     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -660,8 +660,8 @@ struct Allocator {
   }
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-    if (CheckForCallocOverflow(size, nmemb))
-      return AsanAllocator::FailureHandler::OnBadRequest();
+    if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+      return ReturnNullOrDieOnFailure::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.
@@ -883,7 +883,7 @@ void *asan_pvalloc(uptr size, BufferedSt
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -895,7 +895,7 @@ void *asan_memalign(uptr alignment, uptr
                     AllocType alloc_type) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(
       instance.Allocate(size, alignment, stack, alloc_type, true));
@@ -904,7 +904,7 @@ void *asan_memalign(uptr alignment, uptr
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    AsanAllocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);

Modified: compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc (original)
+++ compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc Wed Jan 17 15:20:36 2018
@@ -128,7 +128,7 @@ static void *HwasanAllocate(StackTrace *
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   HwasanThread *t = GetCurrentThread();
   void *allocated;
@@ -140,6 +140,8 @@ static void *HwasanAllocate(StackTrace *
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->state = CHUNK_ALLOCATED;

Modified: compiler-rt/trunk/lib/lsan/lsan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_allocator.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_allocator.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_allocator.cc Wed Jan 17 15:20:36 2018
@@ -76,9 +76,11 @@ void *Allocate(const StackTrace &stack,
     size = 1;
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  if (UNLIKELY(!p))
+    return ReturnNullOrDieOnFailure::OnOOM();
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);
@@ -90,7 +92,7 @@ void *Allocate(const StackTrace &stack,
 
 static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   size *= nmemb;
   return Allocate(stack, size, 1, true);
 }
@@ -108,7 +110,7 @@ void *Reallocate(const StackTrace &stack
   if (new_size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
     allocator.Deallocate(GetAllocatorCache(), p);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
@@ -129,7 +131,7 @@ uptr GetMallocUsableSize(const void *p)
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
 }

Modified: compiler-rt/trunk/lib/msan/msan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_allocator.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc Wed Jan 17 15:20:36 2018
@@ -141,7 +141,7 @@ static void *MsanAllocate(StackTrace *st
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -153,6 +153,8 @@ static void *MsanAllocate(StackTrace *st
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->requested_size = size;
@@ -236,7 +238,7 @@ void *msan_malloc(uptr size, StackTrace
 
 void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
 }
 
@@ -258,7 +260,7 @@ void *msan_pvalloc(uptr size, StackTrace
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -268,7 +270,7 @@ void *msan_pvalloc(uptr size, StackTrace
 void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -276,7 +278,7 @@ void *msan_aligned_alloc(uptr alignment,
 void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -284,7 +286,7 @@ void *msan_memalign(uptr alignment, uptr
 int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = MsanAllocate(stack, size, alignment, false);

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc Wed Jan 17 15:20:36 2018
@@ -140,8 +140,8 @@ void *InternalAlloc(uptr size, InternalA
   if (size + sizeof(u64) < size)
     return nullptr;
   void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }
@@ -155,16 +155,17 @@ void *InternalRealloc(void *addr, uptr s
   size = size + sizeof(u64);
   CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
   void *p = RawInternalRealloc(addr, size, cache);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   return (char*)p + sizeof(u64);
 }
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (UNLIKELY(CheckForCallocOverflow(count, size)))
-    return InternalAllocator::FailureHandler::OnBadRequest();
+    return DieOnFailure::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
-  if (p) internal_memset(p, 0, count * size);
+  if (LIKELY(p))
+    internal_memset(p, 0, count * size);
   return p;
 }
 

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h Wed Jan 17 15:20:36 2018
@@ -24,8 +24,6 @@ template <class PrimaryAllocator, class
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
-
   void InitLinkerInitialized(s32 release_to_os_interval_ms) {
     primary_.Init(release_to_os_interval_ms);
     secondary_.InitLinkerInitialized();
@@ -42,8 +40,12 @@ class CombinedAllocator {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
-    if (size + alignment < size)
-      return FailureHandler::OnBadRequest();
+    if (size + alignment < size) {
+      Report("WARNING: %s: CombinedAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, size, alignment);
+      return nullptr;
+    }
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -62,8 +64,6 @@ class CombinedAllocator {
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
-    if (!res)
-      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     return res;

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h Wed Jan 17 15:20:36 2018
@@ -47,7 +47,7 @@ typedef SizeClassAllocatorLocalCache<Pri
     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                          LargeMmapAllocator<NoOpMapUnmapCallback>
                          > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h Wed Jan 17 15:20:36 2018
@@ -17,12 +17,9 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback,
-          class FailureHandlerT = ReturnNullOrDieOnFailure>
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
-  typedef FailureHandlerT FailureHandler;
-
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
   }
@@ -38,12 +35,16 @@ class LargeMmapAllocator {
     if (alignment > page_size_)
       map_size += alignment;
     // Overflow.
-    if (map_size < size)
-      return FailureHandler::OnBadRequest();
+    if (map_size < size) {
+      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, map_size, alignment);
+      return nullptr;
+    }
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return FailureHandler::OnOOM();
+      return nullptr;
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Wed Jan 17 15:20:36 2018
@@ -444,7 +444,7 @@ TEST(SanitizerCommon, SizeClassAllocator
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<TestMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -482,7 +482,7 @@ TEST(SanitizerCommon, SizeClassAllocator
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -565,7 +565,6 @@ void TestCombinedAllocator() {
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
       Allocator;
-  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
   a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
@@ -579,11 +578,7 @@ void TestCombinedAllocator() {
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
-  // Set to false
-  SetAllocatorMayReturnNull(false);
-  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
-               "allocator is terminating the process");
+  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
 
   const uptr kNumAllocs = 100000;
   const uptr kNumIter = 10;
@@ -893,7 +888,7 @@ TEST(SanitizerCommon, SizeClassAllocator
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -920,7 +915,7 @@ TEST(SanitizerCommon, LargeMmapAllocator
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=322784&r1=322783&r2=322784&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Wed Jan 17 15:20:36 2018
@@ -153,10 +153,10 @@ static void SignalUnsafeCall(ThreadState
 void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                           bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
   if (UNLIKELY(p == 0))
-    return 0;
+    return ReturnNullOrDieOnFailure::OnOOM();
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
   if (signal)
@@ -179,7 +179,7 @@ void *user_alloc(ThreadState *thr, uptr
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
   if (UNLIKELY(CheckForCallocOverflow(size, n)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   void *p = user_alloc_internal(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);
@@ -224,7 +224,7 @@ void *user_realloc(ThreadState *thr, upt
 void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!IsPowerOfTwo(align))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -232,7 +232,7 @@ void *user_memalign(ThreadState *thr, up
 int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                         uptr sz) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = user_alloc_internal(thr, pc, sz, align);
@@ -246,7 +246,7 @@ int user_posix_memalign(ThreadState *thr
 void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -259,7 +259,7 @@ void *user_pvalloc(ThreadState *thr, upt
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   sz = sz ? RoundUpTo(sz, PageSize) : PageSize;




More information about the llvm-commits mailing list