[compiler-rt] 652707a - [nsan] Use sanitizer allocator

via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 12 13:56:43 PDT 2024


Author: Fangrui Song
Date: 2024-08-12T13:56:40-07:00
New Revision: 652707a6457eeb3927a1fe82e6b2cbc2a1fa22f5

URL: https://github.com/llvm/llvm-project/commit/652707a6457eeb3927a1fe82e6b2cbc2a1fa22f5
DIFF: https://github.com/llvm/llvm-project/commit/652707a6457eeb3927a1fe82e6b2cbc2a1fa22f5.diff

LOG: [nsan] Use sanitizer allocator

* The performance is better than the glibc allocator.
* Allocator interface functions, sanitizer allocator options, and
  MallocHooks/FreeHooks are supported.
* Shadow memory has specific memory layout requirement. Using libc
  allocator could lead to conflicts.
* When we add an mmap interceptor for reliability (the VMA could reuse a
  previously released VMA that is poisoned): glibc may invoke an
  internal system call to call munmap, which cannot be intercepted. We
  will not be able to return the shadow memory to the OS.

Similar to dfsan https://reviews.llvm.org/D101204 . Also intercept
operator new/delete to be similar to other sanitizers using the
sanitizer allocator. The align_val_t overload of operator new has
slightly less overhead.

Pull Request: https://github.com/llvm/llvm-project/pull/102764

Added: 
    compiler-rt/lib/nsan/nsan_allocator.cpp
    compiler-rt/lib/nsan/nsan_allocator.h
    compiler-rt/lib/nsan/nsan_new_delete.cpp
    compiler-rt/test/nsan/Posix/allocator_mapping.cpp
    compiler-rt/test/nsan/allocator_interface.cpp
    compiler-rt/test/nsan/malloc_hook.cpp
    compiler-rt/test/nsan/new_delete_test.cpp

Modified: 
    compiler-rt/lib/nsan/CMakeLists.txt
    compiler-rt/lib/nsan/nsan.cpp
    compiler-rt/lib/nsan/nsan.h
    compiler-rt/lib/nsan/nsan_flags.inc
    compiler-rt/lib/nsan/nsan_malloc_linux.cpp
    compiler-rt/lib/nsan/nsan_platform.h
    compiler-rt/lib/nsan/nsan_thread.cpp
    compiler-rt/lib/nsan/nsan_thread.h

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/nsan/CMakeLists.txt b/compiler-rt/lib/nsan/CMakeLists.txt
index fa9f02abdf0801..2846f0292307b3 100644
--- a/compiler-rt/lib/nsan/CMakeLists.txt
+++ b/compiler-rt/lib/nsan/CMakeLists.txt
@@ -4,9 +4,11 @@ include_directories(..)
 
 set(NSAN_SOURCES
   nsan.cpp
+  nsan_allocator.cpp
   nsan_flags.cpp
   nsan_interceptors.cpp
   nsan_malloc_linux.cpp
+  nsan_new_delete.cpp
   nsan_stats.cpp
   nsan_suppressions.cpp
   nsan_thread.cpp

diff  --git a/compiler-rt/lib/nsan/nsan.cpp b/compiler-rt/lib/nsan/nsan.cpp
index 7d10681a1bc917..bfa55c317cfe79 100644
--- a/compiler-rt/lib/nsan/nsan.cpp
+++ b/compiler-rt/lib/nsan/nsan.cpp
@@ -807,6 +807,7 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __nsan_init() {
   if (nsan_initialized)
     return;
   nsan_init_is_running = true;
+  SanitizerToolName = "NumericalStabilitySanitizer";
 
   InitializeFlags();
   InitializeSuppressions();
@@ -814,11 +815,12 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __nsan_init() {
 
   DisableCoreDumperIfNecessary();
 
-  if (!MmapFixedNoReserve(TypesAddr(), UnusedAddr() - TypesAddr()))
+  if (!MmapFixedNoReserve(TypesAddr(), AllocatorAddr() - TypesAddr()))
     Die();
 
   InitializeInterceptors();
   NsanTSDInit(NsanTSDDtor);
+  NsanAllocatorInit();
 
   NsanThread *main_thread = NsanThread::Create(nullptr, nullptr);
   SetCurrentThread(main_thread);

diff  --git a/compiler-rt/lib/nsan/nsan.h b/compiler-rt/lib/nsan/nsan.h
index 4e88ef4c00974d..08dd02746be65a 100644
--- a/compiler-rt/lib/nsan/nsan.h
+++ b/compiler-rt/lib/nsan/nsan.h
@@ -51,6 +51,14 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
 __nsan_default_options();
 }
 
+// Unwind the stack for fatal error, as the parameter `stack` is
+// empty without origins.
+#define GET_FATAL_STACK_TRACE_IF_EMPTY(STACK)                                  \
+  if (nsan_initialized && (STACK)->size == 0) {                                \
+    (STACK)->Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr,  \
+                    common_flags()->fast_unwind_on_fatal);                     \
+  }
+
 namespace __nsan {
 
 extern bool nsan_initialized;

diff  --git a/compiler-rt/lib/nsan/nsan_allocator.cpp b/compiler-rt/lib/nsan/nsan_allocator.cpp
new file mode 100644
index 00000000000000..19004ad7dc8dbe
--- /dev/null
+++ b/compiler-rt/lib/nsan/nsan_allocator.cpp
@@ -0,0 +1,340 @@
+//===- nsan_allocator.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// NumericalStabilitySanitizer allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "nsan_allocator.h"
+#include "interception/interception.h"
+#include "nsan.h"
+#include "nsan_flags.h"
+#include "nsan_platform.h"
+#include "nsan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+
+using namespace __nsan;
+
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+
+namespace {
+struct Metadata {
+  uptr requested_size;
+};
+
+struct NsanMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const {}
+  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+                      uptr user_size) const {}
+  void OnUnmap(uptr p, uptr size) const {}
+};
+
+const uptr kMaxAllowedMallocSize = 1ULL << 40;
+
+// Allocator64 parameters. Deliberately using a short name.
+struct AP64 {
+  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+  static const uptr kSpaceSize = 0x40000000000; // 4T.
+  static const uptr kMetadataSize = sizeof(Metadata);
+  using SizeClassMap = DefaultSizeClassMap;
+  using MapUnmapCallback = NsanMapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = LocalAddressSpaceView;
+};
+} // namespace
+
+using PrimaryAllocator = SizeClassAllocator64<AP64>;
+using Allocator = CombinedAllocator<PrimaryAllocator>;
+using AllocatorCache = Allocator::AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+static uptr max_malloc_size;
+
+void __nsan::NsanAllocatorInit() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+  if (common_flags()->max_allocation_size_mb)
+    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+                          kMaxAllowedMallocSize);
+  else
+    max_malloc_size = kMaxAllowedMallocSize;
+}
+
+static AllocatorCache *GetAllocatorCache(NsanThreadLocalMallocStorage *ms) {
+  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void NsanThreadLocalMallocStorage::Init() {
+  allocator.InitCache(GetAllocatorCache(this));
+}
+
+void NsanThreadLocalMallocStorage::CommitBack() {
+  allocator.SwallowCache(GetAllocatorCache(this));
+  allocator.DestroyCache(GetAllocatorCache(this));
+}
+
+static void *NsanAllocate(uptr size, uptr alignment, bool zero) {
+  if (UNLIKELY(size > max_malloc_size)) {
+    if (AllocatorMayReturnNull()) {
+      Report("WARNING: NumericalStabilitySanitizer failed to allocate 0x%zx "
+             "bytes\n",
+             size);
+      return nullptr;
+    }
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+  }
+  if (UNLIKELY(IsRssLimitExceeded())) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportRssLimitExceeded(&stack);
+  }
+
+  void *allocated;
+  if (NsanThread *t = GetCurrentThread()) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, size, alignment);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, size, alignment);
+  }
+  if (UNLIKELY(!allocated)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportOutOfMemory(size, &stack);
+  }
+  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+  meta->requested_size = size;
+  if (zero && allocator.FromPrimary(allocated))
+    REAL(memset)(allocated, 0, size);
+  __nsan_set_value_unknown(allocated, size);
+  RunMallocHooks(allocated, size);
+  return allocated;
+}
+
+void __nsan::NsanDeallocate(void *p) {
+  DCHECK(p);
+  RunFreeHooks(p);
+  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+  uptr size = meta->requested_size;
+  meta->requested_size = 0;
+  if (flags().poison_in_free)
+    __nsan_set_value_unknown(p, size);
+  if (NsanThread *t = GetCurrentThread()) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocator.Deallocate(cache, p);
+  } else {
+    // In a just created thread, glibc's _dl_deallocate_tls might reach here
+    // before nsan_current_thread is set.
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocator.Deallocate(cache, p);
+  }
+}
+
+static void *NsanReallocate(void *ptr, uptr new_size, uptr alignment) {
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(ptr));
+  uptr old_size = meta->requested_size;
+  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(ptr);
+  if (new_size <= actually_allocated_size) {
+    // We are not reallocating here.
+    meta->requested_size = new_size;
+    if (new_size > old_size)
+      __nsan_set_value_unknown((u8 *)ptr + old_size, new_size - old_size);
+    return ptr;
+  }
+  void *new_p = NsanAllocate(new_size, alignment, false);
+  if (new_p) {
+    uptr memcpy_size = Min(new_size, old_size);
+    REAL(memcpy)(new_p, ptr, memcpy_size);
+    __nsan_copy_values(new_p, ptr, memcpy_size);
+    NsanDeallocate(ptr);
+  }
+  return new_p;
+}
+
+static void *NsanCalloc(uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
+  return NsanAllocate(nmemb * size, sizeof(u64), true);
+}
+
+static const void *AllocationBegin(const void *p) {
+  if (!p)
+    return nullptr;
+  void *beg = allocator.GetBlockBegin(p);
+  if (!beg)
+    return nullptr;
+  auto *b = reinterpret_cast<Metadata *>(allocator.GetMetaData(beg));
+  if (!b)
+    return nullptr;
+  if (b->requested_size == 0)
+    return nullptr;
+
+  return beg;
+}
+
+static uptr AllocationSizeFast(const void *p) {
+  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
+static uptr AllocationSize(const void *p) {
+  if (!p)
+    return 0;
+  if (allocator.GetBlockBegin(p) != p)
+    return 0;
+  return AllocationSizeFast(p);
+}
+
+void *__nsan::nsan_malloc(uptr size) {
+  return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
+}
+
+void *__nsan::nsan_calloc(uptr nmemb, uptr size) {
+  return SetErrnoOnNull(NsanCalloc(nmemb, size));
+}
+
+void *__nsan::nsan_realloc(void *ptr, uptr size) {
+  if (!ptr)
+    return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
+  if (size == 0) {
+    NsanDeallocate(ptr);
+    return nullptr;
+  }
+  return SetErrnoOnNull(NsanReallocate(ptr, size, sizeof(u64)));
+}
+
+void *__nsan::nsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportReallocArrayOverflow(nmemb, size, &stack);
+  }
+  return nsan_realloc(ptr, nmemb * size);
+}
+
+void *__nsan::nsan_valloc(uptr size) {
+  return SetErrnoOnNull(NsanAllocate(size, GetPageSizeCached(), false));
+}
+
+void *__nsan::nsan_pvalloc(uptr size) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(NsanAllocate(size, PageSize, false));
+}
+
+void *__nsan::nsan_aligned_alloc(uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
+}
+
+void *__nsan::nsan_memalign(uptr alignment, uptr size) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportInvalidAllocationAlignment(alignment, &stack);
+  }
+  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
+}
+
+int __nsan::nsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    BufferedStackTrace stack;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = NsanAllocate(size, alignment, false);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by NsanAllocate.
+    return errno_ENOMEM;
+  DCHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+extern "C" {
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+const void *__sanitizer_get_allocated_begin(const void *p) {
+  return AllocationBegin(p);
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+  uptr ret = AllocationSizeFast(p);
+  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+  return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+}

diff  --git a/compiler-rt/lib/nsan/nsan_allocator.h b/compiler-rt/lib/nsan/nsan_allocator.h
new file mode 100644
index 00000000000000..d41560493c1a74
--- /dev/null
+++ b/compiler-rt/lib/nsan/nsan_allocator.h
@@ -0,0 +1,41 @@
+//===-- nsan_allocator.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NSAN_ALLOCATOR_H
+#define NSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __nsan {
+
+struct NsanThreadLocalMallocStorage {
+  // Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
+  alignas(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+  void Init();
+  void CommitBack();
+
+private:
+  // These objects are allocated via mmap() and are zero-initialized.
+  NsanThreadLocalMallocStorage() {}
+};
+
+void NsanAllocatorInit();
+void NsanDeallocate(void *ptr);
+
+void *nsan_malloc(uptr size);
+void *nsan_calloc(uptr nmemb, uptr size);
+void *nsan_realloc(void *ptr, uptr size);
+void *nsan_reallocarray(void *ptr, uptr nmemb, uptr size);
+void *nsan_valloc(uptr size);
+void *nsan_pvalloc(uptr size);
+void *nsan_aligned_alloc(uptr alignment, uptr size);
+void *nsan_memalign(uptr alignment, uptr size);
+int nsan_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+} // namespace __nsan
+#endif // NSAN_ALLOCATOR_H

diff  --git a/compiler-rt/lib/nsan/nsan_flags.inc b/compiler-rt/lib/nsan/nsan_flags.inc
index 63c15475f6754b..658cd5b3b01bf4 100644
--- a/compiler-rt/lib/nsan/nsan_flags.inc
+++ b/compiler-rt/lib/nsan/nsan_flags.inc
@@ -46,4 +46,5 @@ NSAN_FLAG(bool, enable_loadtracking_stats, false,
           "If true, compute load tracking stats, i.e. for each load from "
           "memory, the number of times nsan resumed from the original value "
           "due to invalid or unknown types.")
+NSAN_FLAG(bool, poison_in_free, true, "")
 NSAN_FLAG(bool, print_stats_on_exit, false, "If true, print stats on exit.")

diff  --git a/compiler-rt/lib/nsan/nsan_malloc_linux.cpp b/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
index 02f52e7be07fac..c97591e4ac1593 100644
--- a/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
+++ b/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
@@ -12,14 +12,16 @@
 
 #include "interception/interception.h"
 #include "nsan.h"
+#include "nsan_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_platform.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
 
 #if !SANITIZER_APPLE && !SANITIZER_WINDOWS
 using namespace __sanitizer;
-using __nsan::nsan_initialized;
+using namespace __nsan;
 
 namespace {
 struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
@@ -28,78 +30,53 @@ struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
 } // namespace
 
 INTERCEPTOR(void *, aligned_alloc, uptr align, uptr size) {
-  void *res = REAL(aligned_alloc)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_aligned_alloc(align, size);
 }
 
 INTERCEPTOR(void *, calloc, uptr nmemb, uptr size) {
   if (DlsymAlloc::Use())
     return DlsymAlloc::Callocate(nmemb, size);
-
-  void *res = REAL(calloc)(nmemb, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), nmemb * size);
-  return res;
+  return nsan_calloc(nmemb, size);
 }
 
 INTERCEPTOR(void, free, void *ptr) {
+  if (UNLIKELY(!ptr))
+    return;
   if (DlsymAlloc::PointerIsMine(ptr))
     return DlsymAlloc::Free(ptr);
-  REAL(free)(ptr);
+  NsanDeallocate(ptr);
 }
 
 INTERCEPTOR(void *, malloc, uptr size) {
   if (DlsymAlloc::Use())
     return DlsymAlloc::Allocate(size);
-  void *res = REAL(malloc)(size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_malloc(size);
 }
 
 INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
   if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
     return DlsymAlloc::Realloc(ptr, size);
-  void *res = REAL(realloc)(ptr, size);
-  // TODO: We might want to copy the types from the original allocation
-  // (although that would require that we know its size).
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_realloc(ptr, size);
 }
 
 #if SANITIZER_INTERCEPT_REALLOCARRAY
 INTERCEPTOR(void *, reallocarray, void *ptr, uptr nmemb, uptr size) {
-  void *res = REAL(reallocarray)(ptr, nmemb, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), nmemb * size);
-  return res;
+  return nsan_reallocarray(ptr, nmemb, size);
 }
 #endif // SANITIZER_INTERCEPT_REALLOCARRAY
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr size) {
-  int res = REAL(posix_memalign)(memptr, align, size);
-  if (res == 0 && *memptr)
-    __nsan_set_value_unknown(static_cast<u8 *>(*memptr), size);
-  return res;
+  return nsan_posix_memalign(memptr, align, size);
 }
 
 // Deprecated allocation functions (memalign, etc).
 #if SANITIZER_INTERCEPT_MEMALIGN
 INTERCEPTOR(void *, memalign, uptr align, uptr size) {
-  void *const res = REAL(memalign)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_memalign(align, size);
 }
 
 INTERCEPTOR(void *, __libc_memalign, uptr align, uptr size) {
-  void *const res = REAL(__libc_memalign)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_memalign(align, size);
 }
 #endif
 

diff  --git a/compiler-rt/lib/nsan/nsan_new_delete.cpp b/compiler-rt/lib/nsan/nsan_new_delete.cpp
new file mode 100644
index 00000000000000..f203a583f2c448
--- /dev/null
+++ b/compiler-rt/lib/nsan/nsan_new_delete.cpp
@@ -0,0 +1,126 @@
+//===-- nsan_new_delete.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of NumericalStabilitySanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "nsan.h"
+#include "nsan_allocator.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#include <stddef.h>
+
+using namespace __nsan;
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+#define OPERATOR_NEW_BODY(nothrow)                                             \
+  void *res = nsan_malloc(size);                                               \
+  if (!nothrow && UNLIKELY(!res)) {                                            \
+    BufferedStackTrace stack;                                                  \
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);                                    \
+    ReportOutOfMemory(size, &stack);                                           \
+  }                                                                            \
+  return res
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)                                       \
+  void *res = nsan_memalign((uptr)align, size);                                \
+  if (!nothrow && UNLIKELY(!res)) {                                            \
+    BufferedStackTrace stack;                                                  \
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);                                    \
+    ReportOutOfMemory(size, &stack);                                           \
+  }                                                                            \
+  return res;
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(/*nothrow=*/false); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(/*nothrow=*/false); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const &) {
+  OPERATOR_NEW_BODY(/*nothrow=*/true);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const &) {
+  OPERATOR_NEW_BODY(/*nothrow=*/true);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align) {
+  OPERATOR_NEW_BODY_ALIGN(/*nothrow=*/false);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align) {
+  OPERATOR_NEW_BODY_ALIGN(/*nothrow=*/false);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align,
+                   std::nothrow_t const &) {
+  OPERATOR_NEW_BODY_ALIGN(/*nothrow=*/true);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align,
+                     std::nothrow_t const &) {
+  OPERATOR_NEW_BODY_ALIGN(/*nothrow=*/true);
+}
+
+#define OPERATOR_DELETE_BODY                                                   \
+  if (ptr)                                                                     \
+  NsanDeallocate(ptr)
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align,
+                     std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+                       std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size,
+                       std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}

diff  --git a/compiler-rt/lib/nsan/nsan_platform.h b/compiler-rt/lib/nsan/nsan_platform.h
index c9d4cacd8c8898..cc9d63ecb4a9ba 100644
--- a/compiler-rt/lib/nsan/nsan_platform.h
+++ b/compiler-rt/lib/nsan/nsan_platform.h
@@ -40,7 +40,9 @@ namespace __nsan {
 // |                    |
 // |       unused       |
 // |                    |
-// +--------------------+ 0x400000000000 (kUnusedAddr)
+// +--------------------+ 0x440000008000
+// |     allocator      |
+// +--------------------+ 0x400000000000 (kHeapMemBeg)
 // |   shadow memory    |
 // +--------------------+ 0x200000000000 (kShadowAddr)
 // |   shadow types     |
@@ -79,7 +81,7 @@ enum {
 struct Mapping {
   // FIXME: kAppAddr == 0x700000000000 ?
   static const uptr kAppAddr = 0x700000008000;
-  static const uptr kUnusedAddr = 0x400000000000;
+  static const uptr kHeapMemBeg = 0x400000000000;
   static const uptr kShadowAddr = 0x200000000000;
   static const uptr kTypesAddr = 0x100000000000;
   static const uptr kShadowMask = ~0x700000000000;
@@ -90,7 +92,7 @@ struct Mapping {
 
 enum MappingType {
   MAPPING_APP_ADDR,
-  MAPPING_UNUSED_ADDR,
+  MAPPING_ALLOCATOR_ADDR,
   MAPPING_SHADOW_ADDR,
   MAPPING_TYPES_ADDR,
   MAPPING_SHADOW_MASK
@@ -100,8 +102,8 @@ template <typename Mapping, int Type> uptr MappingImpl() {
   switch (Type) {
   case MAPPING_APP_ADDR:
     return Mapping::kAppAddr;
-  case MAPPING_UNUSED_ADDR:
-    return Mapping::kUnusedAddr;
+  case MAPPING_ALLOCATOR_ADDR:
+    return Mapping::kHeapMemBeg;
   case MAPPING_SHADOW_ADDR:
     return Mapping::kShadowAddr;
   case MAPPING_TYPES_ADDR:
@@ -119,7 +121,7 @@ ALWAYS_INLINE
 uptr AppAddr() { return MappingArchImpl<MAPPING_APP_ADDR>(); }
 
 ALWAYS_INLINE
-uptr UnusedAddr() { return MappingArchImpl<MAPPING_UNUSED_ADDR>(); }
+uptr AllocatorAddr() { return MappingArchImpl<MAPPING_ALLOCATOR_ADDR>(); }
 
 ALWAYS_INLINE
 uptr ShadowAddr() { return MappingArchImpl<MAPPING_SHADOW_ADDR>(); }

diff  --git a/compiler-rt/lib/nsan/nsan_thread.cpp b/compiler-rt/lib/nsan/nsan_thread.cpp
index 273c46831cf381..85706aea80ebd1 100644
--- a/compiler-rt/lib/nsan/nsan_thread.cpp
+++ b/compiler-rt/lib/nsan/nsan_thread.cpp
@@ -55,6 +55,7 @@ void NsanThread::ClearShadowForThreadStackAndTLS() {
 void NsanThread::Init() {
   SetThreadStackAndTls();
   ClearShadowForThreadStackAndTLS();
+  malloc_storage().Init();
 }
 
 void NsanThread::TSDDtor(void *tsd) {
@@ -63,6 +64,7 @@ void NsanThread::TSDDtor(void *tsd) {
 }
 
 void NsanThread::Destroy() {
+  malloc_storage().CommitBack();
   // We also clear the shadow on thread destruction because
   // some code may still be executing in later TSD destructors
   // and we don't want it to have any poisoned stack.

diff  --git a/compiler-rt/lib/nsan/nsan_thread.h b/compiler-rt/lib/nsan/nsan_thread.h
index 18f24fd6f1d78a..143e61f37db96b 100644
--- a/compiler-rt/lib/nsan/nsan_thread.h
+++ b/compiler-rt/lib/nsan/nsan_thread.h
@@ -9,6 +9,7 @@
 #ifndef NSAN_THREAD_H
 #define NSAN_THREAD_H
 
+#include "nsan_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_posix.h"
 
@@ -34,6 +35,8 @@ class NsanThread {
   void StartSwitchFiber(uptr bottom, uptr size);
   void FinishSwitchFiber(uptr *bottom_old, uptr *size_old);
 
+  NsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+
   int destructor_iterations_;
   __sanitizer_sigset_t starting_sigset_;
 
@@ -56,6 +59,8 @@ class NsanThread {
 
   uptr tls_begin_;
   uptr tls_end_;
+
+  NsanThreadLocalMallocStorage malloc_storage_;
 };
 
 NsanThread *GetCurrentThread();

diff  --git a/compiler-rt/test/nsan/Posix/allocator_mapping.cpp b/compiler-rt/test/nsan/Posix/allocator_mapping.cpp
new file mode 100644
index 00000000000000..3a3e655e259d0b
--- /dev/null
+++ b/compiler-rt/test/nsan/Posix/allocator_mapping.cpp
@@ -0,0 +1,30 @@
+/// From msan/allocator_mapping.cpp
+/// Test that a module constructor can not map memory over the NSan heap
+/// (without MAP_FIXED, of course).
+// RUN: %clangxx_nsan -O0 %s -o %t_1
+// RUN: %clangxx_nsan -O0 -DHEAP_ADDRESS=$(%run %t_1) %s -o %t_2 && %run %t_2
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#ifdef HEAP_ADDRESS
+struct A {
+  A() {
+    void *const hint = reinterpret_cast<void *>(HEAP_ADDRESS);
+    void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    // This address must be already mapped. Check that mmap() succeeds, but at a
+    // different address.
+    assert(p != reinterpret_cast<void *>(-1));
+    assert(p != hint);
+  }
+} a;
+#endif
+
+int main() {
+  void *p = malloc(10);
+  printf("0x%zx\n", reinterpret_cast<size_t>(p) & (~0xfff));
+  free(p);
+}

diff  --git a/compiler-rt/test/nsan/allocator_interface.cpp b/compiler-rt/test/nsan/allocator_interface.cpp
new file mode 100644
index 00000000000000..e658bd07291ad7
--- /dev/null
+++ b/compiler-rt/test/nsan/allocator_interface.cpp
@@ -0,0 +1,45 @@
+/// From sanitizer_common/TestCases/allocator_interface.cpp
+// RUN: %clangxx_nsan %s -o %t && %run %t 1234
+// RUN: %clangxx_nsan %s -o %t && %run %t 5678910
+
+#include <assert.h>
+#include <sanitizer/allocator_interface.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>
+
+void Test(int size) {
+  auto allocated_bytes_before = __sanitizer_get_current_allocated_bytes();
+  int *p = (int *)malloc(size);
+  assert(__sanitizer_get_estimated_allocated_size(size) >= size);
+  assert(__sanitizer_get_ownership(p));
+  assert(!__sanitizer_get_ownership(&p));
+  assert(__sanitizer_get_allocated_size(p) == size);
+  assert(__sanitizer_get_allocated_size_fast(p) == size);
+  assert(__sanitizer_get_allocated_begin(p) == p);
+  assert(__sanitizer_get_allocated_begin(p + 1) == p);
+  assert(__sanitizer_get_current_allocated_bytes() >=
+         size + allocated_bytes_before);
+  assert(__sanitizer_get_current_allocated_bytes() <=
+         2 * size + allocated_bytes_before);
+  assert(__sanitizer_get_heap_size() >= size);
+  free(p);
+
+  // These are not implemented.
+  assert(__sanitizer_get_unmapped_bytes() <= 1);
+  assert(__sanitizer_get_free_bytes() > 0);
+
+  __sanitizer_purge_allocator();
+}
+
+int main(int argc, char **argv) {
+  int size = atoi(argv[1]);
+
+  Test(size);
+
+  // Check the thread local caches work as well.
+  std::thread t(Test, size);
+  t.join();
+
+  return 0;
+}

diff --git a/compiler-rt/test/nsan/malloc_hook.cpp b/compiler-rt/test/nsan/malloc_hook.cpp
new file mode 100644
index 00000000000000..3dcbfd845e7588
--- /dev/null
+++ b/compiler-rt/test/nsan/malloc_hook.cpp
@@ -0,0 +1,57 @@
+// RUN: %clangxx_nsan %s -o %t && %run %t 2>&1 | FileCheck %s
+
+#include <sanitizer/allocator_interface.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+extern "C" {
+const volatile void *global_ptr;
+
+#define WRITE(s) write(1, s, sizeof(s))
+
+// Note: avoid calling functions that allocate memory in malloc/free
+// to avoid infinite recursion.
+void __sanitizer_malloc_hook(const volatile void *ptr, size_t sz) {
+  if (__sanitizer_get_ownership(ptr) && sz == 4) {
+    WRITE("MallocHook\n");
+    global_ptr = ptr;
+  }
+}
+void __sanitizer_free_hook(const volatile void *ptr) {
+  if (__sanitizer_get_ownership(ptr) && ptr == global_ptr)
+    WRITE("FreeHook\n");
+}
+} // extern "C"
+
+volatile int *x;
+
+void MallocHook1(const volatile void *ptr, size_t sz) { WRITE("MH1\n"); }
+void MallocHook2(const volatile void *ptr, size_t sz) { WRITE("MH2\n"); }
+void FreeHook1(const volatile void *ptr) { WRITE("FH1\n"); }
+void FreeHook2(const volatile void *ptr) { WRITE("FH2\n"); }
+// Call this function with uninitialized arguments to poison
+// TLS shadow for function parameters before calling operator
+// new and, eventually, user-provided hook.
+__attribute__((noinline)) void allocate(int *unused1, int *unused2) {
+  x = new int;
+}
+
+int main() {
+  __sanitizer_install_malloc_and_free_hooks(MallocHook1, FreeHook1);
+  __sanitizer_install_malloc_and_free_hooks(MallocHook2, FreeHook2);
+  int *undef1, *undef2;
+  allocate(undef1, undef2);
+  // CHECK: MallocHook
+  // CHECK: MH1
+  // CHECK: MH2
+  // Check that malloc hook was called with correct argument.
+  if (global_ptr != (void *)x) {
+    _exit(1);
+  }
+  *x = 0;
+  delete x;
+  // CHECK: FreeHook
+  // CHECK: FH1
+  // CHECK: FH2
+  return 0;
+}

diff --git a/compiler-rt/test/nsan/new_delete_test.cpp b/compiler-rt/test/nsan/new_delete_test.cpp
new file mode 100644
index 00000000000000..356145e9bf21f0
--- /dev/null
+++ b/compiler-rt/test/nsan/new_delete_test.cpp
@@ -0,0 +1,77 @@
+/// From sanitizer_common/TestCases/Linux/new_delete_test.cpp
+// RUN: %clangxx_nsan -fno-sized-deallocation -O0 %s -o %t && %run %t
+// RUN: %clangxx_nsan -fsized-deallocation -O0 %s -o %t && %run %t
+
+#include <cstddef>
+
+namespace std {
+struct nothrow_t {};
+static const nothrow_t nothrow;
+enum class align_val_t : size_t {};
+} // namespace std
+
+void *operator new(size_t);
+void *operator new[](size_t);
+void *operator new(size_t, std::nothrow_t const &);
+void *operator new[](size_t, std::nothrow_t const &);
+void *operator new(size_t, std::align_val_t);
+void *operator new[](size_t, std::align_val_t);
+void *operator new(size_t, std::align_val_t, std::nothrow_t const &);
+void *operator new[](size_t, std::align_val_t, std::nothrow_t const &);
+
+void operator delete(void *) throw();
+void operator delete[](void *) throw();
+void operator delete(void *, std::nothrow_t const &);
+void operator delete[](void *, std::nothrow_t const &);
+void operator delete(void *, size_t) throw();
+void operator delete[](void *, size_t) throw();
+void operator delete(void *, std::align_val_t) throw();
+void operator delete[](void *, std::align_val_t) throw();
+void operator delete(void *, std::align_val_t, std::nothrow_t const &);
+void operator delete[](void *, std::align_val_t, std::nothrow_t const &);
+void operator delete(void *, size_t, std::align_val_t) throw();
+void operator delete[](void *, size_t, std::align_val_t) throw();
+
+template <typename T> inline T *break_optimization(T *arg) {
+  __asm__ __volatile__("" : : "r"(arg) : "memory");
+  return arg;
+}
+
+struct S12 {
+  int a, b, c;
+};
+struct alignas(128) S12_128 {
+  int a, b, c;
+};
+struct alignas(256) S12_256 {
+  int a, b, c;
+};
+struct alignas(512) S1024_512 {
+  char a[1024];
+};
+struct alignas(1024) S1024_1024 {
+  char a[1024];
+};
+
+int main(int argc, char **argv) {
+  delete break_optimization(new S12);
+  operator delete(break_optimization(new S12), std::nothrow);
+  delete[] break_optimization(new S12[100]);
+  operator delete[](break_optimization(new S12[100]), std::nothrow);
+
+  delete break_optimization(new S12_128);
+  operator delete(break_optimization(new S12_128),
+                  std::align_val_t(alignof(S12_128)));
+  operator delete(break_optimization(new S12_128),
+                  std::align_val_t(alignof(S12_128)), std::nothrow);
+  operator delete(break_optimization(new S12_128), sizeof(S12_128),
+                  std::align_val_t(alignof(S12_128)));
+
+  delete[] break_optimization(new S12_128[100]);
+  operator delete[](break_optimization(new S12_128[100]),
+                    std::align_val_t(alignof(S12_128)));
+  operator delete[](break_optimization(new S12_128[100]),
+                    std::align_val_t(alignof(S12_128)), std::nothrow);
+  operator delete[](break_optimization(new S12_128[100]), sizeof(S12_128[100]),
+                    std::align_val_t(alignof(S12_128)));
+}


        


More information about the llvm-commits mailing list