[llvm-branch-commits] [nsan] Use sanitizer allocator (PR #102764)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sat Aug 10 10:38:10 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Fangrui Song (MaskRay)

<details>
<summary>Changes</summary>

* The performance is better than that of the glibc allocator.
* Allocator interface functions, sanitizer allocator options, and
  MallocHooks/FreeHooks are supported.
* Shadow memory has a specific memory layout requirement. Using the libc
  allocator could lead to conflicts.
* When we intercept mmap for reliability (the VMA could reuse a
  previously released VMA that is poisoned): glibc may invoke an
  internal munmap system call, which cannot be intercepted. We
  will not be able to return the shadow memory to the OS.

Similar to dfsan https://reviews.llvm.org/D101204


---

Patch is 28.97 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/102764.diff


13 Files Affected:

- (modified) compiler-rt/lib/nsan/CMakeLists.txt (+2) 
- (modified) compiler-rt/lib/nsan/nsan.cpp (+2-1) 
- (modified) compiler-rt/lib/nsan/nsan.h (+8) 
- (added) compiler-rt/lib/nsan/nsan_allocator.cpp (+334) 
- (added) compiler-rt/lib/nsan/nsan_allocator.h (+41) 
- (modified) compiler-rt/lib/nsan/nsan_malloc_linux.cpp (+14-37) 
- (modified) compiler-rt/lib/nsan/nsan_platform.h (+8-6) 
- (modified) compiler-rt/lib/nsan/nsan_thread.cpp (+2) 
- (modified) compiler-rt/lib/nsan/nsan_thread.h (+5) 
- (added) compiler-rt/test/nsan/Posix/allocator_mapping.cpp (+30) 
- (added) compiler-rt/test/nsan/allocator_interface.cpp (+45) 
- (added) compiler-rt/test/nsan/malloc_hook.cpp (+57) 
- (added) compiler-rt/test/nsan/new_delete_test.cpp (+71) 


``````````diff
diff --git a/compiler-rt/lib/nsan/CMakeLists.txt b/compiler-rt/lib/nsan/CMakeLists.txt
index fa9f02abdf080..2846f0292307b 100644
--- a/compiler-rt/lib/nsan/CMakeLists.txt
+++ b/compiler-rt/lib/nsan/CMakeLists.txt
@@ -4,9 +4,11 @@ include_directories(..)
 
 set(NSAN_SOURCES
   nsan.cpp
+  nsan_allocator.cpp
   nsan_flags.cpp
   nsan_interceptors.cpp
   nsan_malloc_linux.cpp
+  nsan_new_delete.cpp
   nsan_stats.cpp
   nsan_suppressions.cpp
   nsan_thread.cpp
diff --git a/compiler-rt/lib/nsan/nsan.cpp b/compiler-rt/lib/nsan/nsan.cpp
index 7d10681a1bc91..46b41bfce3a33 100644
--- a/compiler-rt/lib/nsan/nsan.cpp
+++ b/compiler-rt/lib/nsan/nsan.cpp
@@ -814,11 +814,12 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __nsan_init() {
 
   DisableCoreDumperIfNecessary();
 
-  if (!MmapFixedNoReserve(TypesAddr(), UnusedAddr() - TypesAddr()))
+  if (!MmapFixedNoReserve(TypesAddr(), AllocatorAddr() - TypesAddr()))
     Die();
 
   InitializeInterceptors();
   NsanTSDInit(NsanTSDDtor);
+  NsanAllocatorInit();
 
   NsanThread *main_thread = NsanThread::Create(nullptr, nullptr);
   SetCurrentThread(main_thread);
diff --git a/compiler-rt/lib/nsan/nsan.h b/compiler-rt/lib/nsan/nsan.h
index 4e88ef4c00974..08dd02746be65 100644
--- a/compiler-rt/lib/nsan/nsan.h
+++ b/compiler-rt/lib/nsan/nsan.h
@@ -51,6 +51,14 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
 __nsan_default_options();
 }
 
+// Unwind the stack for fatal error, as the parameter `stack` is
+// empty without origins.
+#define GET_FATAL_STACK_TRACE_IF_EMPTY(STACK)                                  \
+  if (nsan_initialized && (STACK)->size == 0) {                                \
+    (STACK)->Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr,  \
+                    common_flags()->fast_unwind_on_fatal);                     \
+  }
+
 namespace __nsan {
 
 extern bool nsan_initialized;
diff --git a/compiler-rt/lib/nsan/nsan_allocator.cpp b/compiler-rt/lib/nsan/nsan_allocator.cpp
new file mode 100644
index 0000000000000..ebd47bdbf5280
--- /dev/null
+++ b/compiler-rt/lib/nsan/nsan_allocator.cpp
@@ -0,0 +1,334 @@
+//===- nsan_allocator.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// NumericalStabilitySanitizer allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "nsan_allocator.h"
+#include "interception/interception.h"
+#include "nsan.h"
+#include "nsan_platform.h"
+#include "nsan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+
+using namespace __nsan;
+
+namespace {
+struct Metadata {
+  uptr requested_size;
+};
+
+struct NsanMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const {}
+  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+                      uptr user_size) const {}
+  void OnUnmap(uptr p, uptr size) const {}
+};
+
+const uptr kMaxAllowedMallocSize = 1ULL << 40;
+
+// Allocator64 parameters. Deliberately using a short name.
+struct AP64 {
+  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+  static const uptr kSpaceSize = 0x40000000000; // 4T.
+  static const uptr kMetadataSize = sizeof(Metadata);
+  using SizeClassMap = DefaultSizeClassMap;
+  using MapUnmapCallback = NsanMapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = LocalAddressSpaceView;
+};
+} // namespace
+
+using PrimaryAllocator = SizeClassAllocator64<AP64>;
+using Allocator = CombinedAllocator<PrimaryAllocator>;
+using AllocatorCache = Allocator::AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+static uptr max_malloc_size;
+
+void __nsan::NsanAllocatorInit() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+  if (common_flags()->max_allocation_size_mb)
+    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+                          kMaxAllowedMallocSize);
+  else
+    max_malloc_size = kMaxAllowedMallocSize;
+}
+
+static AllocatorCache *GetAllocatorCache(NsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void NsanThreadLocalMallocStorage::Init() {
+  allocator.InitCache(GetAllocatorCache(this));
+}
+
+void NsanThreadLocalMallocStorage::CommitBack() {
+  allocator.SwallowCache(GetAllocatorCache(this));
+  allocator.DestroyCache(GetAllocatorCache(this));
+}
+
+static void *NsanAllocate(uptr size, uptr alignment, bool zero) {
+  if (UNLIKELY(size > max_malloc_size)) {
+    if (AllocatorMayReturnNull()) {
+      Report("WARNING: NumericalStabilitySanitizer failed to allocate 0x%zx "
+             "bytes\n",
+             size);
+      return nullptr;
+    }
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+  }
+  if (UNLIKELY(IsRssLimitExceeded())) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportRssLimitExceeded(&stack);
+  }
+  NsanThread *t = GetCurrentThread();
+  void *allocated;
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, size, alignment);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, size, alignment);
+  }
+  if (UNLIKELY(!allocated)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportOutOfMemory(size, &stack);
+  }
+  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+  meta->requested_size = size;
+  if (zero && allocator.FromPrimary(allocated))
+    REAL(memset)(allocated, 0, size);
+  __nsan_set_value_unknown(allocated, size);
+  RunMallocHooks(allocated, size);
+  return allocated;
+}
+
+void __nsan::NsanDeallocate(void *p) {
+  DCHECK(p);
+  RunFreeHooks(p);
+  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+  meta->requested_size = 0;
+  if (NsanThread *t = GetCurrentThread()) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocator.Deallocate(cache, p);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocator.Deallocate(cache, p);
+  }
+}
+
+static void *NsanReallocate(void *ptr, uptr new_size, uptr alignment) {
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(ptr));
+  uptr old_size = meta->requested_size;
+  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(ptr);
+  if (new_size <= actually_allocated_size) {
+    // We are not reallocating here.
+    meta->requested_size = new_size;
+    if (new_size > old_size)
+      __nsan_set_value_unknown((u8 *)ptr + old_size, new_size - old_size);
+    return ptr;
+  }
+  void *new_p = NsanAllocate(new_size, alignment, false);
+  if (new_p) {
+    __nsan_copy_values(new_p, ptr, Min(new_size, old_size));
+    NsanDeallocate(ptr);
+  }
+  return new_p;
+}
+
+static void *NsanCalloc(uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
+  return NsanAllocate(nmemb * size, sizeof(u64), true);
+}
+
+static const void *AllocationBegin(const void *p) {
+  if (!p)
+    return nullptr;
+  void *beg = allocator.GetBlockBegin(p);
+  if (!beg)
+    return nullptr;
+  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+  if (!b)
+    return nullptr;
+  if (b->requested_size == 0)
+    return nullptr;
+
+  return beg;
+}
+
+static uptr AllocationSize(const void *p) {
+  if (!p)
+    return 0;
+  const void *beg = allocator.GetBlockBegin(p);
+  if (beg != p)
+    return 0;
+  Metadata *b = (Metadata *)allocator.GetMetaData(p);
+  return b->requested_size;
+}
+
+static uptr AllocationSizeFast(const void *p) {
+  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
+void *__nsan::nsan_malloc(uptr size) {
+  return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
+}
+
+void *__nsan::nsan_calloc(uptr nmemb, uptr size) {
+  return SetErrnoOnNull(NsanCalloc(nmemb, size));
+}
+
+void *__nsan::nsan_realloc(void *ptr, uptr size) {
+  if (!ptr)
+    return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
+  if (size == 0) {
+    NsanDeallocate(ptr);
+    return nullptr;
+  }
+  return SetErrnoOnNull(NsanReallocate(ptr, size, sizeof(u64)));
+}
+
+void *__nsan::nsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportReallocArrayOverflow(nmemb, size, &stack);
+  }
+  return nsan_realloc(ptr, nmemb * size);
+}
+
+void *__nsan::nsan_valloc(uptr size) {
+  return SetErrnoOnNull(NsanAllocate(size, GetPageSizeCached(), false));
+}
+
+void *__nsan::nsan_pvalloc(uptr size) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(NsanAllocate(size, PageSize, false));
+}
+
+void *__nsan::nsan_aligned_alloc(uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
+}
+
+void *__nsan::nsan_memalign(uptr alignment, uptr size) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
+    ReportInvalidAllocationAlignment(alignment, &stack);
+  }
+  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
+}
+
+int __nsan::nsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    BufferedStackTrace stack;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = NsanAllocate(size, alignment, false);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by NsanAllocate.
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+extern "C" {
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+const void *__sanitizer_get_allocated_begin(const void *p) {
+  return AllocationBegin(p);
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+  uptr ret = AllocationSizeFast(p);
+  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+  return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+}
diff --git a/compiler-rt/lib/nsan/nsan_allocator.h b/compiler-rt/lib/nsan/nsan_allocator.h
new file mode 100644
index 0000000000000..d41560493c1a7
--- /dev/null
+++ b/compiler-rt/lib/nsan/nsan_allocator.h
@@ -0,0 +1,41 @@
+//===-- nsan_allocator.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NSAN_ALLOCATOR_H
+#define NSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __nsan {
+
+struct NsanThreadLocalMallocStorage {
+  // Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
+  alignas(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+  void Init();
+  void CommitBack();
+
+private:
+  // These objects are allocated via mmap() and are zero-initialized.
+  NsanThreadLocalMallocStorage() {}
+};
+
+void NsanAllocatorInit();
+void NsanDeallocate(void *ptr);
+
+void *nsan_malloc(uptr size);
+void *nsan_calloc(uptr nmemb, uptr size);
+void *nsan_realloc(void *ptr, uptr size);
+void *nsan_reallocarray(void *ptr, uptr nmemb, uptr size);
+void *nsan_valloc(uptr size);
+void *nsan_pvalloc(uptr size);
+void *nsan_aligned_alloc(uptr alignment, uptr size);
+void *nsan_memalign(uptr alignment, uptr size);
+int nsan_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+} // namespace __nsan
+#endif // NSAN_ALLOCATOR_H
diff --git a/compiler-rt/lib/nsan/nsan_malloc_linux.cpp b/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
index 02f52e7be07fa..c97591e4ac159 100644
--- a/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
+++ b/compiler-rt/lib/nsan/nsan_malloc_linux.cpp
@@ -12,14 +12,16 @@
 
 #include "interception/interception.h"
 #include "nsan.h"
+#include "nsan_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_platform.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
 
 #if !SANITIZER_APPLE && !SANITIZER_WINDOWS
 using namespace __sanitizer;
-using __nsan::nsan_initialized;
+using namespace __nsan;
 
 namespace {
 struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
@@ -28,78 +30,53 @@ struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
 } // namespace
 
 INTERCEPTOR(void *, aligned_alloc, uptr align, uptr size) {
-  void *res = REAL(aligned_alloc)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_aligned_alloc(align, size);
 }
 
 INTERCEPTOR(void *, calloc, uptr nmemb, uptr size) {
   if (DlsymAlloc::Use())
     return DlsymAlloc::Callocate(nmemb, size);
-
-  void *res = REAL(calloc)(nmemb, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), nmemb * size);
-  return res;
+  return nsan_calloc(nmemb, size);
 }
 
 INTERCEPTOR(void, free, void *ptr) {
+  if (UNLIKELY(!ptr))
+    return;
   if (DlsymAlloc::PointerIsMine(ptr))
     return DlsymAlloc::Free(ptr);
-  REAL(free)(ptr);
+  NsanDeallocate(ptr);
 }
 
 INTERCEPTOR(void *, malloc, uptr size) {
   if (DlsymAlloc::Use())
     return DlsymAlloc::Allocate(size);
-  void *res = REAL(malloc)(size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_malloc(size);
 }
 
 INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
   if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
     return DlsymAlloc::Realloc(ptr, size);
-  void *res = REAL(realloc)(ptr, size);
-  // TODO: We might want to copy the types from the original allocation
-  // (although that would require that we know its size).
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_realloc(ptr, size);
 }
 
 #if SANITIZER_INTERCEPT_REALLOCARRAY
 INTERCEPTOR(void *, reallocarray, void *ptr, uptr nmemb, uptr size) {
-  void *res = REAL(reallocarray)(ptr, nmemb, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), nmemb * size);
-  return res;
+  return nsan_reallocarray(ptr, nmemb, size);
 }
 #endif // SANITIZER_INTERCEPT_REALLOCARRAY
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr size) {
-  int res = REAL(posix_memalign)(memptr, align, size);
-  if (res == 0 && *memptr)
-    __nsan_set_value_unknown(static_cast<u8 *>(*memptr), size);
-  return res;
+  return nsan_posix_memalign(memptr, align, size);
 }
 
 // Deprecated allocation functions (memalign, etc).
 #if SANITIZER_INTERCEPT_MEMALIGN
 INTERCEPTOR(void *, memalign, uptr align, uptr size) {
-  void *const res = REAL(memalign)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_memalign(align, size);
 }
 
 INTERCEPTOR(void *, __libc_memalign, uptr align, uptr size) {
-  void *const res = REAL(__libc_memalign)(align, size);
-  if (res)
-    __nsan_set_value_unknown(static_cast<u8 *>(res), size);
-  return res;
+  return nsan_memalign(align, size);
 }
 #endif
 
diff --git a/compiler-rt/lib/nsan/nsan_platform.h b/compiler-rt/lib/nsan/nsan_platform.h
index c9d4cacd8c889..cc9d63ecb4a9b 100644
--- a/compiler-rt/lib/nsan/nsan_platform.h
+++ b/compiler-rt/lib/nsan/nsan_platform.h
@@ -40,7 +40,9 @@ namespace __nsan {
 // |                    |
 // |       unused       |
 // |                    |
-// +--------------------+ 0x400000000000 (kUnusedAddr)
+// +--------------------+ 0x440000008000
+// |     allocator      |
+// +--------------------+ 0x400000000000 (kHeapMemBeg)
 // |   shadow memory    |
 // +--------------------+ 0x200000000000 (kShadowAddr)
 // |   shadow types     |
@@ -79,7 +81,7 @@ enum {
 struct Mapping {
   // FIXME: kAppAddr == 0x700000000000 ?
   static const uptr kAppAddr = 0x700000008000;
-  static const uptr kUnusedAddr = 0x400000000000;
+  static const uptr kHeapMemBeg = 0x400000000000;
   static const uptr kShadowAddr = 0x200000000000;
   static const uptr kTypesAddr = 0x100000000000;
   static const uptr kShadowMask = ~0x700000000000;
@@ -90,7 +92,7 @@ struct Mapping {
 
 enum MappingType {
   MAPPING_APP_ADDR,
-  MAPPING_UNUSED_ADDR,
+  MAPPING_ALLOCATOR_ADDR,
   MAPPING_SHADOW_ADDR,
   MAPPING_TYPES_ADDR,
   MAPPING_SHADOW_MASK
@@ -100,8 +102,8 @@ template <typename Mapping, int Type> uptr MappingImpl() {
   switch (Type) {
   case MAPPING_APP_ADDR:
     return Mapping::kAppAddr;
-  case MAPPING_UNUSED_ADDR:
-    return Mapping::kUnusedAddr;
+  case MAPPING_ALLOCATOR_ADDR:
+    return Mapping::kHeapMemBeg;
   case MAPPING_SHADOW_ADDR:
     return Mapping::kShadowAddr;
   case MAPPING_TYPES_ADDR:
@@ -119,7 +121,7 @@ ALWAYS_INLINE
 uptr AppAddr() { return MappingArchImpl<MAPPING_APP_ADDR>(); }
 
 ALWAYS_INLINE
-uptr UnusedAddr() { return MappingArchImpl<MAPPING_UNUSED_ADDR>(); }
+uptr AllocatorAddr() { return MappingArchImpl<MAPPING_ALLOCATOR_ADDR>(); }
 
 ALWAYS_INLINE
 uptr ShadowAddr() { return MappingArchImpl<MAPPING_SHADOW_ADDR>(); }
diff --git a/compiler-rt/lib/nsan/nsan_thread.cpp b/compiler-rt/lib/nsan/nsan_thread.cpp
index 211c87afa9d5c..38de7bce50d30 100644
--- a/compiler-rt/lib/nsan/nsan_thread.cpp
+++ b/compiler-rt/lib/nsan/nsan_thread.cpp
@@ -45,6 +45,7 @@ void NsanThread::ClearShadowForThreadStackAndTLS() {
 void NsanThread::Init() {
   SetThreadStackAndTls();
...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/102764


More information about the llvm-branch-commits mailing list