[compiler-rt] 1fb612d - [dfsan] Add a DFSan allocator
Jianzhou Zhao via llvm-commits
llvm-commits at lists.llvm.org
Tue May 4 17:52:14 PDT 2021
Author: Jianzhou Zhao
Date: 2021-05-05T00:51:45Z
New Revision: 1fb612d060e7dc72610b4e83238e5561752fe737
URL: https://github.com/llvm/llvm-project/commit/1fb612d060e7dc72610b4e83238e5561752fe737
DIFF: https://github.com/llvm/llvm-project/commit/1fb612d060e7dc72610b4e83238e5561752fe737.diff
LOG: [dfsan] Add a DFSan allocator
This is a part of https://reviews.llvm.org/D101204
Reviewed By: morehouse
Differential Revision: https://reviews.llvm.org/D101666
Added:
compiler-rt/lib/dfsan/dfsan_allocator.cpp
compiler-rt/lib/dfsan/dfsan_allocator.h
Modified:
compiler-rt/lib/dfsan/CMakeLists.txt
compiler-rt/lib/dfsan/dfsan.cpp
compiler-rt/lib/dfsan/dfsan.h
compiler-rt/lib/dfsan/dfsan_flags.inc
compiler-rt/lib/dfsan/dfsan_thread.cpp
compiler-rt/lib/dfsan/dfsan_thread.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/dfsan/CMakeLists.txt b/compiler-rt/lib/dfsan/CMakeLists.txt
index fa6823c9c00c..5c8e7fdfd335 100644
--- a/compiler-rt/lib/dfsan/CMakeLists.txt
+++ b/compiler-rt/lib/dfsan/CMakeLists.txt
@@ -3,6 +3,7 @@ include_directories(..)
# Runtime library sources and build flags.
set(DFSAN_RTL_SOURCES
dfsan.cpp
+ dfsan_allocator.cpp
dfsan_chained_origin_depot.cpp
dfsan_custom.cpp
dfsan_interceptors.cpp
@@ -11,6 +12,7 @@ set(DFSAN_RTL_SOURCES
set(DFSAN_RTL_HEADERS
dfsan.h
+ dfsan_allocator.h
dfsan_chained_origin_depot.h
dfsan_flags.inc
dfsan_flags.h
diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp
index e60703cc4067..fb162e22f6b7 100644
--- a/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/compiler-rt/lib/dfsan/dfsan.cpp
@@ -540,10 +540,17 @@ static void SetOrigin(const void *dst, uptr size, u32 origin) {
*(u32 *)(end - kOriginAlign) = origin;
}
-static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
- uptr size) {
- dfsan_label *labelp = (dfsan_label *)shadow_addr;
- for (; size != 0; --size, ++labelp) {
+static void WriteShadowInRange(dfsan_label label, uptr beg_shadow_addr,
+ uptr end_shadow_addr) {
+  // TODO: After changing dfsan_label to 8-bit, use internal_memset when label
+ // is not 0.
+ dfsan_label *labelp = (dfsan_label *)beg_shadow_addr;
+ if (label) {
+ for (; (uptr)labelp < end_shadow_addr; ++labelp) *labelp = label;
+ return;
+ }
+
+ for (; (uptr)labelp < end_shadow_addr; ++labelp) {
// Don't write the label if it is already the value we need it to be.
// In a program where most addresses are not labeled, it is common that
// a page of shadow memory is entirely zeroed. The Linux copy-on-write
@@ -552,13 +559,18 @@ static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
// the value written does not change the value in memory. Avoiding the
// write when both |label| and |*labelp| are zero dramatically reduces
// the amount of real memory used by large programs.
- if (label == *labelp)
+ if (!*labelp)
continue;
- *labelp = label;
+ *labelp = 0;
}
}
+static void WriteShadowWithSize(dfsan_label label, uptr shadow_addr,
+ uptr size) {
+ WriteShadowInRange(label, shadow_addr, shadow_addr + size * sizeof(label));
+}
+
#define RET_CHAIN_ORIGIN(id) \
GET_CALLER_PC_BP_SP; \
(void)sp; \
@@ -597,6 +609,21 @@ SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
__dfsan_mem_origin_transfer(dst, src, len);
}
+namespace __dfsan {
+
+bool dfsan_inited = false;
+bool dfsan_init_is_running = false;
+
+void dfsan_copy_memory(void *dst, const void *src, uptr size) {
+ internal_memcpy(dst, src, size);
+ internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
+ size * sizeof(dfsan_label));
+ if (__dfsan_get_track_origins())
+ dfsan_mem_origin_transfer(dst, src, size);
+}
+
+} // namespace __dfsan
+
// If the label s is tainted, set the size bytes from the address p to be a new
// origin chain with the previous ID o and the current stack trace. This is
// used by instrumentation to reduce code size when too much code is inserted.
@@ -610,63 +637,64 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
}
}
-// Releases the pages within the origin address range, and sets the origin
-// addresses not on the pages to be 0.
-static void ReleaseOrClearOrigins(void *addr, uptr size) {
+// Releases the pages within the origin address range.
+static void ReleaseOrigins(void *addr, uptr size) {
const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
const void *end_addr = (void *)((uptr)addr + size);
const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);
+
+ if (end_origin_addr - beg_origin_addr <
+ common_flags()->clear_shadow_mmap_threshold)
+ return;
+
const uptr page_size = GetPageSizeCached();
const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size);
const uptr end_aligned = RoundDownTo(end_origin_addr, page_size);
- // dfsan_set_label can be called from the following cases
- // 1) mapped ranges by new/delete and malloc/free. This case has origin memory
- // size > 50k, and happens less frequently.
- // 2) zero-filling internal data structures by utility libraries. This case
- // has origin memory size < 16k, and happens more often.
- // Set kNumPagesThreshold to be 4 to avoid releasing small pages.
- const int kNumPagesThreshold = 4;
- if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
- return;
-
- ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
+ if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+ Die();
}
-void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
+// Releases the pages within the shadow address range, and sets
+// the shadow addresses not on the pages to be 0.
+static void ReleaseOrClearShadows(void *addr, uptr size) {
const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+ const void *end_addr = (void *)((uptr)addr + size);
+ const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
+
+ if (end_shadow_addr - beg_shadow_addr <
+ common_flags()->clear_shadow_mmap_threshold)
+ return WriteShadowWithSize(0, beg_shadow_addr, size);
+
+ const uptr page_size = GetPageSizeCached();
+ const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
+ const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
+
+ if (beg_aligned >= end_aligned) {
+ WriteShadowWithSize(0, beg_shadow_addr, size);
+ } else {
+ if (beg_aligned != beg_shadow_addr)
+ WriteShadowInRange(0, beg_shadow_addr, beg_aligned);
+ if (end_aligned != end_shadow_addr)
+ WriteShadowInRange(0, end_aligned, end_shadow_addr);
+ if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+ Die();
+ }
+}
+void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
if (0 != label) {
- WriteShadowIfDifferent(label, beg_shadow_addr, size);
+ const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+ WriteShadowWithSize(label, beg_shadow_addr, size);
if (__dfsan_get_track_origins())
SetOrigin(addr, size, origin);
return;
}
if (__dfsan_get_track_origins())
- ReleaseOrClearOrigins(addr, size);
+ ReleaseOrigins(addr, size);
- // If label is 0, releases the pages within the shadow address range, and sets
- // the shadow addresses not on the pages to be 0.
- const void *end_addr = (void *)((uptr)addr + size);
- const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
- const uptr page_size = GetPageSizeCached();
- const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
- const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
-
- // dfsan_set_label can be called from the following cases
- // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory
- // size > 100k, and happens less frequently.
- // 2) zero-filling internal data structures by utility libraries. This case
- // has shadow memory size < 32k, and happens more often.
- // Set kNumPagesThreshold to be 8 to avoid releasing small pages.
- const int kNumPagesThreshold = 8;
- if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
- return WriteShadowIfDifferent(label, beg_shadow_addr, size);
-
- WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr);
- ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
- WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned);
+ ReleaseOrClearShadows(addr, size);
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
@@ -916,6 +944,12 @@ static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
static void InitializeFlags() {
SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
flags().SetDefaults();
FlagParser parser;
@@ -981,7 +1015,13 @@ extern "C" void dfsan_flush() {
Die();
}
-static void dfsan_init(int argc, char **argv, char **envp) {
+static void DFsanInit(int argc, char **argv, char **envp) {
+ CHECK(!dfsan_init_is_running);
+ if (dfsan_inited)
+ return;
+ dfsan_init_is_running = true;
+ SanitizerToolName = "DataflowSanitizer";
+
InitializeFlags();
::InitializePlatformEarly();
@@ -995,7 +1035,7 @@ static void dfsan_init(int argc, char **argv, char **envp) {
// will load our executable in the middle of our unused region. This mostly
// works so long as the program doesn't use too much memory. We support this
// case by disabling memory protection when ASLR is disabled.
- uptr init_addr = (uptr)&dfsan_init;
+ uptr init_addr = (uptr)&DFsanInit;
if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))
MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());
@@ -1008,14 +1048,27 @@ static void dfsan_init(int argc, char **argv, char **envp) {
// Set up threads
DFsanTSDInit(DFsanTSDDtor);
+
+ dfsan_allocator_init();
+
DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
SetCurrentThread(main_thread);
main_thread->ThreadStart();
__dfsan_label_info[kInitializingLabel].desc = "<init label>";
+
+ dfsan_init_is_running = false;
+ dfsan_inited = true;
}
+namespace __dfsan {
+
+void dfsan_init() { DFsanInit(0, nullptr, nullptr); }
+
+} // namespace __dfsan
+
#if SANITIZER_CAN_USE_PREINIT_ARRAY
-__attribute__((section(".preinit_array"), used))
-static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;
+__attribute__((section(".preinit_array"),
+ used)) static void (*dfsan_init_ptr)(int, char **,
+ char **) = DFsanInit;
#endif
diff --git a/compiler-rt/lib/dfsan/dfsan.h b/compiler-rt/lib/dfsan/dfsan.h
index f29314a710ea..54048a763be1 100644
--- a/compiler-rt/lib/dfsan/dfsan.h
+++ b/compiler-rt/lib/dfsan/dfsan.h
@@ -62,6 +62,9 @@ void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
namespace __dfsan {
+extern bool dfsan_inited;
+extern bool dfsan_init_is_running;
+
void InitializeInterceptors();
inline dfsan_label *shadow_for(void *ptr) {
@@ -95,6 +98,23 @@ inline bool has_valid_shadow_addr(const void *ptr) {
return is_shadow_addr_valid((uptr)ptr_s);
}
+void dfsan_copy_memory(void *dst, const void *src, uptr size);
+
+void dfsan_allocator_init();
+void dfsan_deallocate(void *ptr);
+
+void *dfsan_malloc(uptr size);
+void *dfsan_calloc(uptr nmemb, uptr size);
+void *dfsan_realloc(void *ptr, uptr size);
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size);
+void *dfsan_valloc(uptr size);
+void *dfsan_pvalloc(uptr size);
+void *dfsan_aligned_alloc(uptr alignment, uptr size);
+void *dfsan_memalign(uptr alignment, uptr size);
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+void dfsan_init();
+
} // namespace __dfsan
#endif // DFSAN_H
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
new file mode 100644
index 000000000000..a18cdb14b93a
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -0,0 +1,291 @@
+//===-- dfsan_allocator.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+// DataflowSanitizer allocator.
+//===----------------------------------------------------------------------===//
+
+#include "dfsan_allocator.h"
+
+#include "dfsan.h"
+#include "dfsan_flags.h"
+#include "dfsan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+
+namespace __dfsan {
+
+struct Metadata {
+ uptr requested_size;
+};
+
+struct DFsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+ void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+};
+
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+  // TODO: DFSan assumes application memory starts from 0x700000008000. For an
+  // unknown reason, the sanitizer allocator does not support any start address
+  // between 0x700000008000 and 0x701000000000. After switching to fast8labels
+  // mode, DFSan memory layout will be changed to be the same as MSan's. Then
+  // we will set the start address to 0x700000000000 as MSan does.
+ static const uptr kSpaceBeg = 0x701000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef DFsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+static uptr max_malloc_size;
+
+void dfsan_allocator_init() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
+}
+
+AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+ return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void DFsanThreadLocalMallocStorage::CommitBack() {
+ allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
+ if (size > max_malloc_size) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
+ size);
+ return nullptr;
+ }
+ BufferedStackTrace stack;
+ ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+ }
+ DFsanThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, size, alignment);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportOutOfMemory(size, &stack);
+ }
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+ meta->requested_size = size;
+ if (zeroise) {
+ internal_memset(allocated, 0, size);
+ dfsan_set_label(0, allocated, size);
+ } else if (flags().zero_in_malloc) {
+ dfsan_set_label(0, allocated, size);
+ }
+ return allocated;
+}
+
+void dfsan_deallocate(void *p) {
+ CHECK(p);
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+ uptr size = meta->requested_size;
+ meta->requested_size = 0;
+ if (flags().zero_in_free)
+ dfsan_set_label(0, p, size);
+ DFsanThread *t = GetCurrentThread();
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocator.Deallocate(cache, p);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, p);
+ }
+}
+
+void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
+ uptr old_size = meta->requested_size;
+ uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
+ if (new_size <= actually_allocated_size) {
+ // We are not reallocating here.
+ meta->requested_size = new_size;
+ if (new_size > old_size && flags().zero_in_malloc)
+ dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
+ return old_p;
+ }
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
+ if (new_p) {
+ dfsan_copy_memory(new_p, old_p, memcpy_size);
+ dfsan_deallocate(old_p);
+ }
+ return new_p;
+}
+
+void *DFsanCalloc(uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
+ return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
+}
+
+static uptr AllocationSize(const void *p) {
+ if (!p)
+ return 0;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (beg != p)
+ return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(p);
+ return b->requested_size;
+}
+
+void *dfsan_malloc(uptr size) {
+ return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+}
+
+void *dfsan_calloc(uptr nmemb, uptr size) {
+ return SetErrnoOnNull(DFsanCalloc(nmemb, size));
+}
+
+void *dfsan_realloc(void *ptr, uptr size) {
+ if (!ptr)
+ return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+ if (size == 0) {
+ dfsan_deallocate(ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
+}
+
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportReallocArrayOverflow(nmemb, size, &stack);
+ }
+ return dfsan_realloc(ptr, nmemb * size);
+}
+
+void *dfsan_valloc(uptr size) {
+ return SetErrnoOnNull(
+ DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
+}
+
+void *dfsan_pvalloc(uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
+}
+
+void *dfsan_aligned_alloc(uptr alignment, uptr size) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+void *dfsan_memalign(uptr alignment, uptr size) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportInvalidAllocationAlignment(alignment, &stack);
+ }
+ return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ BufferedStackTrace stack;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by DFsanAllocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+} // namespace __dfsan
+
+using namespace __dfsan;
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.h b/compiler-rt/lib/dfsan/dfsan_allocator.h
new file mode 100644
index 000000000000..3b4171b6314d
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.h
@@ -0,0 +1,30 @@
+//===-- dfsan_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_ALLOCATOR_H
+#define DFSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __dfsan {
+
+struct DFsanThreadLocalMallocStorage {
+ ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+ void CommitBack();
+
+ private:
+ // These objects are allocated via mmap() and are zero-initialized.
+ DFsanThreadLocalMallocStorage() {}
+};
+
+} // namespace __dfsan
+#endif // DFSAN_ALLOCATOR_H
diff --git a/compiler-rt/lib/dfsan/dfsan_flags.inc b/compiler-rt/lib/dfsan/dfsan_flags.inc
index 4ac4ef736e45..f27228a647fc 100644
--- a/compiler-rt/lib/dfsan/dfsan_flags.inc
+++ b/compiler-rt/lib/dfsan/dfsan_flags.inc
@@ -40,3 +40,7 @@ DFSAN_FLAG(int, store_context_size, 20,
"The depth limit of origin tracking stack traces.")
DFSAN_FLAG(bool, check_origin_invariant, false,
"Whether to check if the origin invariant holds.")
+DFSAN_FLAG(bool, zero_in_malloc, true,
+ "Whether to zero shadow space of new allocated memory.")
+DFSAN_FLAG(bool, zero_in_free, true,
+ "Whether to zero shadow space of deallocated memory.")
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.cpp b/compiler-rt/lib/dfsan/dfsan_thread.cpp
index aa1209aafc33..6869cf231587 100644
--- a/compiler-rt/lib/dfsan/dfsan_thread.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_thread.cpp
@@ -3,6 +3,7 @@
#include <pthread.h>
#include "dfsan.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __dfsan {
@@ -24,16 +25,30 @@ DFsanThread *DFsanThread::Create(void *start_routine_trampoline,
void DFsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
uptr stack_size = 0;
- uptr tls_begin;
- GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin,
+ GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_,
&tls_size);
stack_.top = stack_.bottom + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
int local;
CHECK(AddrIsInStack((uptr)&local));
}
-void DFsanThread::Init() { SetThreadStackAndTls(); }
+void DFsanThread::ClearShadowForThreadStackAndTLS() {
+ dfsan_set_label(0, (void *)stack_.bottom, stack_.top - stack_.bottom);
+ if (tls_begin_ != tls_end_)
+ dfsan_set_label(0, (void *)tls_begin_, tls_end_ - tls_begin_);
+ DTLS *dtls = DTLS_Get();
+ CHECK_NE(dtls, 0);
+ ForEachDVT(dtls, [](const DTLS::DTV &dtv, int id) {
+ dfsan_set_label(0, (void *)(dtv.beg), dtv.size);
+ });
+}
+
+void DFsanThread::Init() {
+ SetThreadStackAndTls();
+ ClearShadowForThreadStackAndTLS();
+}
void DFsanThread::TSDDtor(void *tsd) {
DFsanThread *t = (DFsanThread *)tsd;
@@ -41,8 +56,14 @@ void DFsanThread::TSDDtor(void *tsd) {
}
void DFsanThread::Destroy() {
+ malloc_storage().CommitBack();
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
uptr size = RoundUpTo(sizeof(DFsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
+ DTLS_Destroy();
}
thread_return_t DFsanThread::ThreadStart() {
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.h b/compiler-rt/lib/dfsan/dfsan_thread.h
index 616bbc52661c..8dde626f5569 100644
--- a/compiler-rt/lib/dfsan/dfsan_thread.h
+++ b/compiler-rt/lib/dfsan/dfsan_thread.h
@@ -14,6 +14,7 @@
#ifndef DFSAN_THREAD_H
#define DFSAN_THREAD_H
+#include "dfsan_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __dfsan {
@@ -34,16 +35,21 @@ class DFsanThread {
uptr stack_top();
uptr stack_bottom();
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
bool IsMainThread() { return start_routine_ == nullptr; }
bool InSignalHandler() { return in_signal_handler_; }
void EnterSignalHandler() { in_signal_handler_++; }
void LeaveSignalHandler() { in_signal_handler_--; }
+ DFsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+
int destructor_iterations_;
private:
void SetThreadStackAndTls();
+ void ClearShadowForThreadStackAndTLS();
struct StackBounds {
uptr bottom;
uptr top;
@@ -59,7 +65,12 @@ class DFsanThread {
StackBounds stack_;
+ uptr tls_begin_;
+ uptr tls_end_;
+
unsigned in_signal_handler_;
+
+ DFsanThreadLocalMallocStorage malloc_storage_;
};
DFsanThread *GetCurrentThread();
More information about the llvm-commits
mailing list