[PATCH] [TSan] Allocate fd table in user heap instead of using internal allocator
Alexey Samsonov
samsonov at google.com
Tue Apr 16 05:11:01 PDT 2013
Address comments by dvyukov@ and eugenis@
Hi dvyukov,
http://llvm-reviews.chandlerc.com/D663
CHANGE SINCE LAST DIFF
http://llvm-reviews.chandlerc.com/D663?vs=1613&id=1642#toc
Files:
lib/sanitizer_common/sanitizer_allocator.cc
lib/sanitizer_common/tests/sanitizer_allocator_test.cc
lib/sanitizer_common/sanitizer_common.h
lib/tsan/rtl/tsan_platform_linux.cc
Index: lib/sanitizer_common/sanitizer_allocator.cc
===================================================================
--- lib/sanitizer_common/sanitizer_allocator.cc
+++ lib/sanitizer_common/sanitizer_allocator.cc
@@ -11,29 +11,61 @@
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
#include "sanitizer_common.h"
-// FIXME: We should probably use more low-level allocator that would
-// mmap some pages and split them into chunks to fulfill requests.
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern "C" void *__libc_malloc(__sanitizer::uptr size);
-extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
-#else // SANITIZER_LINUX && !SANITIZER_ANDROID
-# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
-
namespace __sanitizer {
-const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+#if SANITIZER_WORDSIZE == 32 || (defined(TSAN_GO) && SANITIZER_WINDOWS)
+typedef SizeClassAllocator32<0, (1ULL << 32), 16, CompactSizeClassMap>
+PrimaryAllocator;
+#else
+# if defined(__powerpc64__)
+static const uptr kInternalAllocatorSpace = 0x0d0000000000ULL;
+# else
+static const uptr kInternalAllocatorSpace = 0x690000000000ULL;
+# endif
+static const uptr kInternalAllocatorSize = 0x10000000000ULL; // 1T
+typedef SizeClassAllocator64<kInternalAllocatorSpace, kInternalAllocatorSize,
+ 0 /* metadata */, CompactSizeClassMap>
+PrimaryAllocator;
+#endif
+
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+static Allocator internal_allocator;
+static AllocatorCache internal_allocator_cache;
+static BlockingMutex internal_allocator_mu;
+static bool internal_allocator_inited;
+
+static const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+
+void GetInternalAllocatorLocation(u64 *space, u64 *size) {
+#if SANITIZER_WORDSIZE == 64
+ *space = kInternalAllocatorSpace;
+ *size = kInternalAllocatorSize;
+#elif SANITIZER_WORDSIZE == 32
+ *space = 0;
+ *size = (1ULL << 32);
+#endif
+}
void *InternalAlloc(uptr size) {
if (size + sizeof(u64) < size)
return 0;
- void *p = LIBC_MALLOC(size + sizeof(u64));
+ void *p = 0;
+ {
+ BlockingMutexLock l(&internal_allocator_mu);
+ if (!internal_allocator_inited) {
+ internal_allocator.Init();
+ internal_allocator_inited = true;
+ }
+ p = internal_allocator.Allocate(&internal_allocator_cache,
+ size + sizeof(u64), 8, false);
+ }
if (p == 0)
return 0;
((u64*)p)[0] = kBlockMagic;
@@ -44,9 +76,11 @@
if (addr == 0)
return;
addr = (char*)addr - sizeof(u64);
- CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
((u64*)addr)[0] = 0;
- LIBC_FREE(addr);
+ BlockingMutexLock l(&internal_allocator_mu);
+ CHECK(internal_allocator_inited);
+ internal_allocator.Deallocate(&internal_allocator_cache, addr);
}
// LowLevelAllocator
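
(Not part of the diff, just for context:) InternalAlloc() reserves one extra u64 in front of every chunk and stamps it with kBlockMagic; InternalFree() steps back over that header, verifies the stamp, clears it, and returns the chunk to the combined allocator. A minimal sketch of the resulting layout and the free path, with names matching the code above:

  // Layout of a chunk returned by InternalAlloc(size):
  //
  //   [ u64 kBlockMagic ][ size bytes handed to the caller ]
  //   ^ p from Allocate()  ^ (char*)p + sizeof(u64)
  //
  // InternalFree(void *addr) therefore does, in essence:
  //   addr = (char*)addr - sizeof(u64);        // step back to the header
  //   CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);  // catch non-internal pointers
  //   ((u64*)addr)[0] = 0;                     // poison the magic
  //   internal_allocator.Deallocate(&internal_allocator_cache, addr);
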
Index: lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -574,6 +574,19 @@
}
}
+TEST(Allocator, AllocatorLocationTest) {
+ u64 space, size;
+ GetInternalAllocatorLocation(&space, &size);
+ CHECK_GT(size, 0);
+ u64 end = space + size;
+ if (SANITIZER_WORDSIZE == 64) {
+ CHECK_GT(space, 0);
+ CHECK_LE(end, 0x800000000000ULL);
+ } else {
+ CHECK_LE(end, 0x100000000ULL);
+ }
+}
+
class IterationTestCallback {
public:
explicit IterationTestCallback(std::set<void *> *chunks)
Index: lib/sanitizer_common/sanitizer_common.h
===================================================================
--- lib/sanitizer_common/sanitizer_common.h
+++ lib/sanitizer_common/sanitizer_common.h
@@ -61,6 +61,9 @@
// Internal allocator
void *InternalAlloc(uptr size);
void InternalFree(void *p);
+// Use this to make sure tool mappings don't overlap with the memory reserved
+// for the internal allocator.
+void GetInternalAllocatorLocation(u64 *space, u64 *size);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
Index: lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_linux.cc
+++ lib/tsan/rtl/tsan_platform_linux.cc
@@ -257,12 +257,15 @@
#ifndef TSAN_GO
static void CheckPIE() {
// Ensure that the binary is indeed compiled with -pie.
+ u64 internal_alloc_beg, internal_alloc_size;
+ GetInternalAllocatorLocation(&internal_alloc_beg, &internal_alloc_size);
MemoryMappingLayout proc_maps(true);
uptr start, end;
if (proc_maps.Next(&start, &end,
/*offset*/0, /*filename*/0, /*filename_size*/0,
/*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
+ if ((u64)start < kLinuxAppMemBeg &&
+ (u64)start != internal_alloc_beg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
"something is mapped at 0x%zx < 0x%zx)\n",
start, kLinuxAppMemBeg);
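
For other tools that map shadow at fixed addresses, the intended usage of the new hook is the same as in CheckPIE() above: query the reserved range once, then make sure your own mappings stay clear of it. A minimal sketch, assuming only the GetInternalAllocatorLocation() declaration added to sanitizer_common.h; the shadow bounds and helper name below are made up for illustration and are not part of this patch:

  #include "sanitizer_common.h"

  namespace __sanitizer {

  // Hypothetical shadow range of some tool; not taken from this patch.
  static const u64 kMyShadowBeg = 0x100000000000ULL;
  static const u64 kMyShadowEnd = 0x200000000000ULL;

  // True iff the tool's shadow is disjoint from the internal allocator region.
  static bool ShadowAvoidsInternalAllocator() {
    u64 alloc_beg, alloc_size;
    GetInternalAllocatorLocation(&alloc_beg, &alloc_size);
    u64 alloc_end = alloc_beg + alloc_size;
    // Half-open ranges [a, b) and [c, d) are disjoint iff b <= c or d <= a.
    return kMyShadowEnd <= alloc_beg || alloc_end <= kMyShadowBeg;
  }

  }  // namespace __sanitizer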