[compiler-rt] r232029 - tsan: fix crash during __tsan_java_move

Dmitry Vyukov dvyukov at google.com
Thu Mar 12 04:24:16 PDT 2015


Author: dvyukov
Date: Thu Mar 12 06:24:16 2015
New Revision: 232029

URL: http://llvm.org/viewvc/llvm-project?rev=232029&view=rev
Log:
tsan: fix crash during __tsan_java_move

The munmap interceptor did not reset the meta shadow for the
unmapped range, so __tsan_java_move crashed when it encountered
non-zero meta shadow for the destination.
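
For context, here is the failing sequence, condensed from the test added
below (test/tsan/java_heap_init.cc); the extern declarations mirror
test/tsan/java.h, and error checks are omitted:

  #include <sys/mman.h>
  extern "C" {
  typedef unsigned long jptr;
  void __tsan_java_init(jptr heap_begin, jptr heap_size);
  int  __tsan_java_fini();
  void __tsan_java_move(jptr src, jptr dst, jptr size);
  }

  int main() {
    const jptr kHeapSize = 1024 * 1024;
    jptr heap = (jptr)mmap(0, kHeapSize, PROT_READ | PROT_WRITE,
                           MAP_ANON | MAP_PRIVATE, -1, 0);
    // The release store allocates a SyncVar, i.e. non-zero meta shadow.
    __atomic_store_n((int*)heap, 1, __ATOMIC_RELEASE);
    // Before this fix, the meta shadow survived the munmap.
    munmap((void*)heap, kHeapSize);
    // The kernel typically reuses the just-freed address for the java heap.
    heap = (jptr)mmap((void*)heap, kHeapSize, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    __tsan_java_init(heap, kHeapSize);
    // Crashed here: the destination range still had stale meta shadow.
    __tsan_java_move(heap + 16, heap, 16);
    return __tsan_java_fini();
  }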


Added:
    compiler-rt/trunk/test/tsan/java_heap_init.cc
Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc?rev=232029&r1=232028&r2=232029&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc Thu Mar 12 06:24:16 2015
@@ -797,6 +797,7 @@ TSAN_INTERCEPTOR(void*, mmap64, void *ad
 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
   SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
   DontNeedShadowFor((uptr)addr, sz);
+  ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
   int res = REAL(munmap)(addr, sz);
   return res;
 }
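
The added call clears the meta shadow for the unmapped range, alongside
DontNeedShadowFor, which drops the regular shadow, and both run before the
real munmap. Annotated copy of the resulting interceptor (the comments are
editorial, not in the source):

  TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
    SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
    DontNeedShadowFor((uptr)addr, sz);       // release the regular shadow
    ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);  // new: clear
                                                             // the meta shadow
    int res = REAL(munmap)(addr, sz);        // only then unmap the memory
    return res;
  }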

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc?rev=232029&r1=232028&r2=232029&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc Thu Mar 12 06:24:16 2015
@@ -80,7 +80,8 @@ uptr MetaMap::FreeBlock(ThreadState *thr
   return sz;
 }
 
-void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  bool has_something = false;
   u32 *meta = MemToMeta(p);
   u32 *end = MemToMeta(p + sz);
   if (end == meta)
@@ -91,6 +92,7 @@ void MetaMap::FreeRange(ThreadState *thr
     for (;;) {
       if (idx == 0)
         break;
+      has_something = true;
       if (idx & kFlagBlock) {
         block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
         break;
@@ -106,6 +108,62 @@ void MetaMap::FreeRange(ThreadState *thr
       }
     }
   }
+  return has_something;
+}
+
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort w.r.t.
+// freeing of meta objects, because we don't want to page in the whole range,
+// which can be huge. The function probes pages one-by-one until it finds a
+// page without meta objects; at that point it stops. Because thread stacks
+// grow top-down, we do the same starting from the end as well.
+void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+  if (sz <= 4 * kPageSize) {
+    // If the range is small, just do the normal free procedure.
+    FreeRange(thr, pc, p, sz);
+    return;
+  }
+  // First, round both ends of the range to page size.
+  uptr diff = RoundUp(p, kPageSize) - p;
+  if (diff != 0) {
+    FreeRange(thr, pc, p, diff);
+    p += diff;
+    sz -= diff;
+  }
+  diff = p + sz - RoundDown(p + sz, kPageSize);
+  if (diff != 0) {
+    FreeRange(thr, pc, p + sz - diff, diff);
+    sz -= diff;
+  }
+  // Now we must have a non-empty page-aligned range.
+  CHECK_GT(sz, 0);
+  CHECK_EQ(p, RoundUp(p, kPageSize));
+  CHECK_EQ(sz, RoundUp(sz, kPageSize));
+  const uptr p0 = p;
+  const uptr sz0 = sz;
+  // Probe start of the range.
+  while (sz > 0) {
+    bool has_something = FreeRange(thr, pc, p, kPageSize);
+    p += kPageSize;
+    sz -= kPageSize;
+    if (!has_something)
+      break;
+  }
+  // Probe end of the range.
+  while (sz > 0) {
+    bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+    sz -= kPageSize;
+    if (!has_something)
+      break;
+  }
+  // Finally, page out the whole range (including the parts that we've just
+  // freed). Note: we can't simply madvise, because we need to leave a zeroed
+  // range (otherwise __tsan_java_move can crash if it encounters left-over
+  // meta objects in the java heap).
+  UnmapOrDie((void*)p0, sz0);
+  MmapFixedNoReserve(p0, sz0);
 }
 
 MBlock* MetaMap::GetBlock(uptr p) {
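
To make ResetRange's constants concrete: assuming kMetaShadowCell = 8 and
kMetaShadowSize = 4 (their values in tsan_defs.h) and a 4 KB OS page,
kMetaRatio is 2 and the local kPageSize is 8 KB of application memory, i.e.
the amount whose meta shadow fills exactly one OS page. The probing loops
free meta objects one such granule at a time from both ends and stop at the
first granule that held nothing, so a huge region never pages in its whole
meta range. A sketch of the arithmetic (illustration only, not part of the
commit):

  #include <cstdio>

  int main() {
    const unsigned long kMetaShadowCell = 8;  // app bytes per meta slot
    const unsigned long kMetaShadowSize = 4;  // bytes per meta slot (u32)
    const unsigned long kOsPageSize = 4096;
    const unsigned long kMetaRatio = kMetaShadowCell / kMetaShadowSize;  // 2
    const unsigned long kPageSize = kOsPageSize * kMetaRatio;  // 8192
    const unsigned long sz = 1024 * 1024;  // the 1 MB heap from the new test
    printf("small-range cutoff: %lu bytes\n", 4 * kPageSize);  // 32768
    printf("meta shadow bytes:  %lu\n", sz / kMetaRatio);      // 524288
    printf("probe granules:     %lu\n", sz / kPageSize);       // 128
  }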

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h?rev=232029&r1=232028&r2=232029&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h Thu Mar 12 06:24:16 2015
@@ -73,7 +73,8 @@ class MetaMap {
 
   void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
   uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
-  void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
   SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,

Added: compiler-rt/trunk/test/tsan/java_heap_init.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/java_heap_init.cc?rev=232029&view=auto
==============================================================================
--- compiler-rt/trunk/test/tsan/java_heap_init.cc (added)
+++ compiler-rt/trunk/test/tsan/java_heap_init.cc Thu Mar 12 06:24:16 2015
@@ -0,0 +1,28 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
+#include "java.h"
+#include <errno.h>
+#include <sys/mman.h>
+
+int main() {
+  // Test that munmap interceptor resets meta shadow for the memory range.
+  // Previously __tsan_java_move failed because it encountered non-zero meta
+  // shadow for the destination.
+  int const kHeapSize = 1024 * 1024;
+  jptr jheap = (jptr)mmap(0, kHeapSize, PROT_READ | PROT_WRITE,
+      MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (jheap == (jptr)MAP_FAILED)
+    return printf("mmap failed with %d\n", errno);
+  __atomic_store_n((int*)jheap, 1, __ATOMIC_RELEASE);
+  munmap((void*)jheap, kHeapSize);
+  jheap = (jptr)mmap((void*)jheap, kHeapSize, PROT_READ | PROT_WRITE,
+      MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (jheap == (jptr)MAP_FAILED)
+    return printf("second mmap failed with %d\n", errno);
+  __tsan_java_init(jheap, kHeapSize);
+  __tsan_java_move(jheap + 16, jheap, 16);
+  printf("DONE\n");
+  return __tsan_java_fini();
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK: DONE
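
To reproduce outside of lit: %clangxx_tsan is the lit substitution for
clang++ with -fsanitize=thread (plus the flags from test/tsan/lit.cfg), so
compiling the test with clang++ -fsanitize=thread -O1 from the test/tsan
directory (so that java.h is found) and running it should print DONE with
no ThreadSanitizer warning.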
