[compiler-rt] r209810 - tsan: refactor storage of meta information for heap blocks and sync objects

Dmitry Vyukov dvyukov at google.com
Thu May 29 06:50:54 PDT 2014


Author: dvyukov
Date: Thu May 29 08:50:54 2014
New Revision: 209810

URL: http://llvm.org/viewvc/llvm-project?rev=209810&view=rev
Log:
tsan: refactor storage of meta information for heap blocks and sync objects
The new storage (MetaMap) is based on direct shadow (instead of a hashmap + per-block lists).
This solves a number of problems:
 - eliminates quadratic behaviour in SyncTab::GetAndLock (https://code.google.com/p/thread-sanitizer/issues/detail?id=26)
 - eliminates contention in SyncTab
 - eliminates contention in internal allocator during allocation of sync objects
 - removes a bunch of ad-hoc code in java interface
 - reduces java shadow from 2x to 1/2x
 - allows keeping heap block meta info for Java and Go
 - allows cleaning up sync object meta info for Go
 - which in turn enables the deadlock detector for Go

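For orientation, a minimal sketch of what "direct shadow" means for meta
information (a simplification, not the actual runtime code; the real pieces
are MemToMeta in tsan_platform.h and MetaMap introduced by this patch).
Every kMetaShadowCell (8) user bytes own one kMetaShadowSize (4) byte meta
cell, and that cell holds a dense u32 index into a slab allocator, so a
lookup is pure address arithmetic plus one array access:

// Hypothetical helper in the spirit of MetaMap::GetBlock; block_alloc
// stands for a DenseSlabAlloc of MBlock objects.
MBlock *GetBlockSketch(uptr addr) {
  u32 *cell = MemToMeta(addr);        // O(1): pure address arithmetic
  u32 idx = *cell;                    // index 0 is reserved for "no meta"
  return idx ? block_alloc.Map(idx)   // dense u32 index -> real pointer
             : 0;
}

This is also where the Java shadow reduction comes from: the old scheme kept
a BlockDesc (16 bytes, hence the 2x) per 8 heap bytes, the new one keeps a
4-byte meta cell per 8 bytes (1/2x).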


Added:
    compiler-rt/trunk/lib/tsan/rtl/tsan_dense_alloc.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_dense_alloc_test.cc
Removed:
    compiler-rt/trunk/test/tsan/oob_race.cc
Modified:
    compiler-rt/trunk/lib/tsan/CMakeLists.txt
    compiler-rt/trunk/lib/tsan/check_memcpy.sh
    compiler-rt/trunk/lib/tsan/go/build.bat
    compiler-rt/trunk/lib/tsan/go/buildgo.sh
    compiler-rt/trunk/lib/tsan/rtl/tsan_clock.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_clock.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_platform_linux.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_vector.h
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_sync_test.cc
    compiler-rt/trunk/test/tsan/java_alloc.cc
    compiler-rt/trunk/test/tsan/java_lock_rec_race.cc
    compiler-rt/trunk/test/tsan/java_race.cc
    compiler-rt/trunk/test/tsan/java_race_move.cc
    compiler-rt/trunk/test/tsan/mutexset7.cc

Modified: compiler-rt/trunk/lib/tsan/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/CMakeLists.txt?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/tsan/CMakeLists.txt Thu May 29 08:50:54 2014
@@ -38,6 +38,7 @@ set(TSAN_SOURCES
   rtl/tsan_rtl_mutex.cc
   rtl/tsan_rtl_report.cc
   rtl/tsan_rtl_thread.cc
+  rtl/tsan_stack_trace.cc
   rtl/tsan_stat.cc
   rtl/tsan_suppressions.cc
   rtl/tsan_symbolize.cc
@@ -54,6 +55,7 @@ endif()
 set(TSAN_HEADERS
   rtl/tsan_clock.h
   rtl/tsan_defs.h
+  rtl/tsan_dense_alloc.h
   rtl/tsan_fd.h
   rtl/tsan_flags.h
   rtl/tsan_ignoreset.h
@@ -67,6 +69,7 @@ set(TSAN_HEADERS
   rtl/tsan_platform.h
   rtl/tsan_report.h
   rtl/tsan_rtl.h
+  rtl/tsan_stack_trace.h
   rtl/tsan_stat.h
   rtl/tsan_suppressions.h
   rtl/tsan_symbolize.h

Modified: compiler-rt/trunk/lib/tsan/check_memcpy.sh
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/check_memcpy.sh?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/check_memcpy.sh (original)
+++ compiler-rt/trunk/lib/tsan/check_memcpy.sh Thu May 29 08:50:54 2014
@@ -17,7 +17,14 @@ EXE=$SRC.exe
 $CXX $SRC $CFLAGS -c -o $OBJ
 $CXX $OBJ $LDFLAGS -o $EXE
 
-NCALL=$(objdump -d $EXE | egrep "callq .*__interceptor_mem(cpy|set)" | wc -l)
+NCALL=$(objdump -d $EXE | egrep "callq .*<__interceptor_mem(cpy|set)>" | wc -l)
+if [ "$NCALL" != "0" ]; then
+  echo FAIL: found $NCALL memcpy/memset calls
+  exit 1
+fi
+
+# tail calls
+NCALL=$(objdump -d $EXE | egrep "jmpq .*<__interceptor_mem(cpy|set)>" | wc -l)
 if [ "$NCALL" != "0" ]; then
   echo FAIL: found $NCALL memcpy/memset calls
   exit 1

Modified: compiler-rt/trunk/lib/tsan/go/build.bat
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/go/build.bat?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/go/build.bat (original)
+++ compiler-rt/trunk/lib/tsan/go/build.bat Thu May 29 08:50:54 2014
@@ -1,4 +1,4 @@
-type tsan_go.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc > gotsan.cc
+type tsan_go.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc > gotsan.cc
 
 gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 -Wno-error=attributes -Wno-attributes -Wno-format -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer
 

Modified: compiler-rt/trunk/lib/tsan/go/buildgo.sh
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/go/buildgo.sh?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/go/buildgo.sh (original)
+++ compiler-rt/trunk/lib/tsan/go/buildgo.sh Thu May 29 08:50:54 2014
@@ -12,6 +12,7 @@ SRCS="
 	../rtl/tsan_rtl_mutex.cc
 	../rtl/tsan_rtl_report.cc
 	../rtl/tsan_rtl_thread.cc
+	../rtl/tsan_stack_trace.cc
 	../rtl/tsan_stat.cc
 	../rtl/tsan_suppressions.cc
 	../rtl/tsan_sync.cc

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_clock.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_clock.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_clock.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_clock.cc Thu May 29 08:50:54 2014
@@ -330,6 +330,11 @@ SyncClock::SyncClock()
 
 void SyncClock::Reset() {
   clk_.Reset();
+  Zero();
+}
+
+void SyncClock::Zero() {
+  clk_.Resize(0);
   release_store_tid_ = kInvalidTid;
   release_store_reused_ = 0;
   for (uptr i = 0; i < kDirtyTids; i++)

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_clock.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_clock.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_clock.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_clock.h Thu May 29 08:50:54 2014
@@ -38,6 +38,7 @@ class SyncClock {
   }
 
   void Reset();
+  void Zero();
 
   void DebugDump(int(*printf)(const char *s, ...));
 

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h Thu May 29 08:50:54 2014
@@ -66,6 +66,13 @@ const uptr kShadowSize = 8;
 // Shadow memory is kShadowMultiplier times larger than user memory.
 const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
 
+// This many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
 #if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
 const bool kCollectHistory = false;
 #else
@@ -167,7 +174,15 @@ struct ReportStack;
 class ReportDesc;
 class RegionAlloc;
 class StackTrace;
-struct MBlock;
+
+// Descriptor of user's memory block.
+struct MBlock {
+  u64  siz;
+  u32  stk;
+  u16  tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
 
 }  // namespace __tsan
 

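The new MBlock replaces the old bit-packed two-u64 descriptor (removed from
tsan_rtl.h later in this patch). A standalone illustration of why the
COMPILER_CHECK above holds, with stdint types standing in for tsan's
u64/u32/u16:

#include <cstdint>

struct MBlockSketch {
  uint64_t siz;  // block size: 8 bytes
  uint32_t stk;  // allocation stack id: 4 bytes
  uint16_t tid;  // allocating thread id: 2 bytes
};               // + 2 bytes of tail padding for 8-byte alignment = 16

static_assert(sizeof(MBlockSketch) == 16, "matches COMPILER_CHECK");
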
Added: compiler-rt/trunk/lib/tsan/rtl/tsan_dense_alloc.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_dense_alloc.h?rev=209810&view=auto
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_dense_alloc.h (added)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_dense_alloc.h Thu May 29 08:50:54 2014
@@ -0,0 +1,136 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// the index onto the real pointer. The index is u32, that is, half the size
+// of uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+  static const uptr kSize = 128;
+  typedef u32 IndexT;
+  uptr pos;
+  IndexT cache[kSize];
+  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+  typedef DenseSlabAllocCache Cache;
+  typedef typename Cache::IndexT IndexT;
+
+  DenseSlabAlloc() {
+    // Check that kL1Size and kL2Size are sane.
+    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+    // Check that it makes sense to use the dense alloc.
+    CHECK_GE(sizeof(T), sizeof(IndexT));
+    internal_memset(map_, 0, sizeof(map_));
+    freelist_ = 0;
+    fillpos_ = 0;
+  }
+
+  ~DenseSlabAlloc() {
+    for (uptr i = 0; i < kL1Size; i++) {
+      if (map_[i] != 0)
+        UnmapOrDie(map_[i], kL2Size * sizeof(T));
+    }
+  }
+
+  IndexT Alloc(Cache *c) {
+    if (c->pos == 0)
+      Refill(c);
+    return c->cache[--c->pos];
+  }
+
+  void Free(Cache *c, IndexT idx) {
+    if (c->pos == Cache::kSize)
+      Drain(c);
+    c->cache[c->pos++] = idx;
+  }
+
+  T *Map(IndexT idx) {
+    DCHECK_NE(idx, 0);
+    DCHECK_LE(idx, kL1Size * kL2Size);
+    return &map_[idx / kL2Size][idx % kL2Size];
+  }
+
+  void FlushCache(Cache *c) {
+    SpinMutexLock lock(&mtx_);
+    while (c->pos) {
+      IndexT idx = c->cache[--c->pos];
+      *(IndexT*)Map(idx) = freelist_;
+      freelist_ = idx;
+    }
+  }
+
+  void InitCache(Cache *c) {
+    c->pos = 0;
+    internal_memset(c->cache, 0, sizeof(c->cache));
+  }
+
+ private:
+  T *map_[kL1Size];
+  SpinMutex mtx_;
+  IndexT freelist_;
+  uptr fillpos_;
+
+  void Refill(Cache *c) {
+    SpinMutexLock lock(&mtx_);
+    if (freelist_ == 0) {
+      if (fillpos_ == kL1Size) {
+        Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+        Die();
+      }
+      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+      // Reserve 0 as invalid index.
+      IndexT start = fillpos_ == 0 ? 1 : 0;
+      for (IndexT i = start; i < kL2Size; i++) {
+        new(batch + i) T();
+        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+      }
+      *(IndexT*)(batch + kL2Size - 1) = 0;
+      freelist_ = fillpos_ * kL2Size + start;
+      map_[fillpos_++] = batch;
+    }
+    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+      IndexT idx = freelist_;
+      c->cache[c->pos++] = idx;
+      freelist_ = *(IndexT*)Map(idx);
+    }
+  }
+
+  void Drain(Cache *c) {
+    SpinMutexLock lock(&mtx_);
+    for (uptr i = 0; i < Cache::kSize / 2; i++) {
+      IndexT idx = c->cache[--c->pos];
+      *(IndexT*)Map(idx) = freelist_;
+      freelist_ = idx;
+    }
+  }
+};
+
+}  // namespace __tsan
+
+#endif  // TSAN_DENSE_ALLOC_H

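A usage sketch for the new allocator, assuming the tsan runtime environment
is available; Obj, ObjAlloc and Example are illustrative names, everything
else is the API added above:

#include "tsan_dense_alloc.h"

namespace __tsan {

struct Obj {
  u64 payload[4];  // any T with sizeof(T) >= sizeof(u32) is acceptable
};

// Both level sizes must be powers of two, and their product must fit
// into the u32 index space; this instance can hand out 2^26 objects.
typedef DenseSlabAlloc<Obj, 1 << 10, 1 << 16> ObjAlloc;

void Example(ObjAlloc *alloc) {
  DenseSlabAllocCache cache;        // normally lives in ThreadState
  alloc->InitCache(&cache);
  u32 idx = alloc->Alloc(&cache);   // dense u32 index; 0 is never handed out
  Obj *o = alloc->Map(idx);         // map the index back to a real pointer
  o->payload[0] = 42;
  alloc->Free(&cache, idx);         // return the index to the local cache
  alloc->FlushCache(&cache);        // drain cached indices to the freelist
}

}  // namespace __tsan

The u32 index is half the size of a pointer, which is exactly what lets a
meta shadow cell be 4 bytes.
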
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc Thu May 29 08:50:54 2014
@@ -47,8 +47,8 @@ static bool bogusfd(int fd) {
   return fd < 0 || fd >= kTableSize;
 }
 
-static FdSync *allocsync() {
-  FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+  FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync));
   atomic_store(&s->rc, 1, memory_order_relaxed);
   return s;
 }
@@ -65,10 +65,7 @@ static void unref(ThreadState *thr, uptr
       CHECK_NE(s, &fdctx.globsync);
       CHECK_NE(s, &fdctx.filesync);
       CHECK_NE(s, &fdctx.socksync);
-      SyncVar *v = ctx->synctab.GetAndRemove(thr, pc, (uptr)s);
-      if (v)
-        DestroyAndFree(v);
-      internal_free(s);
+      user_free(thr, pc, s);
     }
   }
 }
@@ -219,7 +216,7 @@ void FdDup(ThreadState *thr, uptr pc, in
 
 void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
   DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
-  FdSync *s = allocsync();
+  FdSync *s = allocsync(thr, pc);
   init(thr, pc, rfd, ref(s));
   init(thr, pc, wfd, ref(s));
   unref(thr, pc, s);
@@ -229,7 +226,7 @@ void FdEventCreate(ThreadState *thr, upt
   DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
   if (bogusfd(fd))
     return;
-  init(thr, pc, fd, allocsync());
+  init(thr, pc, fd, allocsync(thr, pc));
 }
 
 void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
@@ -250,7 +247,7 @@ void FdPollCreate(ThreadState *thr, uptr
   DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
   if (bogusfd(fd))
     return;
-  init(thr, pc, fd, allocsync());
+  init(thr, pc, fd, allocsync(thr, pc));
 }
 
 void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc Thu May 29 08:50:54 2014
@@ -191,6 +191,7 @@ ScopedInterceptor::~ScopedInterceptor()
   if (!thr_->ignore_interceptors) {
     ProcessPendingSignals(thr_);
     FuncExit(thr_);
+    CheckNoLocks(thr_);
   }
 }
 
@@ -1705,7 +1706,7 @@ static void CallUserSignalHandler(Thread
     ScopedReport rep(ReportTypeErrnoInSignal);
     if (!IsFiredSuppression(ctx, rep, stack)) {
       rep.AddStack(&stack, true);
-      OutputReport(ctx, rep);
+      OutputReport(thr, rep);
     }
   }
   errno = saved_errno;

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc Thu May 29 08:50:54 2014
@@ -40,6 +40,7 @@ class ScopedAnnotation {
 
   ~ScopedAnnotation() {
     FuncExit(thr_);
+    CheckNoLocks(thr_);
   }
  private:
   ThreadState *const thr_;

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Thu May 29 08:50:54 2014
@@ -291,7 +291,7 @@ static T AtomicLoad(ThreadState *thr, up
     MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
     return NoTsanAtomicLoad(a, mo);
   }
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
   AcquireImpl(thr, pc, &s->clock);
   T v = NoTsanAtomicLoad(a, mo);
   s->mtx.ReadUnlock();
@@ -325,7 +325,7 @@ static void AtomicStore(ThreadState *thr
     return;
   }
   __sync_synchronize();
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -339,7 +339,7 @@ static T AtomicRMW(ThreadState *thr, upt
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
-    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
     TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -463,7 +463,7 @@ static bool AtomicCAS(ThreadState *thr,
   SyncVar *s = 0;
   bool write_lock = mo != mo_acquire && mo != mo_consume;
   if (mo != mo_relaxed) {
-    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
     TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc Thu May 29 08:50:54 2014
@@ -22,54 +22,17 @@
 
 using namespace __tsan;  // NOLINT
 
-namespace __tsan {
-
-const uptr kHeapShadow = 0x300000000000ull;
-const uptr kHeapAlignment = 8;
-
-struct BlockDesc {
-  bool begin;
-  Mutex mtx;
-  SyncVar *head;
-
-  BlockDesc()
-      : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
-      , head() {
-    CHECK_EQ(begin, false);
-    begin = true;
-  }
+const jptr kHeapAlignment = 8;
 
-  ~BlockDesc() {
-    CHECK_EQ(begin, true);
-    begin = false;
-    ThreadState *thr = cur_thread();
-    SyncVar *s = head;
-    while (s) {
-      SyncVar *s1 = s->next;
-      StatInc(thr, StatSyncDestroyed);
-      s->mtx.Lock();
-      s->mtx.Unlock();
-      thr->mset.Remove(s->GetId());
-      DestroyAndFree(s);
-      s = s1;
-    }
-  }
-};
+namespace __tsan {
 
 struct JavaContext {
   const uptr heap_begin;
   const uptr heap_size;
-  BlockDesc *heap_shadow;
 
   JavaContext(jptr heap_begin, jptr heap_size)
       : heap_begin(heap_begin)
       , heap_size(heap_size) {
-    uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
-    heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
-    if ((uptr)heap_shadow != kHeapShadow) {
-      Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
-      Die();
-    }
   }
 };
 
@@ -93,63 +56,6 @@ class ScopedJavaFunc {
 static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
 static JavaContext *jctx;
 
-static BlockDesc *getblock(uptr addr) {
-  uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
-  return &jctx->heap_shadow[i];
-}
-
-static uptr USED getmem(BlockDesc *b) {
-  uptr i = b - jctx->heap_shadow;
-  uptr p = jctx->heap_begin + i * kHeapAlignment;
-  CHECK_GE(p, jctx->heap_begin);
-  CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
-  return p;
-}
-
-static BlockDesc *getblockbegin(uptr addr) {
-  for (BlockDesc *b = getblock(addr);; b--) {
-    CHECK_GE(b, jctx->heap_shadow);
-    if (b->begin)
-      return b;
-  }
-  return 0;
-}
-
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
-                     bool write_lock, bool create) {
-  if (jctx == 0 || addr < jctx->heap_begin
-      || addr >= jctx->heap_begin + jctx->heap_size)
-    return 0;
-  BlockDesc *b = getblockbegin(addr);
-  DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
-  Lock l(&b->mtx);
-  SyncVar *s = b->head;
-  for (; s; s = s->next) {
-    if (s->addr == addr) {
-      DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
-      break;
-    }
-  }
-  if (s == 0 && create) {
-    DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
-    s = ctx->synctab.Create(thr, pc, addr);
-    s->next = b->head;
-    b->head = s;
-  }
-  if (s) {
-    if (write_lock)
-      s->mtx.Lock();
-    else
-      s->mtx.ReadLock();
-  }
-  return s;
-}
-
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
-  // We do not destroy Java mutexes other than in __tsan_java_free().
-  return 0;
-}
-
 }  // namespace __tsan
 
 #define SCOPED_JAVA_FUNC(func) \
@@ -192,8 +98,7 @@ void __tsan_java_alloc(jptr ptr, jptr si
   CHECK_GE(ptr, jctx->heap_begin);
   CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
 
-  BlockDesc *b = getblock(ptr);
-  new(b) BlockDesc();
+  OnUserAlloc(thr, pc, ptr, size, false);
 }
 
 void __tsan_java_free(jptr ptr, jptr size) {
@@ -206,12 +111,7 @@ void __tsan_java_free(jptr ptr, jptr siz
   CHECK_GE(ptr, jctx->heap_begin);
   CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
 
-  BlockDesc *beg = getblock(ptr);
-  BlockDesc *end = getblock(ptr + size);
-  for (BlockDesc *b = beg; b != end; b++) {
-    if (b->begin)
-      b->~BlockDesc();
-  }
+  ctx->metamap.FreeRange(thr, pc, ptr, size);
 }
 
 void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -230,35 +130,15 @@ void __tsan_java_move(jptr src, jptr dst
 
   // Assuming it's not running concurrently with threads that do
   // memory accesses and mutex operations (stop-the-world phase).
-  {  // NOLINT
-    BlockDesc *s = getblock(src);
-    BlockDesc *d = getblock(dst);
-    BlockDesc *send = getblock(src + size);
-    for (; s != send; s++, d++) {
-      CHECK_EQ(d->begin, false);
-      if (s->begin) {
-        DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
-        new(d) BlockDesc;
-        d->head = s->head;
-        for (SyncVar *sync = d->head; sync; sync = sync->next) {
-          uptr newaddr = sync->addr - src + dst;
-          DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
-          sync->addr = newaddr;
-        }
-        s->head = 0;
-        s->~BlockDesc();
-      }
-    }
-  }
+  ctx->metamap.MoveMemory(src, dst, size);
 
-  {  // NOLINT
-    u64 *s = (u64*)MemToShadow(src);
-    u64 *d = (u64*)MemToShadow(dst);
-    u64 *send = (u64*)MemToShadow(src + size);
-    for (; s != send; s++, d++) {
-      *d = *s;
-      *s = 0;
-    }
+  // Move shadow.
+  u64 *s = (u64*)MemToShadow(src);
+  u64 *d = (u64*)MemToShadow(dst);
+  u64 *send = (u64*)MemToShadow(src + size);
+  for (; s != send; s++, d++) {
+    *d = *s;
+    *s = 0;
   }
 }
 

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Thu May 29 08:50:54 2014
@@ -29,32 +29,6 @@ extern "C" void WEAK __tsan_free_hook(vo
 
 namespace __tsan {
 
-COMPILER_CHECK(sizeof(MBlock) == 16);
-
-void MBlock::Lock() {
-  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
-  uptr v = atomic_load(a, memory_order_relaxed);
-  for (int iter = 0;; iter++) {
-    if (v & 1) {
-      if (iter < 10)
-        proc_yield(20);
-      else
-        internal_sched_yield();
-      v = atomic_load(a, memory_order_relaxed);
-      continue;
-    }
-    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
-      break;
-  }
-}
-
-void MBlock::Unlock() {
-  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
-  uptr v = atomic_load(a, memory_order_relaxed);
-  DCHECK(v & 1);
-  atomic_store(a, v & ~1, memory_order_relaxed);
-}
-
 struct MapUnmapCallback {
   void OnMap(uptr p, uptr size) const { }
   void OnUnmap(uptr p, uptr size) const {
@@ -96,7 +70,7 @@ static void SignalUnsafeCall(ThreadState
   ScopedReport rep(ReportTypeSignalUnsafe);
   if (!IsFiredSuppression(ctx, rep, stack)) {
     rep.AddStack(&stack, true);
-    OutputReport(ctx, rep);
+    OutputReport(thr, rep);
   }
 }
 
@@ -106,43 +80,36 @@ void *user_alloc(ThreadState *thr, uptr
   void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
   if (p == 0)
     return 0;
-  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
-  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
-  if (ctx && ctx->initialized) {
-    if (thr->ignore_reads_and_writes == 0)
-      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
-    else
-      MemoryResetRange(thr, pc, (uptr)p, sz);
-  }
-  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+  if (ctx && ctx->initialized)
+    OnUserAlloc(thr, pc, (uptr)p, sz, true);
   SignalUnsafeCall(thr, pc);
   return p;
 }
 
 void user_free(ThreadState *thr, uptr pc, void *p) {
-  CHECK_NE(p, (void*)0);
-  DPrintf("#%d: free(%p)\n", thr->tid, p);
-  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  if (b->ListHead()) {
-    MBlock::ScopedLock l(b);
-    for (SyncVar *s = b->ListHead(); s;) {
-      SyncVar *res = s;
-      s = s->next;
-      StatInc(thr, StatSyncDestroyed);
-      res->mtx.Lock();
-      res->mtx.Unlock();
-      DestroyAndFree(res);
-    }
-    b->ListReset();
-  }
-  if (ctx && ctx->initialized) {
-    if (thr->ignore_reads_and_writes == 0)
-      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
-  }
+  if (ctx && ctx->initialized)
+    OnUserFree(thr, pc, (uptr)p, true);
   allocator()->Deallocate(&thr->alloc_cache, p);
   SignalUnsafeCall(thr, pc);
 }
 
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+  ctx->metamap.AllocBlock(thr, pc, p, sz);
+  if (write && thr->ignore_reads_and_writes == 0)
+    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+  else
+    MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+  CHECK_NE(p, (void*)0);
+  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+  if (write && thr->ignore_reads_and_writes == 0)
+    MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
   void *p2 = 0;
   // FIXME: Handle "shrinking" more efficiently,
@@ -152,9 +119,8 @@ void *user_realloc(ThreadState *thr, upt
     if (p2 == 0)
       return 0;
     if (p) {
-      MBlock *b = user_mblock(thr, p);
-      CHECK_NE(b, 0);
-      internal_memcpy(p2, p, min(b->Size(), sz));
+      uptr oldsz = user_alloc_usable_size(thr, pc, p);
+      internal_memcpy(p2, p, min(oldsz, sz));
     }
   }
   if (p)
@@ -165,17 +131,8 @@ void *user_realloc(ThreadState *thr, upt
 uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
   if (p == 0)
     return 0;
-  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return b ? b->Size() : 0;
-}
-
-MBlock *user_mblock(ThreadState *thr, void *p) {
-  CHECK_NE(p, 0);
-  Allocator *a = allocator();
-  void *b = a->GetBlockBegin(p);
-  if (b == 0)
-    return 0;
-  return (MBlock*)a->GetMetaData(b);
+  MBlock *b = ctx->metamap.GetBlock((uptr)p);
+  return b ? b->siz : 0;
 }
 
 void invoke_malloc_hook(void *ptr, uptr size) {
@@ -247,16 +204,14 @@ bool __tsan_get_ownership(void *p) {
 uptr __tsan_get_allocated_size(void *p) {
   if (p == 0)
     return 0;
-  p = allocator()->GetBlockBegin(p);
-  if (p == 0)
-    return 0;
-  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return b->Size();
+  MBlock *b = ctx->metamap.GetBlock((uptr)p);
+  return b->siz;
 }
 
 void __tsan_on_thread_idle() {
   ThreadState *thr = cur_thread();
   allocator()->SwallowCache(&thr->alloc_cache);
   internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+  ctx->metamap.OnThreadIdle(thr);
 }
 }  // extern "C"

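The net effect of the hunks above: the C++ and Java allocation paths now
funnel through a single pair of hooks (declared in tsan_rtl.h below). A
summary of the call sites in this patch:

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

// user_alloc()        -> OnUserAlloc(..., write = true)   (tsan_mman.cc)
// __tsan_java_alloc() -> OnUserAlloc(..., write = false)  (tsan_interface_java.cc)
// user_free()         -> OnUserFree(..., write = true)    (tsan_mman.cc)
// __tsan_java_free()  -> ctx->metamap.FreeRange(...)      (tsan_interface_java.cc)
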
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h Thu May 29 08:50:54 2014
@@ -32,9 +32,6 @@ void user_free(ThreadState *thr, uptr pc
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
 void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
 uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
-// Given the pointer p into a valid allocated block,
-// returns the descriptor of the block.
-MBlock *user_mblock(ThreadState *thr, void *p);
 
 // Invoking malloc/free hooks that may be installed by the user.
 void invoke_malloc_hook(void *ptr, uptr size);
@@ -62,7 +59,6 @@ enum MBlockType {
   MBlockSuppression,
   MBlockExpectRace,
   MBlockSignal,
-  MBlockFD,
   MBlockJmpBuf,
 
   // This must be the last.

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc Thu May 29 08:50:54 2014
@@ -31,13 +31,13 @@ static MutexType CanLockTab[MutexTypeCou
   /*0  MutexTypeInvalid*/     {},
   /*1  MutexTypeTrace*/       {MutexTypeLeaf},
   /*2  MutexTypeThreads*/     {MutexTypeReport},
-  /*3  MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeSyncVar,
+  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                                MutexTypeMBlock, MutexTypeJavaMBlock},
   /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
-  /*5  MutexTypeSyncTab*/     {MutexTypeSyncVar},
+  /*5  MutexTypeSyncTab*/     {},  // unused
   /*6  MutexTypeSlab*/        {MutexTypeLeaf},
   /*7  MutexTypeAnnotations*/ {},
-  /*8  MutexTypeAtExit*/      {MutexTypeSyncTab},
+  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
   /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
   /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
   /*11 MutexTypeDDetector*/   {},
@@ -161,8 +161,20 @@ void InternalDeadlockDetector::Unlock(Mu
   CHECK(locked_[t]);
   locked_[t] = 0;
 }
+
+void InternalDeadlockDetector::CheckNoLocks() {
+  for (int i = 0; i != MutexTypeCount; i++) {
+    CHECK_EQ(locked_[i], 0);
+  }
+}
 #endif
 
+void CheckNoLocks(ThreadState *thr) {
+#if TSAN_DEBUG && !TSAN_GO
+  thr->internal_deadlock_detector.CheckNoLocks();
+#endif
+}
+
 const uptr kUnlocked = 0;
 const uptr kWriteLock = 1;
 const uptr kReadLock = 2;

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.h Thu May 29 08:50:54 2014
@@ -71,6 +71,7 @@ class InternalDeadlockDetector {
   InternalDeadlockDetector();
   void Lock(MutexType t);
   void Unlock(MutexType t);
+  void CheckNoLocks();
  private:
   u64 seq_;
   u64 locked_[MutexTypeCount];
@@ -78,6 +79,10 @@ class InternalDeadlockDetector {
 
 void InitializeMutex();
 
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
 }  // namespace __tsan
 
 #endif  // TSAN_MUTEX_H

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_platform.h Thu May 29 08:50:54 2014
@@ -16,7 +16,9 @@
 C++ linux memory layout:
 0000 0000 0000 - 03c0 0000 0000: protected
 03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 6000 0000 0000: protected
+1000 0000 0000 - 3000 0000 0000: protected
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: protected
 6000 0000 0000 - 6200 0000 0000: traces
 6200 0000 0000 - 7d00 0000 0000: -
 7d00 0000 0000 - 7e00 0000 0000: heap
@@ -27,7 +29,9 @@ C++ COMPAT linux memory layout:
 0400 0000 0000 - 1000 0000 0000: shadow
 1000 0000 0000 - 2900 0000 0000: protected
 2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 6000 0000 0000: -
+2c00 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
 6000 0000 0000 - 6200 0000 0000: traces
 6200 0000 0000 - 7d00 0000 0000: -
 7d00 0000 0000 - 7e00 0000 0000: heap
@@ -40,7 +44,9 @@ Go linux and darwin memory layout:
 00c0 0000 0000 - 00e0 0000 0000: heap
 00e0 0000 0000 - 1000 0000 0000: -
 1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 6000 0000 0000: -
+1460 0000 0000 - 2000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
 6000 0000 0000 - 6200 0000 0000: traces
 6200 0000 0000 - 7fff ffff ffff: -
 
@@ -51,7 +57,8 @@ Go windows memory layout:
 00e0 0000 0000 - 0100 0000 0000: -
 0100 0000 0000 - 0560 0000 0000: shadow
 0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07ff ffff ffff: -
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 07ff ffff ffff: -
 */
 
 #ifndef TSAN_PLATFORM_H
@@ -68,20 +75,28 @@ static const uptr kLinuxAppMemBeg = 0x00
 static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
 # if SANITIZER_WINDOWS
 static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-# else
+static const uptr kMetaShadow     = 0x076000000000ULL;
+static const uptr kMetaSize       = 0x007000000000ULL;
+# else  // if SANITIZER_WINDOWS
 static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-# endif
+static const uptr kMetaShadow     = 0x300000000000ULL;
+static const uptr kMetaSize       = 0x100000000000ULL;
+# endif  // if SANITIZER_WINDOWS
+#else  // defined(TSAN_GO)
+static const uptr kMetaShadow     = 0x300000000000ULL;
+static const uptr kMetaSize       = 0x100000000000ULL;
 // TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
 // when memory addresses are of the 0x2axxxxxxxxxx form.
 // The option is enabled with 'setarch x86_64 -L'.
-#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+# if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
 static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
 static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
 static const uptr kAppMemGapBeg   = 0x2c0000000000ULL;
 static const uptr kAppMemGapEnd   = 0x7d0000000000ULL;
-#else
+# else
 static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
 static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+# endif
 #endif
 
 static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
@@ -96,10 +111,16 @@ const uptr kTraceMemSize = 0x02000000000
 // This has to be a macro to allow constant initialization of constants below.
 #ifndef TSAN_GO
 #define MemToShadow(addr) \
-    (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+    ((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+#define MemToMeta(addr) \
+    (u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \
+    / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
 #else
 #define MemToShadow(addr) \
-    ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+    (((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+#define MemToMeta(addr) \
+    (u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \
+    / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
 #endif
 
 static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);

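A standalone arithmetic check of the new MemToMeta mapping for the C++ linux
layout (constants copied from the hunks above; the sample address is an
arbitrary heap address):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kLinuxAppMemMsk = 0x7c0000000000ull;
  const uint64_t kMetaShadowCell = 8;
  const uint64_t kMetaShadowSize = 4;
  const uint64_t kMetaShadow     = 0x300000000000ull;
  const uint64_t kMetaSize       = 0x100000000000ull;

  uint64_t addr = 0x7d0000001230ull;  // arbitrary heap address
  uint64_t meta = ((addr & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1)))
                  / kMetaShadowCell * kMetaShadowSize) | kMetaShadow;
  // Prints meta(7d0000001230) = 308000000918: every 8 user bytes share one
  // 4-byte cell, so the whole meta region is half the size of user memory.
  std::printf("meta(%llx) = %llx\n",
              (unsigned long long)addr, (unsigned long long)meta);
  return !(meta >= kMetaShadow && meta < kMetaShadow + kMetaSize);
}
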
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_platform_linux.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_platform_linux.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_platform_linux.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_platform_linux.cc Thu May 29 08:50:54 2014
@@ -61,34 +61,49 @@ namespace __tsan {
 
 const uptr kPageSize = 4096;
 
+enum {
+  MemTotal  = 0,
+  MemShadow = 1,
+  MemMeta   = 2,
+  MemFile   = 3,
+  MemMmap   = 4,
+  MemTrace  = 5,
+  MemHeap   = 6,
+  MemOther  = 7,
+  MemCount  = 8,
+};
+
 void FillProfileCallback(uptr start, uptr rss, bool file,
                          uptr *mem, uptr stats_size) {
-  CHECK_EQ(7, stats_size);
-  mem[6] += rss;  // total
+  mem[MemTotal] += rss;
   start >>= 40;
-  if (start < 0x10)  // shadow
-    mem[0] += rss;
-  else if (start >= 0x20 && start < 0x30)  // compat modules
-    mem[file ? 1 : 2] += rss;
-  else if (start >= 0x7e)  // modules
-    mem[file ? 1 : 2] += rss;
-  else if (start >= 0x60 && start < 0x62)  // traces
-    mem[3] += rss;
-  else if (start >= 0x7d && start < 0x7e)  // heap
-    mem[4] += rss;
-  else  // other
-    mem[5] += rss;
+  if (start < 0x10)
+    mem[MemShadow] += rss;
+  else if (start >= 0x20 && start < 0x30)
+    mem[file ? MemFile : MemMmap] += rss;
+  else if (start >= 0x30 && start < 0x40)
+    mem[MemMeta] += rss;
+  else if (start >= 0x7e)
+    mem[file ? MemFile : MemMmap] += rss;
+  else if (start >= 0x60 && start < 0x62)
+    mem[MemTrace] += rss;
+  else if (start >= 0x7d && start < 0x7e)
+    mem[MemHeap] += rss;
+  else
+    mem[MemOther] += rss;
 }
 
 void WriteMemoryProfile(char *buf, uptr buf_size) {
-  uptr mem[7] = {};
+  uptr mem[MemCount] = {};
   __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
   char *buf_pos = buf;
   char *buf_end = buf + buf_size;
   buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
-      "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
-      mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
-      mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
+      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+      " trace:%zd heap:%zd other:%zd\n",
+      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+      mem[MemHeap] >> 20, mem[MemOther] >> 20);
   struct mallinfo mi = __libc_mallinfo();
   buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
       "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
@@ -123,9 +138,7 @@ static void ProtectRange(uptr beg, uptr
     Die();
   }
 }
-#endif
 
-#ifndef TSAN_GO
 // Mark shadow for .rodata sections with the special kShadowRodata marker.
 // Accesses to .rodata can't race, so this saves time, memory and trace space.
 static void MapRodata() {
@@ -184,6 +197,7 @@ static void MapRodata() {
 }
 
 void InitializeShadowMemory() {
+  // Map memory shadow.
   uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
     kLinuxShadowEnd - kLinuxShadowBeg);
   if (shadow != kLinuxShadowBeg) {
@@ -192,23 +206,48 @@ void InitializeShadowMemory() {
                "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
     Die();
   }
+  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+      kLinuxShadowBeg, kLinuxShadowEnd,
+      (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+
+  // Map meta shadow.
+  if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
+    Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
+        kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
+    Die();
+  }
+  if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
+    Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
+        kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
+    Die();
+  }
+  uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
+  if (meta != kMetaShadow) {
+    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+    Printf("FATAL: Make sure to compile with -fPIE and "
+               "to link with -pie (%p, %p).\n", meta, kMetaShadow);
+    Die();
+  }
+  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+      kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);
+
+  // Protect gaps.
   const uptr kClosedLowBeg  = 0x200000;
   const uptr kClosedLowEnd  = kLinuxShadowBeg - 1;
   const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
-  const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
+  const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
+      kMetaShadow);
+
   ProtectRange(kClosedLowBeg, kClosedLowEnd);
   ProtectRange(kClosedMidBeg, kClosedMidEnd);
-  DPrintf("kClosedLow   %zx-%zx (%zuGB)\n",
+  VPrintf(2, "kClosedLow   %zx-%zx (%zuGB)\n",
       kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
-  DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
-      kLinuxShadowBeg, kLinuxShadowEnd,
-      (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
-  DPrintf("kClosedMid   %zx-%zx (%zuGB)\n",
+  VPrintf(2, "kClosedMid   %zx-%zx (%zuGB)\n",
       kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
-  DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+  VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
       kLinuxAppMemBeg, kLinuxAppMemEnd,
       (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
-  DPrintf("stack        %zx\n", (uptr)&shadow);
+  VPrintf(2, "stack: %zx\n", (uptr)&shadow);
 
   MapRodata();
 }

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc Thu May 29 08:50:54 2014
@@ -131,6 +131,7 @@ static void BackgroundThread(void *arg)
 
   fd_t mprof_fd = kInvalidFd;
   if (flags()->profile_memory && flags()->profile_memory[0]) {
+    // FIXME(dvyukov): support stdout/stderr
     InternalScopedBuffer<char> filename(4096);
     internal_snprintf(filename.data(), filename.size(), "%s.%d",
         flags()->profile_memory, (int)internal_getpid());
@@ -144,6 +145,7 @@ static void BackgroundThread(void *arg)
   }
 
   u64 last_flush = NanoTime();
+  u64 last_rss_check = NanoTime();
   uptr last_rss = 0;
   for (int i = 0;
       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
@@ -160,7 +162,9 @@ static void BackgroundThread(void *arg)
         last_flush = NanoTime();
       }
     }
-    if (flags()->memory_limit_mb > 0) {
+    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
+    if (flags()->memory_limit_mb > 0 && last_rss_check + 1000 * kMs2Ns < now) {
+      last_rss_check = now;
       uptr rss = GetRSS();
       uptr limit = uptr(flags()->memory_limit_mb) << 20;
       if (flags()->verbosity > 0) {
@@ -222,6 +226,22 @@ void MapShadow(uptr addr, uptr size) {
   // so we can get away with unaligned mapping.
   // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
   MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+
+  // Meta shadow is 2:1, so tread carefully.
+  static uptr mapped_meta_end = 0;
+  uptr meta_begin = (uptr)MemToMeta(addr);
+  uptr meta_end = (uptr)MemToMeta(addr + size);
+  // windows wants 64K alignment
+  meta_begin = RoundDownTo(meta_begin, 64 << 10);
+  meta_end = RoundUpTo(meta_end, 64 << 10);
+  if (meta_end <= mapped_meta_end)
+    return;
+  if (meta_begin < mapped_meta_end)
+    meta_begin = mapped_meta_end;
+  MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+  mapped_meta_end = meta_end;
+  DPrintf("mapped meta shadow for (%p-%p) at (%p-%p)\n",
+      addr, addr+size, meta_begin, meta_end);
 }
 
 void MapThreadTrace(uptr addr, uptr size) {

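A standalone illustration of why MapShadow has to track mapped_meta_end (the
2:1 ratio and the 64K granularity come from the hunk above; RoundDownTo and
RoundUpTo mimic the sanitizer_common helpers):

#include <cstdint>
#include <cstdio>

static uint64_t RoundDownTo(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t RoundUpTo(uint64_t x, uint64_t a) {
  return RoundDownTo(x + a - 1, a);
}

int main() {
  const uint64_t k64K = 64 << 10;            // windows mapping granularity
  const uint64_t meta0 = 0x308000000000ull;  // meta address of some app range

  // First MapShadow call: 64K of app memory needs only 32K of meta shadow,
  // but the mapping is rounded out to a full 64K window.
  uint64_t b1 = RoundDownTo(meta0, k64K);
  uint64_t e1 = RoundUpTo(meta0 + 32 * 1024, k64K);
  // An adjacent call starts its meta range at meta0 + 32K, which rounds
  // down into the window that is already mapped.
  uint64_t b2 = RoundDownTo(meta0 + 32 * 1024, k64K);
  std::printf("first [%llx, %llx), second begins at %llx, overlap: %d\n",
              (unsigned long long)b1, (unsigned long long)e1,
              (unsigned long long)b2, (int)(b2 < e1));
  // Clamping meta_begin to mapped_meta_end keeps the mappings disjoint.
}
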
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h Thu May 29 08:50:54 2014
@@ -44,6 +44,7 @@
 #include "tsan_platform.h"
 #include "tsan_mutexset.h"
 #include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
 
 #if SANITIZER_WORDSIZE != 64
 # error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -51,77 +52,6 @@
 
 namespace __tsan {
 
-// Descriptor of user's memory block.
-struct MBlock {
-  /*
-  u64 mtx : 1;  // must be first
-  u64 lst : 44;
-  u64 stk : 31;  // on word boundary
-  u64 tid : kTidBits;
-  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
-  */
-  u64 raw[2];
-
-  void Init(uptr siz, u32 tid, u32 stk) {
-    raw[0] = raw[1] = 0;
-    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
-    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
-    raw[0] |= (u64)stk << (1 + 44);
-    raw[1] |= (u64)stk >> (64 - 44 - 1);
-    DCHECK_EQ(Size(), siz);
-    DCHECK_EQ(Tid(), tid);
-    DCHECK_EQ(StackId(), stk);
-  }
-
-  u32 Tid() const {
-    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
-  }
-
-  uptr Size() const {
-    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
-  }
-
-  u32 StackId() const {
-    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
-  }
-
-  SyncVar *ListHead() const {
-    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
-  }
-
-  void ListPush(SyncVar *v) {
-    SyncVar *lst = ListHead();
-    v->next = lst;
-    u64 x = (u64)v ^ (u64)lst;
-    x = (x >> 3) << 1;
-    raw[0] ^= x;
-    DCHECK_EQ(ListHead(), v);
-  }
-
-  SyncVar *ListPop() {
-    SyncVar *lst = ListHead();
-    SyncVar *nxt = lst->next;
-    lst->next = 0;
-    u64 x = (u64)lst ^ (u64)nxt;
-    x = (x >> 3) << 1;
-    raw[0] ^= x;
-    DCHECK_EQ(ListHead(), nxt);
-    return lst;
-  }
-
-  void ListReset() {
-    SyncVar *lst = ListHead();
-    u64 x = (u64)lst;
-    x = (x >> 3) << 1;
-    raw[0] ^= x;
-    DCHECK_EQ(ListHead(), 0);
-  }
-
-  void Lock();
-  void Unlock();
-  typedef GenericScopedLock<MBlock> ScopedLock;
-};
-
 #ifndef TSAN_GO
 #if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
 const uptr kAllocatorSpace = 0x7d0000000000ULL;
@@ -131,7 +61,7 @@ const uptr kAllocatorSpace = 0x7d0000000
 const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
 
 struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
     DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -457,6 +387,9 @@ struct ThreadState {
   bool in_signal_handler;
   SignalContext *signal_ctx;
 
+  DenseSlabAllocCache block_cache;
+  DenseSlabAllocCache sync_cache;
+
 #ifndef TSAN_GO
   u32 last_sleep_stack_id;
   ThreadClock last_sleep_clock;
@@ -530,7 +463,7 @@ struct Context {
   bool initialized;
   bool after_multithreaded_fork;
 
-  SyncTab synctab;
+  MetaMap metamap;
 
   Mutex report_mtx;
   int nreported;
@@ -628,7 +561,7 @@ void ForkParentAfter(ThreadState *thr, u
 void ForkChildAfter(ThreadState *thr, uptr pc);
 
 void ReportRace(ThreadState *thr);
-bool OutputReport(Context *ctx, const ScopedReport &srep);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
 bool IsFiredSuppression(Context *ctx,
                         const ScopedReport &srep,
                         const StackTrace &trace);
@@ -657,9 +590,8 @@ void PrintCurrentStackSlow();  // uses l
 void Initialize(ThreadState *thr);
 int Finalize(ThreadState *thr);
 
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
-                     bool write_lock, bool create);
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
 
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
     int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc Thu May 29 08:50:54 2014
@@ -59,7 +59,7 @@ static void ReportMutexMisuse(ThreadStat
   trace.ObtainCurrent(thr, pc);
   rep.AddStack(&trace, true);
   rep.AddLocation(addr, 1);
-  OutputReport(ctx, rep);
+  OutputReport(thr, rep);
 }
 
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
@@ -72,10 +72,12 @@ void MutexCreate(ThreadState *thr, uptr
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   s->is_rw = rw;
   s->is_recursive = recursive;
   s->is_linker_init = linker_init;
+  if (kCppMode && s->creation_stack_id == 0)
+    s->creation_stack_id = CurrentStackId(thr, pc);
   s->mtx.Unlock();
 }
 
@@ -88,37 +90,54 @@ void MutexDestroy(ThreadState *thr, uptr
   if (IsGlobalVar(addr))
     return;
 #endif
-  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
-  if (s == 0)
-    return;
-  if (flags()->detect_deadlocks) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexDestroy(&cb, &s->dd);
-  }
   if (IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
+  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+  if (s == 0)
+    return;
+  if (flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ctx->dd->MutexDestroy(&cb, &s->dd);
+    ctx->dd->MutexInit(&cb, &s->dd);
+  }
+  bool unlock_locked = false;
   if (flags()->report_destroy_locked
       && s->owner_tid != SyncVar::kInvalidTid
       && !s->is_broken) {
     s->is_broken = true;
+    unlock_locked = true;
+  }
+  u64 mid = s->GetId();
+  u32 last_lock = s->last_lock;
+  if (!unlock_locked)
+    s->Reset();  // must not reset it before the report is printed
+  s->mtx.Unlock();
+  if (unlock_locked) {
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
-    rep.AddMutex(s);
+    rep.AddMutex(mid);
     StackTrace trace;
     trace.ObtainCurrent(thr, pc);
     rep.AddStack(&trace);
-    FastState last(s->last_lock);
+    FastState last(last_lock);
     RestoreStack(last.tid(), last.epoch(), &trace, 0);
     rep.AddStack(&trace, true);
-    rep.AddLocation(s->addr, 1);
-    OutputReport(ctx, rep);
+    rep.AddLocation(addr, 1);
+    OutputReport(thr, rep);
+  }
+  if (unlock_locked) {
+    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+    if (s != 0) {
+      s->Reset();
+      s->mtx.Unlock();
+    }
   }
-  thr->mset.Remove(s->GetId());
-  DestroyAndFree(s);
+  thr->mset.Remove(mid);
+  // s will be destroyed and freed in MetaMap::FreeBlock.
 }
 
 void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
@@ -126,7 +145,7 @@ void MutexLock(ThreadState *thr, uptr pc
   CHECK_GT(rec, 0);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
   bool report_double_lock = false;
@@ -170,7 +189,7 @@ int MutexUnlock(ThreadState *thr, uptr p
   DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
   int rec = 0;
@@ -213,7 +232,7 @@ void MutexReadLock(ThreadState *thr, upt
   StatInc(thr, StatMutexReadLock);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
   bool report_bad_lock = false;
@@ -248,7 +267,7 @@ void MutexReadUnlock(ThreadState *thr, u
   StatInc(thr, StatMutexReadUnlock);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
   bool report_bad_unlock = false;
@@ -279,7 +298,7 @@ void MutexReadOrWriteUnlock(ThreadState
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   bool write = true;
   bool report_bad_unlock = false;
   if (s->owner_tid == SyncVar::kInvalidTid) {
@@ -324,7 +343,7 @@ void MutexReadOrWriteUnlock(ThreadState
 
 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   s->owner_tid = SyncVar::kInvalidTid;
   s->recursion = 0;
   s->mtx.Unlock();
@@ -334,7 +353,7 @@ void Acquire(ThreadState *thr, uptr pc,
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
   AcquireImpl(thr, pc, &s->clock);
   s->mtx.ReadUnlock();
 }
@@ -361,7 +380,7 @@ void Release(ThreadState *thr, uptr pc,
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -373,7 +392,7 @@ void ReleaseStore(ThreadState *thr, uptr
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -465,7 +484,7 @@ void ReportDeadlock(ThreadState *thr, up
       rep.AddStack(&stacks[i], true);
     }
   }
-  OutputReport(ctx, rep);
+  OutputReport(thr, rep);
 }
 
 }  // namespace __tsan
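
The reworked MutexDestroy above follows a copy-then-unlock discipline: the id
and last-lock state are copied out while the SyncVar is still locked, the
object's mutex is dropped before the report is produced (report generation
takes other locks, such as the thread registry), and the SyncVar is looked up
again afterwards for the final Reset, since it may have been recycled in
between. A self-contained illustration of the same pattern on a plain object
(all names here are illustrative, not from the runtime):

  #include <cstdio>
  #include <mutex>

  // Copy what the report needs while holding the object's lock, drop the
  // lock before reporting, re-acquire only for the final reset.
  struct Obj {
    std::mutex mtx;
    unsigned long id = 42;
    unsigned last_lock = 7;
    bool locked_at_destroy = true;
  };

  void Destroy(Obj &o) {
    o.mtx.lock();
    unsigned long id = o.id;            // copy out under the lock
    unsigned last_lock = o.last_lock;
    bool report = o.locked_at_destroy;
    o.mtx.unlock();                     // never report while holding o.mtx
    if (report)
      printf("destroyed while locked: id=%lu last_lock=%u\n", id, last_lock);
    o.mtx.lock();                       // re-acquire: the object may have
    o.id = 0;                           // been touched while we reported
    o.mtx.unlock();
  }

  int main() { Obj o; Destroy(o); }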

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc Thu May 29 08:50:54 2014
@@ -179,7 +179,8 @@ void ScopedReport::AddMemoryAccess(uptr
   mop->write = s.IsWrite();
   mop->atomic = s.IsAtomic();
   mop->stack = SymbolizeStack(*stack);
-  mop->stack->suppressable = true;
+  if (mop->stack)
+    mop->stack->suppressable = true;
   for (uptr i = 0; i < mset->Size(); i++) {
     MutexSet::Desc d = mset->Get(i);
     u64 mid = this->AddMutex(d.id);
@@ -279,7 +280,7 @@ u64 ScopedReport::AddMutex(u64 id) {
   u64 uid = 0;
   u64 mid = id;
   uptr addr = SyncVar::SplitId(id, &uid);
-  SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
+  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
   // Check that the mutex is still alive.
   // Another mutex can be created at the same address,
   // so check uid as well.
@@ -290,7 +291,7 @@ u64 ScopedReport::AddMutex(u64 id) {
     AddDeadMutex(id);
   }
   if (s)
-    s->mtx.ReadUnlock();
+    s->mtx.Unlock();
   return mid;
 }
 
@@ -330,21 +331,26 @@ void ScopedReport::AddLocation(uptr addr
     return;
   }
   MBlock *b = 0;
-  if (allocator()->PointerIsMine((void*)addr)
-      && (b = user_mblock(0, (void*)addr))) {
-    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
+  Allocator *a = allocator();
+  if (a->PointerIsMine((void*)addr)) {
+    void *block_begin = a->GetBlockBegin((void*)addr);
+    if (block_begin)
+      b = ctx->metamap.GetBlock((uptr)block_begin);
+  }
+  if (b != 0) {
+    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
     void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
     ReportLocation *loc = new(mem) ReportLocation();
     rep_->locs.PushBack(loc);
     loc->type = ReportLocationHeap;
     loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
-    loc->size = b->Size();
-    loc->tid = tctx ? tctx->tid : b->Tid();
+    loc->size = b->siz;
+    loc->tid = tctx ? tctx->tid : b->tid;
     loc->name = 0;
     loc->file = 0;
     loc->line = 0;
     loc->stack = 0;
-    loc->stack = SymbolizeStackId(b->StackId());
+    loc->stack = SymbolizeStackId(b->stk);
     if (tctx)
       AddThread(tctx);
     return;
@@ -500,7 +506,7 @@ static void AddRacyStacks(ThreadState *t
   }
 }
 
-bool OutputReport(Context *ctx, const ScopedReport &srep) {
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
   atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
   const ReportDesc *rep = srep.GetReport();
   Suppression *supp = 0;
@@ -517,8 +523,14 @@ bool OutputReport(Context *ctx, const Sc
     FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
     ctx->fired_suppressions.push_back(s);
   }
-  if (OnReport(rep, suppress_pc != 0))
-    return false;
+  {
+    bool old_is_freeing = thr->is_freeing;
+    thr->is_freeing = false;
+    bool suppressed = OnReport(rep, suppress_pc != 0);
+    thr->is_freeing = old_is_freeing;
+    if (suppressed)
+      return false;
+  }
   PrintReport(rep);
   ctx->nreported++;
   if (flags()->halt_on_error)
@@ -616,6 +628,8 @@ static bool RaceBetweenAtomicAndFree(Thr
 }
 
 void ReportRace(ThreadState *thr) {
+  CheckNoLocks(thr);
+
   // Symbolizer makes lots of intercepted calls. If we try to process them,
   // at best it will cause deadlocks on internal mutexes.
   ScopedIgnoreInterceptors ignore;
@@ -700,7 +714,7 @@ void ReportRace(ThreadState *thr) {
   }
 #endif
 
-  if (!OutputReport(ctx, rep))
+  if (!OutputReport(thr, rep))
     return;
 
   AddRacyStacks(thr, traces, addr_min, addr_max);
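
Three smaller behavioral changes in this file are easy to miss: SymbolizeStack
can return null, so AddMemoryAccess now guards the suppressable write;
GetIfExistsAndLock lost its read/write flag and always takes the write lock,
so AddMutex pairs it with Unlock instead of ReadUnlock; and OutputReport
clears thr->is_freeing around the user OnReport callback, presumably so that
memory touched inside the callback is not attributed to a free() in progress.
ReportRace additionally asserts CheckNoLocks(thr) on entry. The save/restore
around OnReport could equally be an RAII guard; a minimal self-contained
sketch of that equivalent (ScopedClearFlag is an illustrative name, not part
of the patch):

  #include <cstdio>

  // The old_is_freeing save/restore from OutputReport as an RAII guard.
  struct ScopedClearFlag {
    bool *flag;
    bool old;
    explicit ScopedClearFlag(bool *f) : flag(f), old(*f) { *flag = false; }
    ~ScopedClearFlag() { *flag = old; }
  };

  int main() {
    bool is_freeing = true;
    {
      ScopedClearFlag clear(&is_freeing);
      printf("during callback: %d\n", is_freeing);  // prints 0
    }
    printf("after: %d\n", is_freeing);              // prints 1
  }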

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc Thu May 29 08:50:54 2014
@@ -207,7 +207,7 @@ void ThreadFinalize(ThreadState *thr) {
     ScopedReport rep(ReportTypeThreadLeak);
     rep.AddThread(leaks[i].tctx, true);
     rep.SetCount(leaks[i].count);
-    OutputReport(ctx, rep);
+    OutputReport(thr, rep);
   }
 #endif
 }

Added: compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc?rev=209810&view=auto
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc (added)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc Thu May 29 08:50:54 2014
@@ -0,0 +1,112 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+StackTrace::StackTrace()
+    : n_()
+    , s_()
+    , c_() {
+}
+
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+    : n_()
+    , s_(buf)
+    , c_(cnt) {
+  CHECK_NE(buf, 0);
+  CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+  Reset();
+}
+
+void StackTrace::Reset() {
+  if (s_ && !c_) {
+    CHECK_NE(n_, 0);
+    internal_free(s_);
+    s_ = 0;
+  }
+  n_ = 0;
+}
+
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+  Reset();
+  if (cnt == 0)
+    return;
+  if (c_) {
+    CHECK_NE(s_, 0);
+    CHECK_LE(cnt, c_);
+  } else {
+    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+  }
+  n_ = cnt;
+  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+  Reset();
+  n_ = thr->shadow_stack_pos - thr->shadow_stack;
+  if (n_ + !!toppc == 0)
+    return;
+  uptr start = 0;
+  if (c_) {
+    CHECK_NE(s_, 0);
+    if (n_ + !!toppc > c_) {
+      start = n_ - c_ + !!toppc;
+      n_ = c_ - !!toppc;
+    }
+  } else {
+    // Cap potentially huge stacks.
+    if (n_ + !!toppc > kTraceStackSize) {
+      start = n_ - kTraceStackSize + !!toppc;
+      n_ = kTraceStackSize - !!toppc;
+    }
+    s_ = (uptr*)internal_alloc(MBlockStackTrace,
+                               (n_ + !!toppc) * sizeof(s_[0]));
+  }
+  for (uptr i = 0; i < n_; i++)
+    s_[i] = thr->shadow_stack[start + i];
+  if (toppc) {
+    s_[n_] = toppc;
+    n_++;
+  }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+  Reset();
+  Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+  return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+  return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+  CHECK_LT(i, n_);
+  return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+  return s_;
+}
+
+}  // namespace __tsan

Added: compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h?rev=209810&view=auto
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h (added)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h Thu May 29 08:50:54 2014
@@ -0,0 +1,54 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class StackTrace {
+ public:
+  StackTrace();
+  // Initializes the object in "static mode";
+  // in this mode it never calls malloc/free but uses the provided buffer.
+  StackTrace(uptr *buf, uptr cnt);
+  ~StackTrace();
+  void Reset();
+
+  void Init(const uptr *pcs, uptr cnt);
+  void ObtainCurrent(ThreadState *thr, uptr toppc);
+  bool IsEmpty() const;
+  uptr Size() const;
+  uptr Get(uptr i) const;
+  const uptr *Begin() const;
+  void CopyFrom(const StackTrace& other);
+
+ private:
+  uptr n_;
+  uptr *s_;
+  const uptr c_;
+
+  StackTrace(const StackTrace&);
+  void operator = (const StackTrace&);
+};
+
+}  // namespace __tsan
+
+#endif  // TSAN_STACK_TRACE_H
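
The two constructors select between two ownership modes: a default-constructed
StackTrace allocates its buffer with internal_alloc on first use and frees it
in Reset, while the (buf, cnt) constructor pins a caller-provided buffer that
is never freed ("static mode"). A self-contained miniature of the same
ownership rule, with plain new/delete standing in for the internal allocator
and the truncation logic elided:

  #include <cstdio>
  #include <cstring>
  #include <cstddef>

  // cap_ == 0 means the object owns a heap buffer;
  // cap_ != 0 means "static mode" with a borrowed buffer.
  class MiniTrace {
   public:
    MiniTrace() : n_(0), s_(nullptr), cap_(0) {}
    MiniTrace(size_t *buf, size_t cap) : n_(0), s_(buf), cap_(cap) {}
    ~MiniTrace() { Reset(); }
    void Init(const size_t *pcs, size_t cnt) {
      Reset();
      if (cnt == 0) return;
      if (cap_ == 0) s_ = new size_t[cnt];  // heap mode allocates on demand
      n_ = cnt;                             // static mode assumes cnt <= cap_
      memcpy(s_, pcs, cnt * sizeof(s_[0]));
    }
    void Reset() {
      if (s_ && cap_ == 0) { delete[] s_; s_ = nullptr; }
      n_ = 0;
    }
    size_t Size() const { return n_; }
   private:
    size_t n_;
    size_t *s_;
    size_t cap_;
  };

  int main() {
    size_t pcs[3] = {0x101, 0x102, 0x103};
    MiniTrace heap_mode;            // allocates in Init, frees in Reset
    heap_mode.Init(pcs, 3);
    size_t buf[8];
    MiniTrace static_mode(buf, 8);  // never allocates or frees
    static_mode.Init(pcs, 3);
    printf("%zu %zu\n", heap_mode.Size(), static_mode.Size());
  }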

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc Thu May 29 08:50:54 2014
@@ -19,293 +19,192 @@ namespace __tsan {
 
 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
 
-SyncVar::SyncVar(uptr addr, u64 uid)
-  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
-  , addr(addr)
-  , uid(uid)
-  , creation_stack_id()
-  , owner_tid(kInvalidTid)
-  , last_lock()
-  , recursion()
-  , is_rw()
-  , is_recursive()
-  , is_broken()
-  , is_linker_init() {
-}
-
-SyncTab::Part::Part()
-  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
-  , val() {
-}
-
-SyncTab::SyncTab() {
-}
-
-SyncTab::~SyncTab() {
-  for (int i = 0; i < kPartCount; i++) {
-    while (tab_[i].val) {
-      SyncVar *tmp = tab_[i].val;
-      tab_[i].val = tmp->next;
-      DestroyAndFree(tmp);
-    }
-  }
-}
-
-SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
-                                     uptr addr, bool write_lock) {
-  return GetAndLock(thr, pc, addr, write_lock, true);
-}
-
-SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
-  return GetAndLock(0, 0, addr, write_lock, false);
+SyncVar::SyncVar()
+    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+  Reset();
 }
 
-SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
-  StatInc(thr, StatSyncCreated);
-  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
-  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
-  SyncVar *res = new(mem) SyncVar(addr, uid);
-  res->creation_stack_id = 0;
-  if (!kGoMode)  // Go does not use them
-    res->creation_stack_id = CurrentStackId(thr, pc);
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+  this->addr = addr;
+  this->uid = uid;
+
+  creation_stack_id = 0;
+  if (kCppMode)  // Go does not use them
+    creation_stack_id = CurrentStackId(thr, pc);
   if (flags()->detect_deadlocks)
-    DDMutexInit(thr, pc, res);
-  return res;
+    DDMutexInit(thr, pc, this);
 }
 
-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
-                             uptr addr, bool write_lock, bool create) {
-#ifndef TSAN_GO
-  {  // NOLINT
-    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
-    if (res)
-      return res;
-  }
-
-  // Here we ask only PrimaryAllocator, because
-  // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
-  // the hashmap anyway.
-  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
-    MBlock *b = user_mblock(thr, (void*)addr);
-    CHECK_NE(b, 0);
-    MBlock::ScopedLock l(b);
-    SyncVar *res = 0;
-    for (res = b->ListHead(); res; res = res->next) {
-      if (res->addr == addr)
-        break;
-    }
-    if (res == 0) {
-      if (!create)
-        return 0;
-      res = Create(thr, pc, addr);
-      b->ListPush(res);
-    }
-    if (write_lock)
-      res->mtx.Lock();
-    else
-      res->mtx.ReadLock();
-    return res;
-  }
-#endif
-
-  Part *p = &tab_[PartIdx(addr)];
-  {
-    ReadLock l(&p->mtx);
-    for (SyncVar *res = p->val; res; res = res->next) {
-      if (res->addr == addr) {
-        if (write_lock)
-          res->mtx.Lock();
-        else
-          res->mtx.ReadLock();
-        return res;
-      }
-    }
-  }
-  if (!create)
+void SyncVar::Reset() {
+  addr = 0;
+  uid = 0;
+  creation_stack_id = 0;
+  owner_tid = kInvalidTid;
+  last_lock = 0;
+  recursion = 0;
+  is_rw = 0;
+  is_recursive = 0;
+  is_broken = 0;
+  is_linker_init = 0;
+  next = 0;
+
+  clock.Zero();
+  read_clock.Reset();
+}
+
+MetaMap::MetaMap() {
+  atomic_store(&uid_gen_, 0, memory_order_relaxed);
+}
+
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  u32 idx = block_alloc_.Alloc(&thr->block_cache);
+  MBlock *b = block_alloc_.Map(idx);
+  b->siz = sz;
+  b->tid = thr->tid;
+  b->stk = CurrentStackId(thr, pc);
+  u32 *meta = MemToMeta(p);
+  DCHECK_EQ(*meta, 0);
+  *meta = idx | kFlagBlock;
+}
+
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+  MBlock* b = GetBlock(p);
+  if (b == 0)
     return 0;
-  {
-    Lock l(&p->mtx);
-    SyncVar *res = p->val;
-    for (; res; res = res->next) {
-      if (res->addr == addr)
-        break;
-    }
-    if (res == 0) {
-      res = Create(thr, pc, addr);
-      res->next = p->val;
-      p->val = res;
-    }
-    if (write_lock)
-      res->mtx.Lock();
-    else
-      res->mtx.ReadLock();
-    return res;
-  }
+  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+  FreeRange(thr, pc, p, sz);
+  return sz;
 }
 
-SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
-#ifndef TSAN_GO
-  {  // NOLINT
-    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
-    if (res)
-      return res;
-  }
-  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
-    MBlock *b = user_mblock(thr, (void*)addr);
-    CHECK_NE(b, 0);
-    SyncVar *res = 0;
-    {
-      MBlock::ScopedLock l(b);
-      res = b->ListHead();
-      if (res) {
-        if (res->addr == addr) {
-          if (res->is_linker_init)
-            return 0;
-          b->ListPop();
-        } else {
-          SyncVar **prev = &res->next;
-          res = *prev;
-          while (res) {
-            if (res->addr == addr) {
-              if (res->is_linker_init)
-                return 0;
-              *prev = res->next;
-              break;
-            }
-            prev = &res->next;
-            res = *prev;
-          }
-        }
-        if (res) {
-          StatInc(thr, StatSyncDestroyed);
-          res->mtx.Lock();
-          res->mtx.Unlock();
-        }
-      }
-    }
-    return res;
-  }
-#endif
-
-  Part *p = &tab_[PartIdx(addr)];
-  SyncVar *res = 0;
-  {
-    Lock l(&p->mtx);
-    SyncVar **prev = &p->val;
-    res = *prev;
-    while (res) {
-      if (res->addr == addr) {
-        if (res->is_linker_init)
-          return 0;
-        *prev = res->next;
+void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  u32 *meta = MemToMeta(p);
+  u32 *end = MemToMeta(p + sz);
+  if (end == meta)
+    end++;
+  for (; meta < end; meta++) {
+    u32 idx = *meta;
+    *meta = 0;
+    for (;;) {
+      if (idx == 0)
+        break;
+      if (idx & kFlagBlock) {
+        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
         break;
+      } else if (idx & kFlagSync) {
+        DCHECK(idx & kFlagSync);
+        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+        u32 next = s->next;
+        s->Reset();
+        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+        idx = next;
+      } else {
+        CHECK(0);
       }
-      prev = &res->next;
-      res = *prev;
     }
   }
-  if (res) {
-    StatInc(thr, StatSyncDestroyed);
-    res->mtx.Lock();
-    res->mtx.Unlock();
-  }
-  return res;
-}
-
-int SyncTab::PartIdx(uptr addr) {
-  return (addr >> 3) % kPartCount;
-}
-
-StackTrace::StackTrace()
-    : n_()
-    , s_()
-    , c_() {
-}
-
-StackTrace::StackTrace(uptr *buf, uptr cnt)
-    : n_()
-    , s_(buf)
-    , c_(cnt) {
-  CHECK_NE(buf, 0);
-  CHECK_NE(cnt, 0);
 }
 
-StackTrace::~StackTrace() {
-  Reset();
+MBlock* MetaMap::GetBlock(uptr p) {
+  u32 *meta = MemToMeta(p);
+  u32 idx = *meta;
+  for (;;) {
+    if (idx == 0)
+      return 0;
+    if (idx & kFlagBlock)
+      return block_alloc_.Map(idx & ~kFlagMask);
+    DCHECK(idx & kFlagSync);
+    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+    idx = s->next;
+  }
 }
 
-void StackTrace::Reset() {
-  if (s_ && !c_) {
-    CHECK_NE(n_, 0);
-    internal_free(s_);
-    s_ = 0;
-  }
-  n_ = 0;
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+                                     uptr addr, bool write_lock) {
+  return GetAndLock(thr, pc, addr, write_lock, true);
 }
 
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
-  Reset();
-  if (cnt == 0)
-    return;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    CHECK_LE(cnt, c_);
-  } else {
-    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
-  }
-  n_ = cnt;
-  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
+  return GetAndLock(0, 0, addr, true, false);
 }
 
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
-  Reset();
-  n_ = thr->shadow_stack_pos - thr->shadow_stack;
-  if (n_ + !!toppc == 0)
-    return;
-  uptr start = 0;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    if (n_ + !!toppc > c_) {
-      start = n_ - c_ + !!toppc;
-      n_ = c_ - !!toppc;
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
+                             uptr addr, bool write_lock, bool create) {
+  u32 *meta = MemToMeta(addr);
+  u32 idx0 = *meta;
+  u32 myidx = 0;
+  SyncVar *mys = 0;
+  for (;;) {
+    u32 idx = *meta;
+    for (;;) {
+      if (idx == 0)
+        break;
+      if (idx & kFlagBlock)
+        break;
+      DCHECK(idx & kFlagSync);
+      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+      if (s->addr == addr) {
+        if (myidx != 0) {
+          mys->Reset();
+          sync_alloc_.Free(&thr->sync_cache, myidx);
+        }
+        if (write_lock)
+          s->mtx.Lock();
+        else
+          s->mtx.ReadLock();
+        return s;
+      }
+      idx = s->next;
     }
-  } else {
-    // Cap potentially huge stacks.
-    if (n_ + !!toppc > kTraceStackSize) {
-      start = n_ - kTraceStackSize + !!toppc;
-      n_ = kTraceStackSize - !!toppc;
+    if (!create)
+      return 0;
+    if (*meta != idx0)
+      continue;
+
+    if (myidx == 0) {
+      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+      myidx = sync_alloc_.Alloc(&thr->sync_cache);
+      mys = sync_alloc_.Map(myidx);
+      mys->Init(thr, pc, addr, uid);
+    }
+    mys->next = idx0;
+    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+        myidx | kFlagSync, memory_order_release)) {
+      if (write_lock)
+        mys->mtx.Lock();
+      else
+        mys->mtx.ReadLock();
+      return mys;
     }
-    s_ = (uptr*)internal_alloc(MBlockStackTrace,
-                               (n_ + !!toppc) * sizeof(s_[0]));
-  }
-  for (uptr i = 0; i < n_; i++)
-    s_[i] = thr->shadow_stack[start + i];
-  if (toppc) {
-    s_[n_] = toppc;
-    n_++;
   }
 }
 
-void StackTrace::CopyFrom(const StackTrace& other) {
-  Reset();
-  Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
-  return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
-  return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
-  CHECK_LT(i, n_);
-  return s_[i];
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+  // Here we assume that src and dst do not overlap
+  // and that there are no concurrent accesses to the regions
+  // (e.g. the caller has stopped the world).
+  uptr diff = dst - src;
+  u32 *src_meta = MemToMeta(src);
+  u32 *dst_meta = MemToMeta(dst);
+  u32 *src_meta_end = MemToMeta(src + sz);
+  for (; src_meta != src_meta_end; src_meta++, dst_meta++) {
+    CHECK_EQ(*dst_meta, 0);
+    u32 idx = *src_meta;
+    *src_meta = 0;
+    *dst_meta = idx;
+    // Patch the addresses in sync objects.
+    while (idx != 0) {
+      if (idx & kFlagBlock)
+        break;
+      CHECK(idx & kFlagSync);
+      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+      s->addr += diff;
+      idx = s->next;
+    }
+  }
 }
 
-const uptr *StackTrace::Begin() const {
-  return s_;
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+  block_alloc_.FlushCache(&thr->block_cache);
+  sync_alloc_.FlushCache(&thr->sync_cache);
 }
 
 }  // namespace __tsan
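
The core of the new scheme is visible in GetBlock and GetAndLock: a meta
shadow cell holds a 30-bit index into a DenseSlabAlloc plus a 2-bit tag
(kFlagBlock or kFlagSync), sync objects for the cell are prepended to a chain
linked through SyncVar::next with the heap block index as the chain's tail,
and concurrent insertion is resolved by the compare-and-swap on the cell. A
self-contained toy of the cell encoding and chain walk (a fixed array stands
in for the slab allocator; allocation and locking are elided):

  #include <cstdint>
  #include <cstdio>

  // The top two bits of a cell tag the low 30 bits, which index a side table.
  const uint32_t kFlagMask  = 3u << 30;
  const uint32_t kFlagBlock = 1u << 30;
  const uint32_t kFlagSync  = 2u << 30;

  struct ToySync { uintptr_t addr; uint32_t next; };
  ToySync sync_tab[16];  // stand-in for sync_alloc_

  // Walk the chain rooted at a meta cell; sync objects are prepended, so the
  // heap block index (tagged kFlagBlock) is always the tail of the chain.
  uint32_t FindSync(uint32_t cell, uintptr_t addr) {
    for (uint32_t idx = cell; idx != 0;) {
      if (idx & kFlagBlock) return 0;  // reached the block: no sync var here
      ToySync *s = &sync_tab[idx & ~kFlagMask];
      if (s->addr == addr) return idx & ~kFlagMask;
      idx = s->next;
    }
    return 0;
  }

  int main() {
    // One sync object (table slot 1) chained in front of heap block slot 5.
    sync_tab[1].addr = 0x1000;
    sync_tab[1].next = 5 | kFlagBlock;
    uint32_t cell = 1 | kFlagSync;
    printf("found slot %u\n", FindSync(cell, 0x1000));  // prints 1
  }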

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_sync.h Thu May 29 08:50:54 2014
@@ -16,46 +16,21 @@
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
-#include "tsan_clock.h"
 #include "tsan_defs.h"
+#include "tsan_clock.h"
 #include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
 
 namespace __tsan {
 
-class StackTrace {
- public:
-  StackTrace();
-  // Initialized the object in "static mode",
-  // in this mode it never calls malloc/free but uses the provided buffer.
-  StackTrace(uptr *buf, uptr cnt);
-  ~StackTrace();
-  void Reset();
-
-  void Init(const uptr *pcs, uptr cnt);
-  void ObtainCurrent(ThreadState *thr, uptr toppc);
-  bool IsEmpty() const;
-  uptr Size() const;
-  uptr Get(uptr i) const;
-  const uptr *Begin() const;
-  void CopyFrom(const StackTrace& other);
-
- private:
-  uptr n_;
-  uptr *s_;
-  const uptr c_;
-
-  StackTrace(const StackTrace&);
-  void operator = (const StackTrace&);
-};
-
 struct SyncVar {
-  explicit SyncVar(uptr addr, u64 uid);
+  SyncVar();
 
   static const int kInvalidTid = -1;
 
+  uptr addr;  // overwritten by DenseSlabAlloc freelist
   Mutex mtx;
-  uptr addr;
-  const u64 uid;  // Globally unique id.
+  u64 uid;  // Globally unique id.
   u32 creation_stack_id;
   int owner_tid;  // Set only by exclusive owners.
   u64 last_lock;
@@ -64,13 +39,16 @@ struct SyncVar {
   bool is_recursive;
   bool is_broken;
   bool is_linker_init;
-  SyncVar *next;  // In SyncTab hashtable.
+  u32 next;  // in MetaMap
   DDMutex dd;
   SyncClock read_clock;  // Used for rw mutexes only.
   // The clock is placed last, so that it is situated on a different cache line
   // with the mtx. This reduces contention for hot sync objects.
   SyncClock clock;
 
+  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+  void Reset();
+
   u64 GetId() const {
     // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
     return GetLsb((u64)addr | (uid << 47), 61);
@@ -85,40 +63,39 @@ struct SyncVar {
   }
 };
 
-class SyncTab {
+/* MetaMap maps arbitrary user pointers onto various descriptors.
+   Currently it maps pointers to heap block descriptors and sync var
+   descriptors. It uses 1/2 direct shadow; see tsan_platform.h.
+*/
+class MetaMap {
  public:
-  SyncTab();
-  ~SyncTab();
+  MetaMap();
+
+  void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+  void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  MBlock* GetBlock(uptr p);
 
   SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
                               uptr addr, bool write_lock);
-  SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+  SyncVar* GetIfExistsAndLock(uptr addr);
 
-  // If the SyncVar does not exist, returns 0.
-  SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+  void MoveMemory(uptr src, uptr dst, uptr sz);
 
-  SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+  void OnThreadIdle(ThreadState *thr);
 
  private:
-  struct Part {
-    Mutex mtx;
-    SyncVar *val;
-    char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)];  // NOLINT
-    Part();
-  };
-
-  // FIXME: Implement something more sane.
-  static const int kPartCount = 1009;
-  Part tab_[kPartCount];
+  static const u32 kFlagMask  = 3 << 30;
+  static const u32 kFlagBlock = 1 << 30;
+  static const u32 kFlagSync  = 2 << 30;
+  typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+  typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+  BlockAlloc block_alloc_;
+  SyncAlloc sync_alloc_;
   atomic_uint64_t uid_gen_;
 
-  int PartIdx(uptr addr);
-
-  SyncVar* GetAndLock(ThreadState *thr, uptr pc,
-                      uptr addr, bool write_lock, bool create);
-
-  SyncTab(const SyncTab&);  // Not implemented.
-  void operator = (const SyncTab&);  // Not implemented.
+  SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+                      bool create);
 };
 
 }  // namespace __tsan
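
The "1/2 direct shadow" mentioned in the class comment means every 8-byte
granule of application memory owns one 4-byte meta cell at a fixed linear
offset, so the lookup is pure address arithmetic with no hashing or locking.
A sketch of such a mapping; the base addresses and cell sizes below are stated
assumptions, the real constants live in tsan_platform.h:

  #include <cstdint>
  #include <cstdio>

  // Hypothetical layout constants; the actual values are platform-specific.
  const uintptr_t kAppMemBeg      = 0x7e0000000000ull;
  const uintptr_t kMetaShadowBeg  = 0x300000000000ull;
  const uintptr_t kMetaShadowCell = 8;  // app bytes covered per meta cell
  const uintptr_t kMetaShadowSize = 4;  // bytes per meta cell (one u32 index)

  uint32_t *MemToMetaSketch(uintptr_t addr) {
    return (uint32_t *)(kMetaShadowBeg +
                        (addr - kAppMemBeg) / kMetaShadowCell * kMetaShadowSize);
  }

  int main() {
    // Adjacent 8-byte granules map to adjacent u32 meta cells.
    printf("%p %p\n", (void *)MemToMetaSketch(kAppMemBeg),
           (void *)MemToMetaSketch(kAppMemBeg + 8));
  }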

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h Thu May 29 08:50:54 2014
@@ -15,7 +15,7 @@
 
 #include "tsan_defs.h"
 #include "tsan_mutex.h"
-#include "tsan_sync.h"
+#include "tsan_stack_trace.h"
 #include "tsan_mutexset.h"
 
 namespace __tsan {

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_vector.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_vector.h?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_vector.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_vector.h Thu May 29 08:50:54 2014
@@ -78,6 +78,10 @@ class Vector {
   }
 
   void Resize(uptr size) {
+    if (size == 0) {
+      end_ = begin_;
+      return;
+    }
     uptr old_size = Size();
     EnsureSize(size);
     if (old_size < size) {
@@ -100,7 +104,7 @@ class Vector {
       return;
     }
     uptr cap0 = last_ - begin_;
-    uptr cap = 2 * cap0;
+    uptr cap = cap0 * 5 / 4;  // 25% growth
     if (cap == 0)
       cap = 16;
     if (cap < size)
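
Shrinking the Vector growth factor from 2x to 25% trades a few extra
reallocations for a much tighter worst-case memory overshoot. With the integer
arithmetic above and the initial capacity of 16, the capacity sequence is easy
to check:

  #include <cstdio>

  int main() {
    // Reproduces the new growth policy: cap = cap * 5 / 4, starting at 16.
    unsigned long cap = 16;
    for (int i = 0; i < 8; i++) {
      printf("%lu ", cap);
      cap = cap * 5 / 4;
    }
    printf("\n");  // prints: 16 20 25 31 38 47 58 72
  }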

Added: compiler-rt/trunk/lib/tsan/tests/unit/tsan_dense_alloc_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_dense_alloc_test.cc?rev=209810&view=auto
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_dense_alloc_test.cc (added)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_dense_alloc_test.cc Thu May 29 08:50:54 2014
@@ -0,0 +1,55 @@
+//===-- tsan_dense_alloc_test.cc ------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_dense_alloc.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "gtest/gtest.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <map>
+
+namespace __tsan {
+
+TEST(DenseSlabAlloc, Basic) {
+  typedef DenseSlabAlloc<int, 128, 128> Alloc;
+  typedef Alloc::Cache Cache;
+  typedef Alloc::IndexT IndexT;
+  const int N = 1000;
+
+  Alloc alloc;
+  Cache cache;
+  alloc.InitCache(&cache);
+
+  IndexT blocks[N];
+  for (int ntry = 0; ntry < 3; ntry++) {
+    for (int i = 0; i < N; i++) {
+      IndexT idx = alloc.Alloc(&cache);
+      blocks[i] = idx;
+      EXPECT_NE(idx, 0);
+      int *v = alloc.Map(idx);
+      *v = i;
+    }
+
+    for (int i = 0; i < N; i++) {
+      IndexT idx = blocks[i];
+      int *v = alloc.Map(idx);
+      EXPECT_EQ(*v, i);
+      alloc.Free(&cache, idx);
+    }
+
+    alloc.FlushCache(&cache);
+  }
+}
+
+}  // namespace __tsan
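
The test exercises the essentials of DenseSlabAlloc (added earlier in this
patch): objects are addressed by dense 32-bit indices rather than pointers,
which is what allows SyncVar::next to shrink to a u32. A self-contained toy
with the same shape, assuming a two-level table and index 0 reserved as null
(freelists, caches and thread safety are elided):

  #include <cstdint>
  #include <cstdio>

  template <typename T, uint32_t kL1, uint32_t kL2>
  struct ToySlab {
    T *l1[kL1] = {};      // lazily allocated slabs
    uint32_t fill = 1;    // index 0 is reserved as "null"
    uint32_t Alloc() {
      uint32_t idx = fill++;
      if (!l1[idx / kL2]) l1[idx / kL2] = new T[kL2]();
      return idx;
    }
    T *Map(uint32_t idx) { return &l1[idx / kL2][idx % kL2]; }
  };

  int main() {
    ToySlab<int, 128, 128> alloc;
    uint32_t idx = alloc.Alloc();
    *alloc.Map(idx) = 42;
    printf("index %u -> value %d\n", idx, *alloc.Map(idx));
  }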

Modified: compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc (original)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc Thu May 29 08:50:54 2014
@@ -51,20 +51,8 @@ TEST(Mman, User) {
   char *p2 = (char*)user_alloc(thr, pc, 20);
   EXPECT_NE(p2, (char*)0);
   EXPECT_NE(p2, p);
-  MBlock *b = user_mblock(thr, p);
-  EXPECT_NE(b, (MBlock*)0);
-  EXPECT_EQ(b->Size(), (uptr)10);
-  MBlock *b2 = user_mblock(thr, p2);
-  EXPECT_NE(b2, (MBlock*)0);
-  EXPECT_EQ(b2->Size(), (uptr)20);
-  for (int i = 0; i < 10; i++) {
-    p[i] = 42;
-    EXPECT_EQ(b, user_mblock(thr, p + i));
-  }
-  for (int i = 0; i < 20; i++) {
-    ((char*)p2)[i] = 42;
-    EXPECT_EQ(b2, user_mblock(thr, p2 + i));
-  }
+  EXPECT_EQ(user_alloc_usable_size(thr, pc, p), (uptr)10);
+  EXPECT_EQ(user_alloc_usable_size(thr, pc, p2), (uptr)20);
   user_free(thr, pc, p);
   user_free(thr, pc, p2);
 }

Modified: compiler-rt/trunk/lib/tsan/tests/unit/tsan_sync_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_sync_test.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_sync_test.cc (original)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_sync_test.cc Thu May 29 08:50:54 2014
@@ -12,53 +12,100 @@
 //===----------------------------------------------------------------------===//
 #include "tsan_sync.h"
 #include "tsan_rtl.h"
-#include "tsan_mman.h"
 #include "gtest/gtest.h"
 
-#include <stdlib.h>
-#include <stdint.h>
-#include <map>
-
 namespace __tsan {
 
-TEST(Sync, Table) {
-  const uintptr_t kIters = 512*1024;
-  const uintptr_t kRange = 10000;
+TEST(MetaMap, Basic) {
+  ThreadState *thr = cur_thread();
+  MetaMap *m = &ctx->metamap;
+  u64 block[1] = {};  // fake malloc block
+  m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
+  MBlock *mb = m->GetBlock((uptr)&block[0]);
+  EXPECT_NE(mb, (MBlock*)0);
+  EXPECT_EQ(mb->siz, 1 * sizeof(u64));
+  EXPECT_EQ(mb->tid, thr->tid);
+  uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
+  EXPECT_EQ(sz, 1 * sizeof(u64));
+  mb = m->GetBlock((uptr)&block[0]);
+  EXPECT_EQ(mb, (MBlock*)0);
+}
+
+TEST(MetaMap, FreeRange) {
+  ThreadState *thr = cur_thread();
+  MetaMap *m = &ctx->metamap;
+  u64 block[4] = {};  // fake malloc block
+  m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
+  m->AllocBlock(thr, 0, (uptr)&block[1], 3 * sizeof(u64));
+  MBlock *mb1 = m->GetBlock((uptr)&block[0]);
+  EXPECT_EQ(mb1->siz, 1 * sizeof(u64));
+  MBlock *mb2 = m->GetBlock((uptr)&block[1]);
+  EXPECT_EQ(mb2->siz, 3 * sizeof(u64));
+  m->FreeRange(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
+  mb1 = m->GetBlock((uptr)&block[0]);
+  EXPECT_EQ(mb1, (MBlock*)0);
+  mb2 = m->GetBlock((uptr)&block[1]);
+  EXPECT_EQ(mb2, (MBlock*)0);
+}
 
+TEST(MetaMap, Sync) {
   ThreadState *thr = cur_thread();
-  uptr pc = 0;
+  MetaMap *m = &ctx->metamap;
+  u64 block[4] = {};  // fake malloc block
+  m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
+  SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0]);
+  EXPECT_EQ(s1, (SyncVar*)0);
+  s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
+  EXPECT_NE(s1, (SyncVar*)0);
+  EXPECT_EQ(s1->addr, (uptr)&block[0]);
+  s1->mtx.Unlock();
+  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false);
+  EXPECT_NE(s2, (SyncVar*)0);
+  EXPECT_EQ(s2->addr, (uptr)&block[1]);
+  s2->mtx.ReadUnlock();
+  m->FreeBlock(thr, 0, (uptr)&block[0]);
+  s1 = m->GetIfExistsAndLock((uptr)&block[0]);
+  EXPECT_EQ(s1, (SyncVar*)0);
+  s2 = m->GetIfExistsAndLock((uptr)&block[1]);
+  EXPECT_EQ(s2, (SyncVar*)0);
+  m->OnThreadIdle(thr);
+}
 
-  SyncTab tab;
-  SyncVar *golden[kRange] = {};
-  unsigned seed = 0;
-  for (uintptr_t i = 0; i < kIters; i++) {
-    uintptr_t addr = rand_r(&seed) % (kRange - 1) + 1;
-    if (rand_r(&seed) % 2) {
-      // Get or add.
-      SyncVar *v = tab.GetOrCreateAndLock(thr, pc, addr, true);
-      EXPECT_TRUE(golden[addr] == 0 || golden[addr] == v);
-      EXPECT_EQ(v->addr, addr);
-      golden[addr] = v;
-      v->mtx.Unlock();
-    } else {
-      // Remove.
-      SyncVar *v = tab.GetAndRemove(thr, pc, addr);
-      EXPECT_EQ(golden[addr], v);
-      if (v) {
-        EXPECT_EQ(v->addr, addr);
-        golden[addr] = 0;
-        DestroyAndFree(v);
-      }
-    }
-  }
-  for (uintptr_t addr = 0; addr < kRange; addr++) {
-    if (golden[addr] == 0)
-      continue;
-    SyncVar *v = tab.GetAndRemove(thr, pc, addr);
-    EXPECT_EQ(v, golden[addr]);
-    EXPECT_EQ(v->addr, addr);
-    DestroyAndFree(v);
-  }
+TEST(MetaMap, MoveMemory) {
+  ThreadState *thr = cur_thread();
+  MetaMap *m = &ctx->metamap;
+  u64 block1[4] = {};  // fake malloc block
+  u64 block2[4] = {};  // fake malloc block
+  m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
+  m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
+  SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true);
+  s1->mtx.Unlock();
+  SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true);
+  s2->mtx.Unlock();
+  m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
+  MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
+  EXPECT_EQ(mb1, (MBlock*)0);
+  MBlock *mb2 = m->GetBlock((uptr)&block1[3]);
+  EXPECT_EQ(mb2, (MBlock*)0);
+  mb1 = m->GetBlock((uptr)&block2[0]);
+  EXPECT_NE(mb1, (MBlock*)0);
+  EXPECT_EQ(mb1->siz, 3 * sizeof(u64));
+  mb2 = m->GetBlock((uptr)&block2[3]);
+  EXPECT_NE(mb2, (MBlock*)0);
+  EXPECT_EQ(mb2->siz, 1 * sizeof(u64));
+  s1 = m->GetIfExistsAndLock((uptr)&block1[0]);
+  EXPECT_EQ(s1, (SyncVar*)0);
+  s2 = m->GetIfExistsAndLock((uptr)&block1[1]);
+  EXPECT_EQ(s2, (SyncVar*)0);
+  s1 = m->GetIfExistsAndLock((uptr)&block2[0]);
+  EXPECT_NE(s1, (SyncVar*)0);
+  EXPECT_EQ(s1->addr, (uptr)&block2[0]);
+  s1->mtx.Unlock();
+  s2 = m->GetIfExistsAndLock((uptr)&block2[1]);
+  EXPECT_NE(s2, (SyncVar*)0);
+  EXPECT_EQ(s2->addr, (uptr)&block2[1]);
+  s2->mtx.Unlock();
+  m->FreeRange(thr, 0, (uptr)&block2[0], 4 * sizeof(u64));
 }
 
 }  // namespace __tsan

Modified: compiler-rt/trunk/test/tsan/java_alloc.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/java_alloc.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/test/tsan/java_alloc.cc (original)
+++ compiler-rt/trunk/test/tsan/java_alloc.cc Thu May 29 08:50:54 2014
@@ -19,14 +19,20 @@ void *Thread(void *p) {
 }
 
 int main() {
-  jptr jheap = (jptr)malloc(kHeapSize);
+  jptr jheap = (jptr)malloc(kHeapSize + 8) + 8;
   __tsan_java_init(jheap, kHeapSize);
   pthread_t th;
   pthread_create(&th, 0, Thread, (void*)(jheap + kHeapSize / 4));
   stress(jheap);
   pthread_join(th, 0);
-  printf("OK\n");
-  return __tsan_java_fini();
+  if (__tsan_java_fini() != 0) {
+    printf("FAILED\n");
+    return 1;
+  }
+  printf("DONE\n");
+  return 0;
 }
 
 // CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK-NOT: FAILED
+// CHECK: DONE
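
The Java tests (here and in the three tests below) now place the heap 8 bytes
into the malloc'd region rather than at its start. The patch does not state
the reason, but under the new MetaMap the malloc'd block itself receives an
MBlock descriptor keyed to its begin address, so offsetting the Java heap into
the next 8-byte meta cell plausibly keeps the Java meta entries from colliding
with that descriptor. This test also now distinguishes a failing
__tsan_java_fini (FAILED) from a clean shutdown (DONE), so FileCheck can
assert on both outcomes.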

Modified: compiler-rt/trunk/test/tsan/java_lock_rec_race.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/java_lock_rec_race.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/test/tsan/java_lock_rec_race.cc (original)
+++ compiler-rt/trunk/test/tsan/java_lock_rec_race.cc Thu May 29 08:50:54 2014
@@ -25,7 +25,7 @@ void *Thread(void *p) {
 
 int main() {
   int const kHeapSize = 1024 * 1024;
-  void *jheap = malloc(kHeapSize);
+  void *jheap = (char*)malloc(kHeapSize + 8) + 8;
   __tsan_java_init((jptr)jheap, kHeapSize);
   const int kBlockSize = 16;
   __tsan_java_alloc((jptr)jheap, kBlockSize);

Modified: compiler-rt/trunk/test/tsan/java_race.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/java_race.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/test/tsan/java_race.cc (original)
+++ compiler-rt/trunk/test/tsan/java_race.cc Thu May 29 08:50:54 2014
@@ -8,7 +8,7 @@ void *Thread(void *p) {
 
 int main() {
   int const kHeapSize = 1024 * 1024;
-  void *jheap = malloc(kHeapSize);
+  void *jheap = (char*)malloc(kHeapSize + 8) + 8;
   __tsan_java_init((jptr)jheap, kHeapSize);
   const int kBlockSize = 16;
   __tsan_java_alloc((jptr)jheap, kBlockSize);

Modified: compiler-rt/trunk/test/tsan/java_race_move.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/java_race_move.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/test/tsan/java_race_move.cc (original)
+++ compiler-rt/trunk/test/tsan/java_race_move.cc Thu May 29 08:50:54 2014
@@ -12,7 +12,7 @@ void *Thread(void *p) {
 
 int main() {
   int const kHeapSize = 1024 * 1024;
-  void *jheap = malloc(kHeapSize);
+  void *jheap = (char*)malloc(kHeapSize + 8) + 8;
   __tsan_java_init((jptr)jheap, kHeapSize);
   const int kBlockSize = 64;
   int const kMove = 1024;

Modified: compiler-rt/trunk/test/tsan/mutexset7.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/mutexset7.cc?rev=209810&r1=209809&r2=209810&view=diff
==============================================================================
--- compiler-rt/trunk/test/tsan/mutexset7.cc (original)
+++ compiler-rt/trunk/test/tsan/mutexset7.cc Thu May 29 08:50:54 2014
@@ -13,12 +13,13 @@ void *Thread1(void *x) {
 }
 
 void *Thread2(void *x) {
-  pthread_mutex_t mtx;
-  pthread_mutex_init(&mtx, 0);
-  pthread_mutex_lock(&mtx);
+  pthread_mutex_t *mtx = new pthread_mutex_t;
+  pthread_mutex_init(mtx, 0);
+  pthread_mutex_lock(mtx);
   Global--;
-  pthread_mutex_unlock(&mtx);
-  pthread_mutex_destroy(&mtx);
+  pthread_mutex_unlock(mtx);
+  pthread_mutex_destroy(mtx);
+  delete mtx;
   return NULL;
 }
 

Removed: compiler-rt/trunk/test/tsan/oob_race.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/oob_race.cc?rev=209809&view=auto
==============================================================================
--- compiler-rt/trunk/test/tsan/oob_race.cc (original)
+++ compiler-rt/trunk/test/tsan/oob_race.cc (removed)
@@ -1,26 +0,0 @@
-// RUN: %clangxx_tsan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s
-#include <pthread.h>
-#include <stdio.h>
-#include <unistd.h>
-
-const long kOffset = 64*1024;
-
-void *Thread(void *p) {
-  sleep(1);
-  ((char*)p)[-kOffset] = 43;
-  return 0;
-}
-
-int main() {
-  char *volatile p0 = new char[16];
-  delete[] p0;
-  char *p = new char[32];
-  pthread_t th;
-  pthread_create(&th, 0, Thread, p);
-  p[-kOffset] = 42;
-  pthread_join(th, 0);
-}
-
-// Used to crash with CHECK failed.
-// CHECK: WARNING: ThreadSanitizer: data race
-
