[compiler-rt] r174165 - tsan: add flag to not report races between atomic and plain memory accesses

Dmitry Vyukov dvyukov at google.com
Fri Feb 1 02:06:56 PST 2013


Author: dvyukov
Date: Fri Feb  1 04:06:56 2013
New Revision: 174165

URL: http://llvm.org/viewvc/llvm-project?rev=174165&view=rev
Log:
tsan: add flag to not report races between atomic and plain memory accesses

Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_flags.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_flags.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc

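The new report_atomic_races flag (default: true) controls whether the race
detector reports races in which one of the two conflicting accesses is an
atomic operation and the other is a plain load or store. A minimal sketch of
such an access pair, not part of this commit (it uses the __atomic builtins so
the same plain int can be accessed both atomically and non-atomically; the
file name and build line are illustrative):

// race.cc -- hypothetical reproducer: atomic store vs. plain load on x.
#include <cstdio>
#include <thread>

int x;  // plain (non-atomic) variable

int main() {
  std::thread t1([] {
    // Relaxed atomic write: no happens-before edge with the read below.
    __atomic_store_n(&x, 1, __ATOMIC_RELAXED);
  });
  std::thread t2([] {
    int v = x;  // plain read of the same location
    std::printf("%d\n", v);
  });
  t1.join();
  t2.join();
}

With the default flags a tsan-instrumented build (e.g. clang++
-fsanitize=thread -g race.cc) reports a data race on x; running the same
binary with TSAN_OPTIONS=report_atomic_races=0 is intended to suppress reports
for such atomic/plain pairs, while races between two plain accesses are still
reported.
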
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_flags.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_flags.cc?rev=174165&r1=174164&r2=174165&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_flags.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_flags.cc Fri Feb  1 04:06:56 2013
@@ -45,6 +45,7 @@ void InitializeFlags(Flags *f, const cha
   f->report_thread_leaks = true;
   f->report_destroy_locked = true;
   f->report_signal_unsafe = true;
+  f->report_atomic_races = true;
   f->force_seq_cst_atomics = false;
   f->strip_path_prefix = "";
   f->suppressions = "";
@@ -72,6 +73,7 @@ void InitializeFlags(Flags *f, const cha
   ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
   ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
   ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
+  ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
   ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
   ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
   ParseFlag(env, &f->suppressions, "suppressions");

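The hunk follows the usual tsan flag pattern: give the field a default in
InitializeFlags() and then let ParseFlag() override it from the runtime
options string. A standalone simplification of that pattern, assuming a plain
"name=value" substring match (ParseBoolFlag is a hypothetical stand-in, not
tsan's actual parser):

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Flags {
  bool report_atomic_races;
};

// Simplified boolean flag parser: looks for "name=" in the options string
// and treats anything other than '0' as true. Real parsing is stricter.
static void ParseBoolFlag(const char *env, bool *flag, const char *name) {
  if (env == nullptr)
    return;
  const char *pos = std::strstr(env, name);
  if (pos == nullptr)
    return;
  const char *eq = pos + std::strlen(name);
  if (*eq == '=')
    *flag = (eq[1] != '0');
}

int main() {
  Flags f;
  f.report_atomic_races = true;  // default, as in InitializeFlags() above
  ParseBoolFlag(std::getenv("TSAN_OPTIONS"), &f.report_atomic_races,
                "report_atomic_races");
  std::printf("report_atomic_races=%d\n", f.report_atomic_races);
}
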
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_flags.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_flags.h?rev=174165&r1=174164&r2=174165&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_flags.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_flags.h Fri Feb  1 04:06:56 2013
@@ -43,6 +43,8 @@ struct Flags {
   // Report violations of async signal-safety
   // (e.g. malloc() call from a signal handler).
   bool report_signal_unsafe;
+  // Report races between atomic and plain memory accesses.
+  bool report_atomic_races;
   // If set, all atomics are effectively sequentially consistent (seq_cst),
   // regardless of what user actually specified.
   bool force_seq_cst_atomics;

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=174165&r1=174164&r2=174165&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Fri Feb  1 04:06:56 2013
@@ -244,7 +244,8 @@ static T AtomicLoad(ThreadState *thr, up
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    if (flags()->report_atomic_races)
+      MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
     return *a;
   }
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
@@ -253,6 +254,7 @@ static T AtomicLoad(ThreadState *thr, up
   T v = *a;
   s->mtx.ReadUnlock();
   __sync_synchronize();
+  if (flags()->report_atomic_races)
     MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
   return v;
 }
@@ -261,7 +263,8 @@ template<typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
   CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -283,7 +286,8 @@ static void AtomicStore(ThreadState *thr
 
 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
@@ -343,7 +347,8 @@ template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  if (flags()->report_atomic_races)
+    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))

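Each hunk above applies the same pattern: the synchronization logic of the
atomic wrappers (vector-clock acquire/release under the SyncVar lock) still
runs unconditionally, and only the shadow-memory access that makes the
operation visible to the race detector is guarded by the flag. A condensed,
self-contained model of that shape; everything here is a hypothetical
stand-in for the tsan runtime, only the guarded recording mirrors the real
change:

#include <cstdint>

typedef uintptr_t uptr;

struct Flags { bool report_atomic_races; };
static Flags flag_storage = { true };
static Flags *flags() { return &flag_storage; }

// Stand-in for MemoryWriteAtomic(): would record the access in shadow memory.
static void RecordAtomicWrite(uptr addr, uptr size) { (void)addr; (void)size; }

template <typename T>
static void AtomicStoreModel(volatile T *a, T v) {
  // This is the call the commit guards: with the flag off, the atomic store
  // is never recorded, so it cannot conflict with a plain access elsewhere.
  if (flags()->report_atomic_races)
    RecordAtomicWrite((uptr)a, sizeof(T));
  // Synchronization (release of the thread's clock) is unaffected by the
  // flag and is elided from this model.
  *a = v;
  __sync_synchronize();
}

int main() {
  int x = 0;
  AtomicStoreModel(&x, 1);
  return x - 1;
}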