[llvm-commits] [compiler-rt] r156766 - in /compiler-rt/trunk/lib/tsan/rtl: tsan_interface_atomic.cc tsan_interface_atomic.h tsan_stat.h

Dmitry Vyukov dvyukov at google.com
Mon May 14 08:33:00 PDT 2012


Author: dvyukov
Date: Mon May 14 10:33:00 2012
New Revision: 156766

URL: http://llvm.org/viewvc/llvm-project?rev=156766&view=rev
Log:
tsan: add more atomics to public interface (fetch_or/and/xor + 1-,2-byte versions)

Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=156766&r1=156765&r2=156766&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Mon May 14 10:33:00 2012
@@ -112,6 +112,39 @@
 }
 
 template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_and(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_or(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_xor(a, v);
+  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo) {
   if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
@@ -162,6 +195,14 @@
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -170,6 +211,14 @@
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -178,6 +227,64 @@
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
     morder mo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo);
@@ -188,6 +295,26 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo);
 }
 
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+    morder mo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo);
+}
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);
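
A note for readers skimming the hunk above: the new FetchAnd/FetchOr/FetchXor helpers follow the established pattern in this file — annotate a Release() before the RMW for release-class orders, perform the update with the corresponding GCC __sync builtin, then annotate an Acquire() afterwards for acquire-class orders. Below is a minimal usage sketch of the new 1-byte entry points; it assumes the __tsan_memory_order_* enumerator names from tsan_interface_atomic.h and is illustrative only, not part of this commit:

  #include "tsan_interface_atomic.h"

  static volatile __tsan_atomic8 flags;  // hypothetical 1-byte flag word

  void set_ready_bit(void) {
    // fetch_or with release semantics: writes made before this call are
    // visible to any thread that later acquires on the same address.
    __tsan_atomic8_fetch_or(&flags, 1, __tsan_memory_order_release);
  }

  int test_and_clear(void) {
    // fetch_and with acquire semantics: clears bit 0 and synchronizes
    // with the releasing fetch_or above. The builtin returns the old
    // value, so "& 1" reports whether the bit was previously set.
    return __tsan_atomic8_fetch_and(&flags, (__tsan_atomic8)~1,
        __tsan_memory_order_acquire) & 1;
  }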

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h?rev=156766&r1=156765&r2=156766&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h Mon May 14 10:33:00 2012
@@ -49,16 +49,64 @@
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
     __tsan_memory_order mo);
 
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
 __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
 
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
 __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
 
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
 int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
     __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h?rev=156766&r1=156765&r2=156766&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h Mon May 14 10:33:00 2012
@@ -73,6 +73,9 @@
   StatAtomicStore,
   StatAtomicExchange,
   StatAtomicFetchAdd,
+  StatAtomicFetchAnd,
+  StatAtomicFetchOr,
+  StatAtomicFetchXor,
   StatAtomicCAS,
   StatAtomicFence,
   StatAtomicRelaxed,
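
The three new counters slot in alongside StatAtomicFetchAdd so the runtime's statistics output can break atomic RMW traffic down per operation. As a hypothetical one-liner (StatInc and its exact signature are an assumption, not shown in this diff), a handler would bump the matching counter roughly like so:

  // Hypothetical: record one fetch_or event for the current thread.
  StatInc(thr, StatAtomicFetchOr);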