[llvm-commits] [compiler-rt] r168683 - in /compiler-rt/trunk/lib/tsan/rtl: tsan_interface_atomic.cc tsan_interface_atomic.h tsan_stat.cc tsan_stat.h

Dmitry Vyukov dvyukov at google.com
Mon Nov 26 23:41:27 PST 2012


Author: dvyukov
Date: Tue Nov 27 01:41:27 2012
New Revision: 168683

URL: http://llvm.org/viewvc/llvm-project?rev=168683&view=rev
Log:
tsan: add 128-bit atomic operations
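
For illustration only (not part of this commit), a minimal usage sketch of the new 128-bit entry points, assuming a target where __int128 is available, the tsan_interface_atomic.h header is on the include path, and the program links against the TSan runtime; semantics follow the C++11 atomics model the header references:

  #include "tsan_interface_atomic.h"

  static volatile __tsan_atomic128 g_val;  // hypothetical shared 16-byte variable

  void example() {
    // Plain release store / acquire load of a 16-byte value.
    __tsan_atomic128_store(&g_val, 1, __tsan_memory_order_release);
    __tsan_atomic128 cur =
        __tsan_atomic128_load(&g_val, __tsan_memory_order_acquire);

    // Read-modify-write: returns the value that was stored before the add.
    __tsan_atomic128 prev =
        __tsan_atomic128_fetch_add(&g_val, cur, __tsan_memory_order_relaxed);
    (void)prev;
  }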

Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_stat.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=168683&r1=168682&r2=168683&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Tue Nov 27 01:41:27 2012
@@ -47,6 +47,7 @@
 typedef __tsan_atomic16 a16;
 typedef __tsan_atomic32 a32;
 typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
 const morder mo_relaxed = __tsan_memory_order_relaxed;
 const morder mo_consume = __tsan_memory_order_consume;
 const morder mo_acquire = __tsan_memory_order_acquire;
@@ -60,7 +61,8 @@
   StatInc(thr, size == 1 ? StatAtomic1
              : size == 2 ? StatAtomic2
              : size == 4 ? StatAtomic4
-             :             StatAtomic8);
+             : size == 8 ? StatAtomic8
+             :             StatAtomic16);
   StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
              : mo == mo_consume ? StatAtomicConsume
              : mo == mo_acquire ? StatAtomicAcquire
@@ -296,6 +298,10 @@
   SCOPED_ATOMIC(Load, a, mo);
 }
 
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+  SCOPED_ATOMIC(Load, a, mo);
+}
+
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
@@ -312,6 +318,10 @@
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Store, a, v, mo);
+}
+
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -328,6 +338,10 @@
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -344,6 +358,10 @@
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
@@ -360,6 +378,10 @@
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
@@ -376,6 +398,10 @@
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
@@ -392,6 +418,10 @@
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
@@ -408,6 +438,10 @@
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
@@ -424,6 +458,10 @@
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -444,6 +482,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -464,6 +507,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -483,6 +531,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);

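The first hunk above extends the size dispatch in the statistics code so that 16-byte accesses land in their own counter. An equivalent standalone sketch of that mapping (SizeToStat is a hypothetical name used only for illustration; the runtime keeps the logic inline as the ternary chain shown in the hunk):

  static StatType SizeToStat(uptr size) {
    switch (size) {
      case 1:  return StatAtomic1;
      case 2:  return StatAtomic2;
      case 4:  return StatAtomic4;
      case 8:  return StatAtomic8;
      default: return StatAtomic16;  // new: 128-bit (16-byte) operations
    }
  }
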
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h?rev=168683&r1=168682&r2=168683&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h Tue Nov 27 01:41:27 2012
@@ -17,10 +17,11 @@
 extern "C" {
 #endif
 
-typedef char  __tsan_atomic8;
-typedef short __tsan_atomic16;  // NOLINT
-typedef int   __tsan_atomic32;
-typedef long  __tsan_atomic64;  // NOLINT
+typedef char     __tsan_atomic8;
+typedef short    __tsan_atomic16;  // NOLINT
+typedef int      __tsan_atomic32;
+typedef long     __tsan_atomic64;  // NOLINT
+typedef __int128 __tsan_atomic128;
 
 // Part of ABI, do not change.
 // http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
@@ -41,6 +42,8 @@
     __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
     __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+    __tsan_memory_order mo);
 
 void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
     __tsan_memory_order mo);
@@ -50,6 +53,8 @@
     __tsan_memory_order mo);
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
     __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -59,6 +64,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -68,6 +75,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -77,6 +86,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -86,6 +97,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -95,6 +108,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -104,6 +119,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -113,6 +130,8 @@
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
     __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
@@ -126,6 +145,9 @@
 int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
     __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
     __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
@@ -139,6 +161,9 @@
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
     __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(
     volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
@@ -152,6 +177,9 @@
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(
     volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);

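For reference, a sketch of how the three compare-exchange flavors declared above differ (illustrative only; slot and try_claim are hypothetical, and the _val form is assumed here to return the value observed in memory, in the style of __sync_val_compare_and_swap):

  volatile __tsan_atomic128 slot;  // hypothetical shared slot

  int try_claim(__tsan_atomic128 expected, __tsan_atomic128 desired) {
    __tsan_atomic128 c = expected;
    // _weak may fail spuriously and is intended for retry loops;
    // _strong fails only if the stored value really differs from *c.
    if (__tsan_atomic128_compare_exchange_weak(
            &slot, &c, desired,
            __tsan_memory_order_acquire, __tsan_memory_order_relaxed))
      return 1;
    // _val returns the value seen in memory instead of a success flag.
    __tsan_atomic128 seen = __tsan_atomic128_compare_exchange_val(
        &slot, expected, desired,
        __tsan_memory_order_acquire, __tsan_memory_order_relaxed);
    return seen == expected;
  }
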
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_stat.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stat.cc?rev=168683&r1=168682&r2=168683&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stat.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stat.cc Tue Nov 27 01:41:27 2012
@@ -94,6 +94,7 @@
   name[StatAtomic2]                      = "            size 2                ";
   name[StatAtomic4]                      = "            size 4                ";
   name[StatAtomic8]                      = "            size 8                ";
+  name[StatAtomic16]                     = "            size 16               ";
 
   name[StatInterceptor]                  = "Interceptors                      ";
   name[StatInt_longjmp]                  = "  longjmp                         ";

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h?rev=168683&r1=168682&r2=168683&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stat.h Tue Nov 27 01:41:27 2012
@@ -90,6 +90,7 @@
   StatAtomic2,
   StatAtomic4,
   StatAtomic8,
+  StatAtomic16,
 
   // Interceptors.
   StatInterceptor,
