[llvm-commits] [compiler-rt] r168693 - in /compiler-rt/trunk/lib/tsan/rtl: tsan_interface_atomic.cc tsan_interface_atomic.h

Dmitry Vyukov dvyukov at google.com
Tue Nov 27 01:35:44 PST 2012


Author: dvyukov
Date: Tue Nov 27 03:35:44 2012
New Revision: 168693

URL: http://llvm.org/viewvc/llvm-project?rev=168693&view=rev
Log:
tsan: fix compilation for dead old compilers (why we are supporting them at all?..)

Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=168693&r1=168692&r2=168693&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Tue Nov 27 03:35:44 2012
@@ -298,9 +298,11 @@
   SCOPED_ATOMIC(Load, a, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
   SCOPED_ATOMIC(Load, a, mo);
 }
+#endif
 
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
@@ -318,9 +320,11 @@
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
@@ -338,9 +342,11 @@
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
@@ -358,9 +364,11 @@
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
@@ -378,9 +386,11 @@
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
@@ -398,9 +408,11 @@
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
@@ -418,9 +430,11 @@
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
@@ -438,9 +452,11 @@
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
+#endif
 
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
@@ -458,9 +474,11 @@
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
+#endif
 
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
@@ -482,10 +500,12 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
+#endif
 
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
@@ -507,10 +527,12 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
+#endif
 
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
@@ -531,10 +553,12 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
+#endif
 
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h?rev=168693&r1=168692&r2=168693&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.h Tue Nov 27 03:35:44 2012
@@ -21,7 +21,19 @@
 typedef short    __tsan_atomic16;  // NOLINT
 typedef int      __tsan_atomic32;
 typedef long     __tsan_atomic64;  // NOLINT
+
+#if (defined(__clang__) && defined(__clang_major__) \
+      && defined(__clang_minor__) && (__clang_major__ > 3 \
+      || (__clang_major__ == 3 && __clang_minor__ >= 3))) \
+    || (defined(__GNUC__) && defined(__GNUC_MINOR__) \
+      && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ > 4 || (__GNUC__ == 4 \
+      && (__GNUC_MINOR__ > 6 || (__GNUC_MINOR__ == 6 && __GNUC_PATCHLEVEL__ >= 3)))))
 typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
 
 // Part of ABI, do not change.
 // http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup





More information about the llvm-commits mailing list