[compiler-rt] r190991 - asan/tsan/msan: use builtin atomic operations when available
Dmitry Vyukov
dvyukov at google.com
Wed Sep 18 23:02:56 PDT 2013
Author: dvyukov
Date: Thu Sep 19 01:02:56 2013
New Revision: 190991
URL: http://llvm.org/viewvc/llvm-project?rev=190991&view=rev
Log:
asan/tsan/msan: use builtin atomic operations when available
This should fix sporadic crashes on ARM/PPC.
x86_32 is still broken.
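For illustration, here is a minimal standalone sketch (not part of the patch) of the technique the commit uses: map a sanitizer-style memory_order enum onto the compiler's __ATOMIC_* constants and dispatch to the __atomic_* builtins when the compiler provides them. The enum, the ATOMIC_ORDER macro, and the main driver below are simplified stand-ins for the real sanitizer_common definitions:

#include <cstdio>

// Simplified stand-in for the sanitizer's memory_order enum.
enum memory_order {
  memory_order_relaxed,
  memory_order_consume,
  memory_order_acquire,
  memory_order_release,
  memory_order_acq_rel,
  memory_order_seq_cst
};

#ifndef __has_builtin
# define __has_builtin(x) 0  // Compilers without __has_builtin (e.g. older gcc).
#endif

// Translate the enum into the constants the builtins expect.
#define ATOMIC_ORDER(mo) \
  ((mo) == memory_order_relaxed ? __ATOMIC_RELAXED : \
  (mo) == memory_order_consume ? __ATOMIC_CONSUME : \
  (mo) == memory_order_acquire ? __ATOMIC_ACQUIRE : \
  (mo) == memory_order_release ? __ATOMIC_RELEASE : \
  (mo) == memory_order_acq_rel ? __ATOMIC_ACQ_REL : \
  __ATOMIC_SEQ_CST)

int main() {
  long x = 42;
#if __has_builtin(__atomic_load_n)
  // The builtin emits whatever hardware fences the target needs;
  // that is what fixes the crashes on weakly ordered ARM/PPC.
  long v = __atomic_load_n(&x, ATOMIC_ORDER(memory_order_acquire));
#else
  long v = x;  // Plain load; adequate only on strongly ordered targets.
#endif
  printf("v = %ld\n", v);
  return 0;
}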
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_atomic_clang.h
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_atomic_clang.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_atomic_clang.h?rev=190991&r1=190990&r2=190991&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_atomic_clang.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_atomic_clang.h Thu Sep 19 01:02:56 2013
@@ -15,6 +15,18 @@
 #ifndef SANITIZER_ATOMIC_CLANG_H
 #define SANITIZER_ATOMIC_CLANG_H
 
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#define ATOMIC_ORDER(mo) \
+  ((mo) == memory_order_relaxed ? __ATOMIC_RELAXED : \
+  (mo) == memory_order_consume ? __ATOMIC_CONSUME : \
+  (mo) == memory_order_acquire ? __ATOMIC_ACQUIRE : \
+  (mo) == memory_order_release ? __ATOMIC_RELEASE : \
+  (mo) == memory_order_acq_rel ? __ATOMIC_ACQ_REL : \
+  __ATOMIC_SEQ_CST)
+
 namespace __sanitizer {
 
 INLINE void atomic_signal_fence(memory_order) {
@@ -41,7 +53,16 @@ INLINE typename T::Type atomic_load(
       | memory_order_acquire | memory_order_seq_cst));
   DCHECK(!((uptr)a % sizeof(*a)));
   typename T::Type v;
-  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+  // Use builtin atomic operations if available.
+  // But not on x86_64, because they lead to vastly inefficient code generation
+  // (http://llvm.org/bugs/show_bug.cgi?id=17281).
+  // And not on x86_32, because they are not implemented there
+  // (http://llvm.org/bugs/show_bug.cgi?id=15034).
+  // We have to use them on ARM/PPC/etc., because our implementation lacks the
+  // necessary memory fences.
+#if __has_builtin(__atomic_load_n) && !defined(__x86_64__) && !defined(__i386__)
+  v = __atomic_load_n(&a->val_dont_use, ATOMIC_ORDER(mo));
+#else
   if (mo == memory_order_relaxed) {
     v = a->val_dont_use;
   } else {
@@ -49,6 +70,7 @@ INLINE typename T::Type atomic_load(
     v = a->val_dont_use;
     atomic_signal_fence(memory_order_seq_cst);
   }
+#endif
   return v;
 }
@@ -57,7 +79,11 @@ INLINE void atomic_store(volatile T *a,
   DCHECK(mo & (memory_order_relaxed | memory_order_release
       | memory_order_seq_cst));
   DCHECK(!((uptr)a % sizeof(*a)));
-  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+  // See the comment in atomic_load.
+#if __has_builtin(__atomic_store_n) && !defined(__x86_64__) \
+    && !defined(__i386__)
+  __atomic_store_n(&a->val_dont_use, v, ATOMIC_ORDER(mo));
+#else
   if (mo == memory_order_relaxed) {
     a->val_dont_use = v;
   } else {
@@ -67,6 +93,7 @@ INLINE void atomic_store(volatile T *a,
   }
   if (mo == memory_order_seq_cst)
     atomic_thread_fence(memory_order_seq_cst);
+#endif
 }
 
 template<typename T>
@@ -121,4 +148,6 @@ INLINE bool atomic_compare_exchange_weak
 }  // namespace __sanitizer
 
+#undef ATOMIC_ORDER
+
 #endif  // SANITIZER_ATOMIC_CLANG_H
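For context, here is a standalone sketch (again not from the patch; my_atomic, compiler_barrier, and fallback_load are illustrative names, not the sanitizer's actual identifiers) of the fallback kept in the #else branches. atomic_signal_fence in sanitizer_common is a pure compiler barrier, so the fallback constrains only compiler reordering; no CPU fence is emitted, which is sound on strongly ordered x86 but not on ARM/PPC, hence the crashes the builtins fix:

#include <cstdio>

// Illustrative stand-in for the sanitizer's atomic wrapper type.
template <typename T>
struct my_atomic {
  volatile T val_dont_use;
};

// Same effect as the sanitizer's atomic_signal_fence(memory_order_seq_cst):
// an empty asm statement with a "memory" clobber. It blocks compiler
// reordering but generates no machine instruction.
static inline void compiler_barrier() {
  __asm__ __volatile__("" ::: "memory");
}

// The shape of the pre-existing fallback path for an acquire load.
template <typename T>
T fallback_load(const volatile my_atomic<T> *a) {
  compiler_barrier();   // The compiler cannot move memory accesses across...
  T v = a->val_dont_use;
  compiler_barrier();   // ...but a weakly ordered CPU still can.
  return v;
}

int main() {
  my_atomic<int> a = {7};
  printf("%d\n", fallback_load(&a));
  return 0;
}

On x86 the two barriers combined with the hardware's total store order are enough for acquire/release semantics, which is why the #else path is retained there.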