[compiler-rt] Use __atomic builtins whenever possible (PR #84439)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 7 23:36:56 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Alexander Richardson (arichardson)
The code in this file dates back to 2012, when Clang's support for atomic
builtins was still quite limited. The bugs referenced in the comment
at the top of the file have long been fixed, and using the compiler
builtins directly should now generate slightly better code.
Additionally, this allows using the __atomic builtins on platforms
where the __sync builtins are lacking (e.g. Arm Morello).
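For context, a minimal sketch (not from the patch; the function names are
made up) of the fence juggling the old __sync-based code needed, mirroring
the atomic_exchange hunk in the diff below:

```cpp
#include <cstdint>

// Old scheme: __sync_lock_test_and_set is only an acquire-barrier exchange,
// so a release (or seq_cst) exchange needs an explicit full barrier first.
inline uint32_t exchange_release_sync(volatile uint32_t *p, uint32_t v) {
  __sync_synchronize();                   // full fence to get release ordering
  return __sync_lock_test_and_set(p, v);  // acquire-only atomic exchange
}

// New scheme: the builtin takes the ordering directly, letting the compiler
// pick the cheapest instruction sequence for the target.
inline uint32_t exchange_release_atomic(volatile uint32_t *p, uint32_t v) {
  return __atomic_exchange_n(p, v, __ATOMIC_RELEASE);
}
```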
The non-Clang codepaths also have to change: the simplest way to make this
work is to give the memory_order enum the same values as the
compiler-provided __ATOMIC_* macros. Those macros are consecutive small
integers (with __ATOMIC_RELAXED == 0) rather than distinct bits, so the
debug checks that assumed the enum was always a bitflag must become
explicit equality comparisons.
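To illustrate why the checks change (a sketch, not from the patch; the
values shown are the ones GCC and Clang define):

```cpp
#include <cassert>

// The compiler-provided macros are consecutive integers, not bit masks:
// __ATOMIC_RELAXED == 0, __ATOMIC_CONSUME == 1, __ATOMIC_ACQUIRE == 2,
// __ATOMIC_RELEASE == 3, __ATOMIC_ACQ_REL == 4, __ATOMIC_SEQ_CST == 5.
enum memory_order {
  memory_order_relaxed = __ATOMIC_RELAXED,
  memory_order_acquire = __ATOMIC_ACQUIRE,
  memory_order_release = __ATOMIC_RELEASE,
  memory_order_seq_cst = __ATOMIC_SEQ_CST,
};

void check_store_order(memory_order mo) {
  // The old bitflag test would now be wrong twice over: relaxed is 0, so it
  // could never pass, and (0 | 3 | 5) == 7, so a disallowed order such as
  // acquire (2) would slip through:
  //   assert(mo & (memory_order_relaxed | memory_order_release |
  //                memory_order_seq_cst));
  // Hence the explicit equality comparisons:
  assert(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
}
```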
---
Full diff: https://github.com/llvm/llvm-project/pull/84439.diff
6 Files Affected:
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic.h (+9)
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h (+17-32)
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h (+8-8)
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h (+4-4)
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h (+4-4)
- (modified) compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h (+3-4)
``````````diff
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
index 46f06957228c9b..2dab4e4b8725d7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
@@ -18,12 +18,21 @@
namespace __sanitizer {
enum memory_order {
+#ifdef __ATOMIC_SEQ_CST
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+#else
memory_order_relaxed = 1 << 0,
memory_order_consume = 1 << 1,
memory_order_acquire = 1 << 2,
memory_order_release = 1 << 3,
memory_order_acq_rel = 1 << 4,
memory_order_seq_cst = 1 << 5
+#endif
};
struct atomic_uint8_t {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index 4318d64d16cfa2..e4f02b7f6f685c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -22,52 +22,37 @@
namespace __sanitizer {
-// We would like to just use compiler builtin atomic operations
-// for loads and stores, but they are mostly broken in clang:
-// - they lead to vastly inefficient code generation
-// (http://llvm.org/bugs/show_bug.cgi?id=17281)
-// - 64-bit atomic operations are not implemented on x86_32
-// (http://llvm.org/bugs/show_bug.cgi?id=15034)
-// - they are not implemented on ARM
-// error: undefined reference to '__atomic_load_4'
+// We use the compiler builtin atomic operations for loads and stores, which
+// generates correct code for all architectures, but may require libatomic
+// on platforms where e.g. 64-bit atomics are not supported natively.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
-inline void atomic_signal_fence(memory_order) {
- __asm__ __volatile__("" ::: "memory");
-}
+inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }
-inline void atomic_thread_fence(memory_order) {
- __sync_synchronize();
-}
+inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
-template<typename T>
-inline typename T::Type atomic_fetch_add(volatile T *a,
- typename T::Type v, memory_order mo) {
- (void)mo;
+template <typename T>
+inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
+ memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
- return __sync_fetch_and_add(&a->val_dont_use, v);
+ return __atomic_fetch_add(&a->val_dont_use, v, mo);
}
-template<typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *a,
- typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
+ memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
- return __sync_fetch_and_add(&a->val_dont_use, -v);
+ return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}
-template<typename T>
-inline typename T::Type atomic_exchange(volatile T *a,
- typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
+ memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
- if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
- __sync_synchronize();
- v = __sync_lock_test_and_set(&a->val_dont_use, v);
- if (mo == memory_order_seq_cst)
- __sync_synchronize();
- return v;
+ return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
template <typename T>
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
index f3d3052e5b7c5c..5fc2bf25f3cd51 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -40,8 +40,8 @@ template <>
inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret;
@@ -66,8 +66,8 @@ inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)ptr % sizeof(*ptr)));
typedef atomic_uint64_t::Type Type;
@@ -89,8 +89,8 @@ inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
template <>
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type zero = 0;
@@ -102,8 +102,8 @@ inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
template <>
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)ptr % sizeof(*ptr)));
__spin_lock(&lock.lock);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index 557082a636b879..5ace0d2213275d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -24,8 +24,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
+ mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
@@ -58,8 +58,8 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
index b81a354d209872..966463acd04b8a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -26,8 +26,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
+ mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
@@ -71,8 +71,8 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
index 31317adcdfc99f..ed82f62a1c96c3 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -70,8 +70,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_consume
+ || mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
@@ -87,8 +87,7 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
``````````
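One practical note on the libatomic remark in the new comment (a sketch,
not part of the patch):

```cpp
#include <cstdint>

// On a target without native 8-byte atomics (e.g. some 32-bit platforms),
// this lowers to a call to __atomic_load_8, which libatomic must provide;
// where native support exists it inlines to ordinary atomic instructions.
uint64_t load_acquire_64(const volatile uint64_t *p) {
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}
```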
https://github.com/llvm/llvm-project/pull/84439