[llvm-branch-commits] Revert "[nfc][tsan] Move out `morder` params from __VA_ARGS__ (#114916)" (PR #115030)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Nov 5 09:23:00 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Vitaly Buka (vitalybuka)
Changes:
This reverts commit a9f829a3d7556593e0814080c8e33eca09e3a51e.
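
For context, the title refers to the `ATOMIC_IMPL` dispatch macro in `tsan_interface_atomic.cpp`: the reverted commit had made the memory order a dedicated leading macro parameter, while this revert puts `mo` back inside `__VA_ARGS__` (in trailing position) and rewrites it in place with `mo = convert_morder(mo);` before the instrumented call. Below is a minimal, self-contained sketch of the restored shape, using stand-in types and a stub `OpLoad` rather than the real tsan runtime:

```cpp
#include <cstdio>

// Stand-ins for the tsan runtime pieces (simplified, for illustration only).
using morder = int;
using a32 = int;
static morder convert_morder(morder mo) { return mo & 0x7fff; }
static bool ignore_sync = false;

struct OpLoad {
  // Post-revert parameter order: the memory order is the trailing argument.
  static a32 NoTsanAtomic(const volatile a32 *a, morder /*mo*/) { return *a; }
  static a32 Atomic(void * /*thr*/, unsigned long /*pc*/,
                    const volatile a32 *a, morder /*mo*/) { return *a; }
};

// Shape restored by this revert: `mo` rides along inside __VA_ARGS__, and the
// enclosing function's `mo` parameter is rewritten before the instrumented call.
#define ATOMIC_IMPL(func, ...)                      \
  if (ignore_sync)                                  \
    return Op##func::NoTsanAtomic(__VA_ARGS__);     \
  mo = convert_morder(mo);                          \
  return Op##func::Atomic(nullptr, 0, __VA_ARGS__);

// Hypothetical interface function modeled on __tsan_atomic32_load.
a32 atomic32_load_sketch(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

int main() {
  a32 x = 42;
  std::printf("%d\n", atomic32_load_sketch(&x, /*mo=*/0));
  return 0;
}
```

The pre-revert form instead declared the macro as `ATOMIC_IMPL(func, mo, ...)` and passed `convert_morder(mo)` explicitly, so the memory order came first at every call site; the diff below swaps that ordering back.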
---
Patch is 25.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/115030.diff
1 file affected:
- (modified) compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp (+126-126)
``````````diff
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 67106f59f6e7f5..6190e315f72c34 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -248,30 +248,30 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
struct OpLoad {
template <typename T>
- static T NoTsanAtomic(morder mo, const volatile T *a) {
+ static T NoTsanAtomic(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
#if __TSAN_HAS_INT128 && !SANITIZER_GO
- static a128 NoTsanAtomic(morder mo, const volatile a128 *a) {
+ static a128 NoTsanAtomic(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
}
#endif
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
+ static T Atomic(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
DCHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
if (!IsAcquireOrder(mo)) {
MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
kAccessRead | kAccessAtomic);
- return NoTsanAtomic(mo, a);
+ return NoTsanAtomic(a, mo);
}
// Don't create sync object if it does not exist yet. For example, an atomic
// pointer is initialized to nullptr and then periodically acquire-loaded.
- T v = NoTsanAtomic(mo, a);
+ T v = NoTsanAtomic(a, mo);
SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
if (s) {
SlotLocker locker(thr);
@@ -279,7 +279,7 @@ struct OpLoad {
thr->clock.Acquire(s->clock);
// Re-read under sync mutex because we need a consistent snapshot
// of the value and the clock we acquire.
- v = NoTsanAtomic(mo, a);
+ v = NoTsanAtomic(a, mo);
}
MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
kAccessRead | kAccessAtomic);
@@ -289,19 +289,19 @@ struct OpLoad {
struct OpStore {
template <typename T>
- static void NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static void NoTsanAtomic(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
#if __TSAN_HAS_INT128 && !SANITIZER_GO
- static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) {
+ static void NoTsanAtomic(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
}
#endif
template <typename T>
- static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static void Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
DCHECK(IsStoreOrder(mo));
MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
kAccessWrite | kAccessAtomic);
@@ -310,7 +310,7 @@ struct OpStore {
// Strictly saying even relaxed store cuts off release sequence,
// so must reset the clock.
if (!IsReleaseOrder(mo)) {
- NoTsanAtomic(mo, a, v);
+ NoTsanAtomic(a, v, mo);
return;
}
SlotLocker locker(thr);
@@ -318,7 +318,7 @@ struct OpStore {
auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
Lock lock(&s->mtx);
thr->clock.ReleaseStore(&s->clock);
- NoTsanAtomic(mo, a, v);
+ NoTsanAtomic(a, v, mo);
}
IncrementEpoch(thr);
}
@@ -326,96 +326,96 @@ struct OpStore {
struct OpExchange {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_xchg(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}
};
struct OpFetchAdd {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_add(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}
};
struct OpFetchSub {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_sub(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}
};
struct OpFetchAnd {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_and(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}
};
struct OpFetchOr {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_or(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}
};
struct OpFetchXor {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_xor(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}
};
struct OpFetchNand {
template <typename T>
- static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ static T NoTsanAtomic(volatile T *a, T v, morder mo) {
return func_nand(a, v);
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
};
struct OpCAS {
template <typename T>
- static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) {
+ static bool NoTsanAtomic(volatile T *a, T *c, T v, morder mo, morder fmo) {
return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
#if __TSAN_HAS_INT128
- static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c,
- a128 v) {
+ static bool NoTsanAtomic(volatile a128 *a, a128 *c, a128 v, morder mo,
+ morder fmo) {
a128 old = *c;
a128 cur = func_cas(a, old, v);
if (cur == old)
@@ -426,14 +426,14 @@ struct OpCAS {
#endif
template <typename T>
- static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) {
- NoTsanAtomic(mo, fmo, a, &c, v);
+ static T NoTsanAtomic(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomic(a, &c, v, mo, fmo);
return c;
}
template <typename T>
- static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
- volatile T *a, T *c, T v) {
+ static bool Atomic(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+ morder mo, morder fmo) {
// 31.7.2.18: "The failure argument shall not be memory_order_release
// nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
// (mo_relaxed) when those are used.
@@ -475,9 +475,9 @@ struct OpCAS {
}
template <typename T>
- static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
- volatile T *a, T c, T v) {
- Atomic(thr, pc, mo, fmo, a, &c, v);
+ static T Atomic(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo,
+ morder fmo) {
+ Atomic(thr, pc, a, &c, v, mo, fmo);
return c;
}
};
@@ -517,351 +517,351 @@ static morder convert_morder(morder mo) {
return (morder)(mo & 0x7fff);
}
-# define ATOMIC_IMPL(func, mo, ...) \
- ThreadState *const thr = cur_thread(); \
- ProcessPendingSignals(thr); \
- if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
- return Op##func::NoTsanAtomic(mo, ##__VA_ARGS__); \
- return Op##func::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), \
- ##__VA_ARGS__);
+# define ATOMIC_IMPL(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ ProcessPendingSignals(thr); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
+ return Op##func::NoTsanAtomic(__VA_ARGS__); \
+ mo = convert_morder(mo); \
+ return Op##func::Atomic(thr, GET_CALLER_PC(), __VA_ARGS__);
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
- ATOMIC_IMPL(Load, mo, a);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
- ATOMIC_IMPL(Load, mo, a);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
- ATOMIC_IMPL(Load, mo, a);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
- ATOMIC_IMPL(Load, mo, a);
+ ATOMIC_IMPL(Load, a, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
- ATOMIC_IMPL(Load, mo, a);
+ ATOMIC_IMPL(Load, a, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(Store, mo, a, v);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(Store, mo, a, v);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(Store, mo, a, v);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(Store, mo, a, v);
+ ATOMIC_IMPL(Store, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(Store, mo, a, v);
+ ATOMIC_IMPL(Store, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(Exchange, mo, a, v);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(Exchange, mo, a, v);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(Exchange, mo, a, v);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(Exchange, mo, a, v);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(Exchange, mo, a, v);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, mo, a, v);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, mo, a, v);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, mo, a, v);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, mo, a, v);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, mo, a, v);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchSub, mo, a, v);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchSub, mo, a, v);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchSub, mo, a, v);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchSub, mo, a, v);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchSub, mo, a, v);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, mo, a, v);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, mo, a, v);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, mo, a, v);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, mo, a, v);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, mo, a, v);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchOr, mo, a, v);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchOr, mo, a, v);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchOr, mo, a, v);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchOr, mo, a, v);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchOr, mo, a, v);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchXor, mo, a, v);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchXor, mo, a, v);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchXor, mo, a, v);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchXor, mo, a, v);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchXor, mo, a, v);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchNand, mo, a, v);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchNand, mo, a, v);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchNand, mo, a, v);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchNand, mo, a, v);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchNand, mo, a, v);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
# endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
morder fmo) {
- ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/115030
More information about the llvm-branch-commits mailing list