[llvm-branch-commits] [tsan] Don't use `enum __tsan_memory_order` in tsan interface (PR #114724)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sun Nov 3 16:20:20 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Vitaly Buka (vitalybuka)
Changes:
In C++ it is undefined behavior to use values outside
an enum's declared enumerators. Supporting
`__ATOMIC_HLE_ACQUIRE` and `__ATOMIC_HLE_RELEASE`
requires exactly such values, since those flags are
OR'ed into a memory order. The internal implementation
was switched to `enum class`, which has a fixed
underlying type, so such values are well-defined there.
The public interface is C, however, so there we simply
switch to `int`.
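
For context, a minimal C++ sketch of the rule in question (not part of the patch; `kHleAcquire` is an illustrative stand-in, not the real `__ATOMIC_HLE_ACQUIRE` macro):

```cpp
// Plain enum: no fixed underlying type, so its range of values is only
// wide enough to cover the declared enumerators.
enum legacy_mo { mo_relaxed, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst };

// Scoped enum: underlying type is fixed to int, so any int value is
// representable and conversions are well-defined.
enum class morder : int { relaxed, acquire, release, acq_rel, seq_cst };

constexpr int kHleAcquire = 1 << 16;  // stand-in for __ATOMIC_HLE_ACQUIRE

int main() {
  // Undefined behavior: mo_acquire | kHleAcquire is outside the range
  // of legacy_mo's values, so this cast is UB.
  // legacy_mo bad = static_cast<legacy_mo>(mo_acquire | kHleAcquire);

  // Well-defined: round-trips through the fixed underlying type (int).
  morder ok = static_cast<morder>(
      static_cast<int>(morder::acquire) | kHleAcquire);
  (void)ok;
  return 0;
}
```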
---
Patch is 32.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/114724.diff
4 Files Affected:
- (modified) compiler-rt/include/sanitizer/tsan_interface_atomic.h (+82-87)
- (modified) compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp (+59-57)
- (modified) compiler-rt/lib/tsan/rtl/tsan_interface.h (+8-8)
- (modified) compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp (+46-32)
``````````diff
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index de3a1c3936097d..74ed91efade040 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -43,183 +43,178 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
__tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
__tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
__tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
- const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL
+__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
#endif
void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v,
- __tsan_memory_order mo);
+ __tsan_atomic8 v, int mo);
void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v,
- __tsan_memory_order mo);
+ __tsan_atomic16 v, int mo);
void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v,
- __tsan_memory_order mo);
+ __tsan_atomic32 v, int mo);
void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v,
- __tsan_memory_order mo);
+ __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v,
- __tsan_memory_order mo);
+ __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
+ int fail_mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
+ int fail_mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
+ int fail_mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
-void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
#ifdef __cplusplus
} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 9db0eebd923696..1f6e0ab9f49347 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -40,62 +40,64 @@ int setcontext(const ucontext_t *ucp);
namespace __tsan {
-// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
-// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
-// actually aliases of each other, and we cannot have different interceptors for
-// them, because they're actually the same function. Thus, we have to stay
-// conservative and treat the non-barrier versions as mo_acq_rel.
-static constexpr morder kMacOrderBarrier = mo_acq_rel;
-static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
-static constexpr morder kMacFailureOrder = mo_relaxed;
-
-#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
- }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
- }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
- }
+// The non-barrier versions of OSAtomic* functions are semantically
+// morder::relaxed, but the two variants (e.g. OSAtomicAdd32 and
+// OSAtomicAdd32Barrier) are actually aliases of each other, and we cannot have
+// different interceptors for them, because they're actually the same function.
+// Thus, we have to stay conservative and treat the non-barrier versions as
+// morder::acq_rel.
+static constexpr morder kMacOrderBarrier = morder::acq_rel;
+static constexpr morder kMacOrderNonBarrier = morder::acq_rel;
+static constexpr morder kMacFailureOrder = morder::relaxed;
+
+# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
-#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
- mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
- }
+# define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
-#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
- m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderBarrier)
-
-#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
- m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
- __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+# define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+# define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) ...
[truncated]
``````````
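
A hedged caller-side sketch of the interface after this change (the include path and the need to link the TSan runtime, e.g. via `-fsanitize=thread`, are assumptions, not shown in the patch):

```cpp
#include <sanitizer/tsan_interface_atomic.h>

static __tsan_atomic32 counter;

int load_counter() {
  // The mo parameter is now a plain int; the __tsan_memory_order
  // enumerators declared in the header convert to it implicitly, and
  // extension bits can now be OR'ed in without out-of-range enum values.
  return __tsan_atomic32_load(&counter, __tsan_memory_order_acquire);
}
```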
https://github.com/llvm/llvm-project/pull/114724