[llvm-branch-commits] [compiler-rt] a3efa16 - Revert "Revert "[tsan] Don't use `enum __tsan_memory_order` in tsan interface…"

via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Tue Nov 5 09:44:41 PST 2024


Author: Vitaly Buka
Date: 2024-11-05T09:44:36-08:00
New Revision: a3efa16d8fd2efc5d1ed8bc984e58c908202b048

URL: https://github.com/llvm/llvm-project/commit/a3efa16d8fd2efc5d1ed8bc984e58c908202b048
DIFF: https://github.com/llvm/llvm-project/commit/a3efa16d8fd2efc5d1ed8bc984e58c908202b048.diff

LOG: Revert "Revert "[tsan] Don't use `enum __tsan_memory_order` in tsan interface…"

This reverts commit b14c436311e3ff78f61dd59c90486432d13bf38e.
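
For context, a minimal sketch of the boundary pattern this change
reinstates (illustrative only; `example_load_is_seq_cst` is a hypothetical
name, not part of the patch): the extern "C" entry points take a plain
`int`, and the runtime casts it back to the internal scoped `morder` enum
once at the boundary.

  enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };

  extern "C" int example_load_is_seq_cst(int mo) {
    // Cast once at the ABI boundary; internals keep using the scoped enum.
    return static_cast<morder>(mo) == morder::seq_cst;
  }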

Added: 
    

Modified: 
    compiler-rt/include/sanitizer/tsan_interface_atomic.h
    compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
    compiler-rt/lib/tsan/rtl/tsan_interface.h
    compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index de3a1c3936097d..74ed91efade040 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -43,183 +43,178 @@ typedef enum {
 } __tsan_memory_order;
 
 __tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
 __tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
 __tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
 __tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
 #if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
-    const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL
+__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
 #endif
 
 void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
-                                          __tsan_atomic8 v,
-                                          __tsan_memory_order mo);
+                                          __tsan_atomic8 v, int mo);
 void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
-                                           __tsan_atomic16 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic16 v, int mo);
 void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
-                                           __tsan_atomic32 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic32 v, int mo);
 void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
-                                           __tsan_atomic64 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
-                                            __tsan_atomic128 v,
-                                            __tsan_memory_order mo);
+                                            __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
 int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
     volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
 int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
     volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
 __tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
     volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
-void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
 
 #ifdef __cplusplus
 } // extern "C"

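From the caller's side this is source-compatible, because the enum's
values convert implicitly to int. A hedged usage sketch (assuming the
header's usual `__tsan_memory_order_acquire` enumerator, which is declared
above this hunk):

  #include <sanitizer/tsan_interface_atomic.h>

  __tsan_atomic32 load_acquire(const volatile __tsan_atomic32 *p) {
    // The enumerator argument converts implicitly to the new int parameter.
    return __tsan_atomic32_load(p, __tsan_memory_order_acquire);
  }
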
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index e0e4c5b9d36cd3..b4257e76c3b903 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -40,14 +40,15 @@ int setcontext(const ucontext_t *ucp);
 
 namespace __tsan {
 
-// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
-// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
-// actually aliases of each other, and we cannot have different interceptors for
-// them, because they're actually the same function.  Thus, we have to stay
-// conservative and treat the non-barrier versions as mo_acq_rel.
-static constexpr morder kMacOrderBarrier = mo_acq_rel;
-static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
-static constexpr morder kMacFailureOrder = mo_relaxed;
+// The non-barrier versions of OSAtomic* functions are semantically
+// morder::relaxed, but the two variants (e.g. OSAtomicAdd32 and
+// OSAtomicAdd32Barrier) are actually aliases of each other, and we cannot have
+// different interceptors for them, because they're actually the same function.
+// Thus, we have to stay conservative and treat the non-barrier versions as
+// morder::acq_rel.
+static constexpr morder kMacOrderBarrier = morder::acq_rel;
+static constexpr morder kMacOrderNonBarrier = morder::acq_rel;
+static constexpr morder kMacFailureOrder = morder::relaxed;
 
 #  define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
     TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
@@ -464,7 +465,7 @@ struct fake_shared_weak_count {
 // Shared and weak pointers in C++ maintain reference counts via atomics in
 // libc++.dylib, which are TSan-invisible, and this leads to false positives in
 // destructor code. These interceptors re-implements the whole functions so that
-// the mo_acq_rel semantics of the atomic decrement are visible.
+// the morder::acq_rel semantics of the atomic decrement are visible.
 //
 // Unfortunately, the interceptors cannot simply Acquire/Release some sync
 // object and call the original function, because it would have a race between
@@ -479,11 +480,11 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
 
   SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                           o);
-  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, morder::release) == 0) {
     Acquire(thr, pc, (uptr)&o->shared_owners);
     o->on_zero_shared();
-    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
-        0) {
+    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1,
+                                  morder::release) == 0) {
       Acquire(thr, pc, (uptr)&o->shared_weak_owners);
       o->on_zero_shared_weak();
     }
@@ -496,7 +497,7 @@ STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
     return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
 
   SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
-  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, morder::release) == 0) {
     Acquire(thr, pc, (uptr)&o->shared_owners);
     o->on_zero_shared();
     return true;

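To make the aliasing constraint above concrete, here is a self-contained
sketch (`my_OSAtomicAdd32` and `fetch_add_model` are hypothetical
stand-ins, not the real interceptor machinery): since the barrier and
non-barrier entry points resolve to one symbol, a single interceptor
serves both and must assume the stronger morder::acq_rel ordering.

  #include <cstdint>

  enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };
  static constexpr morder kMacOrderNonBarrier = morder::acq_rel;  // conservative

  // Stand-in for __tsan_atomic32_fetch_add; the ordering is accepted but
  // the modeled operation is just a __sync atomic add.
  static std::int32_t fetch_add_model(volatile std::int32_t *p,
                                      std::int32_t v, int /*mo*/) {
    return __sync_fetch_and_add(p, v);
  }

  extern "C" std::int32_t my_OSAtomicAdd32(std::int32_t x,
                                           volatile std::int32_t *ptr) {
    // OSAtomicAdd32 and OSAtomicAdd32Barrier land on the same symbol, so
    // one body must pass the conservative ordering for both; OSAtomicAdd32
    // returns the new value, hence the trailing + x.
    return fetch_add_model(ptr, x, static_cast<int>(kMacOrderNonBarrier)) + x;
  }
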
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.h b/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 2b8a13ddb842cc..9751e891b6d90c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -219,193 +219,193 @@ __extension__ typedef __int128 a128;
 
 // Part of ABI, do not change.
 // https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
-typedef enum {
-  mo_relaxed,
-  mo_consume,
-  mo_acquire,
-  mo_release,
-  mo_acq_rel,
-  mo_seq_cst
-} morder;
+enum class morder : int {
+  relaxed,
+  consume,
+  acquire,
+  release,
+  acq_rel,
+  seq_cst
+};
 
 struct ThreadState;
 
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+a8 __tsan_atomic8_load(const volatile a8 *a, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+a16 __tsan_atomic16_load(const volatile a16 *a, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+a32 __tsan_atomic32_load(const volatile a32 *a, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+a64 __tsan_atomic64_load(const volatile a64 *a, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+a128 __tsan_atomic128_load(const volatile a128 *a, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
-                                           morder mo, morder fmo);
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
+                                           int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
-                                            morder mo, morder fmo);
+                                            int mo, int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
-                                            morder mo, morder fmo);
+                                            int mo, int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
-                                            morder mo, morder fmo);
+                                            int mo, int fmo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
-                                             morder mo, morder fmo);
+                                             int mo, int fmo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
-                                         morder fmo);
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
+                                         int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
-                                          morder mo, morder fmo);
+                                          int mo, int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
-                                          morder mo, morder fmo);
+                                          int mo, int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
-                                          morder mo, morder fmo);
+                                          int mo, int fmo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
-                                           morder mo, morder fmo);
+                                           int mo, int fmo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
-                                       morder fmo);
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
+                                       int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
-                                         morder mo, morder fmo);
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
+                                         int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
-                                         morder mo, morder fmo);
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
+                                         int fmo);
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
-                                         morder mo, morder fmo);
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
+                                         int fmo);
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
-                                           morder mo, morder fmo);
+                                           int mo, int fmo);
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo);
+void __tsan_atomic_thread_fence(int mo);
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_signal_fence(morder mo);
+void __tsan_atomic_signal_fence(int mo);
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);

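One detail worth noting from this hunk: switching the "Part of ABI, do not
change" enum to `enum class morder : int` keeps the enumerators' numeric
values, so the contract with libc++'s memory_order numbering is preserved.
A hedged, self-contained check of that property:

  enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };

  // Implicit enumerator values still run 0..5, matching the old C enum.
  static_assert(static_cast<int>(morder::relaxed) == 0, "ABI value changed");
  static_assert(static_cast<int>(morder::seq_cst) == 5, "ABI value changed");
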
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 67106f59f6e7f5..283eaee3b8ba7a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -34,26 +34,28 @@ static StaticSpinMutex mutex128;
 
 #if SANITIZER_DEBUG
 static bool IsLoadOrder(morder mo) {
-  return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
-         mo == mo_seq_cst;
+  return mo == morder::relaxed || mo == morder::consume ||
+         mo == morder::acquire || mo == morder::seq_cst;
 }
 
 static bool IsStoreOrder(morder mo) {
-  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+  return mo == morder::relaxed || mo == morder::release ||
+         mo == morder::seq_cst;
 }
 #endif
 
 static bool IsReleaseOrder(morder mo) {
-  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+  return mo == morder::release || mo == morder::acq_rel ||
+         mo == morder::seq_cst;
 }
 
 static bool IsAcquireOrder(morder mo) {
-  return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
-         mo == mo_seq_cst;
+  return mo == morder::consume || mo == morder::acquire ||
+         mo == morder::acq_rel || mo == morder::seq_cst;
 }
 
 static bool IsAcqRelOrder(morder mo) {
-  return mo == mo_acq_rel || mo == mo_seq_cst;
+  return mo == morder::acq_rel || mo == morder::seq_cst;
 }
 
 template <typename T>
@@ -205,17 +207,17 @@ static atomic_uint64_t *to_atomic(const volatile a64 *a) {
 
 static memory_order to_mo(morder mo) {
   switch (mo) {
-    case mo_relaxed:
+    case morder::relaxed:
       return memory_order_relaxed;
-    case mo_consume:
+    case morder::consume:
       return memory_order_consume;
-    case mo_acquire:
+    case morder::acquire:
       return memory_order_acquire;
-    case mo_release:
+    case morder::release:
       return memory_order_release;
-    case mo_acq_rel:
+    case morder::acq_rel:
       return memory_order_acq_rel;
-    case mo_seq_cst:
+    case morder::seq_cst:
       return memory_order_seq_cst;
   }
   DCHECK(0);
@@ -227,7 +229,7 @@ namespace {
 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
-  if (LIKELY(mo == mo_relaxed))
+  if (LIKELY(mo == morder::relaxed))
     return F(a, v);
   SlotLocker locker(thr);
   {
@@ -436,12 +438,12 @@ struct OpCAS {
                      volatile T *a, T *c, T v) {
     // 31.7.2.18: "The failure argument shall not be memory_order_release
     // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
-    // (mo_relaxed) when those are used.
+    // (morder::relaxed) when those are used.
     DCHECK(IsLoadOrder(fmo));
 
     MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                  kAccessWrite | kAccessAtomic);
-    if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+    if (LIKELY(mo == morder::relaxed && fmo == morder::relaxed)) {
       T cc = *c;
       T pr = func_cas(a, cc, v);
       if (pr == cc)
@@ -502,7 +504,7 @@ struct OpFence {
 
 static morder convert_morder(morder mo) {
   if (flags()->force_seq_cst_atomics)
-    return (morder)mo_seq_cst;
+    return (morder)morder::seq_cst;
 
   // Filter out additional memory order flags:
   // MEMMODEL_SYNC        = 1 << 15
@@ -514,7 +516,7 @@ static morder convert_morder(morder mo) {
   // since we use __sync_ atomics for actual atomic operations,
   // we can safely ignore it as well. It also subtly affects semantics,
   // but we don't model the difference.
-  return (morder)(mo & 0x7fff);
+  return static_cast<morder>(static_cast<int>(mo) & 0x7fff);
 }
 
 #  define ATOMIC_IMPL(func, mo, ...)                                  \
@@ -527,349 +529,351 @@ static morder convert_morder(morder mo) {
 
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
-  ATOMIC_IMPL(Load, mo, a);
+a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
+  ATOMIC_IMPL(Load, static_cast<morder>(mo), a);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
-  ATOMIC_IMPL(Load, mo, a);
+a16 __tsan_atomic16_load(const volatile a16 *a, int mo) {
+  ATOMIC_IMPL(Load, static_cast<morder>(mo), a);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
-  ATOMIC_IMPL(Load, mo, a);
+a32 __tsan_atomic32_load(const volatile a32 *a, int mo) {
+  ATOMIC_IMPL(Load, static_cast<morder>(mo), a);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
-  ATOMIC_IMPL(Load, mo, a);
+a64 __tsan_atomic64_load(const volatile a64 *a, int mo) {
+  ATOMIC_IMPL(Load, static_cast<morder>(mo), a);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
-  ATOMIC_IMPL(Load, mo, a);
+a128 __tsan_atomic128_load(const volatile a128 *a, int mo) {
+  ATOMIC_IMPL(Load, static_cast<morder>(mo), a);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(Store, mo, a, v);
+void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(Store, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(Store, mo, a, v);
+void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(Store, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(Store, mo, a, v);
+void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(Store, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(Store, mo, a, v);
+void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(Store, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(Store, mo, a, v);
+void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(Store, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(Exchange, mo, a, v);
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(Exchange, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(Exchange, mo, a, v);
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(Exchange, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(Exchange, mo, a, v);
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(Exchange, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(Exchange, mo, a, v);
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(Exchange, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(Exchange, mo, a, v);
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(Exchange, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchAdd, mo, a, v);
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchAdd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchAdd, mo, a, v);
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchAdd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchAdd, mo, a, v);
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchAdd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchAdd, mo, a, v);
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchAdd, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchAdd, mo, a, v);
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchAdd, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchSub, mo, a, v);
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchSub, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchSub, mo, a, v);
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchSub, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchSub, mo, a, v);
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchSub, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchSub, mo, a, v);
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchSub, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchSub, mo, a, v);
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchSub, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchAnd, mo, a, v);
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchAnd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchAnd, mo, a, v);
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchAnd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchAnd, mo, a, v);
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchAnd, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchAnd, mo, a, v);
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchAnd, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchAnd, mo, a, v);
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchAnd, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchOr, mo, a, v);
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchOr, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchOr, mo, a, v);
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchOr, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchOr, mo, a, v);
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchOr, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchOr, mo, a, v);
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchOr, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchOr, mo, a, v);
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchOr, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchXor, mo, a, v);
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchXor, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchXor, mo, a, v);
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchXor, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchXor, mo, a, v);
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchXor, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchXor, mo, a, v);
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchXor, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchXor, mo, a, v);
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchXor, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
-  ATOMIC_IMPL(FetchNand, mo, a, v);
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) {
+  ATOMIC_IMPL(FetchNand, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
-  ATOMIC_IMPL(FetchNand, mo, a, v);
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) {
+  ATOMIC_IMPL(FetchNand, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
-  ATOMIC_IMPL(FetchNand, mo, a, v);
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) {
+  ATOMIC_IMPL(FetchNand, static_cast<morder>(mo), a, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
-  ATOMIC_IMPL(FetchNand, mo, a, v);
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) {
+  ATOMIC_IMPL(FetchNand, static_cast<morder>(mo), a, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
-  ATOMIC_IMPL(FetchNand, mo, a, v);
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) {
+  ATOMIC_IMPL(FetchNand, static_cast<morder>(mo), a, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
-                                           morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
+                                           int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
-                                            morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                            int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
-                                            morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                            int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
-                                            morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                            int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
-                                             morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                             int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
-                                         morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
+                                         int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
-                                          morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                          int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
-                                          morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                          int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
-                                          morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                          int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
-                                           morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                           int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
-                                       morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
+                                       int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
-                                         morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
+                                         int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
-                                         morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
+                                         int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
-                                         morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
+                                         int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 
 #  if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
-                                           morder mo, morder fmo) {
-  ATOMIC_IMPL(CAS, mo, fmo, a, c, v);
+                                           int mo, int fmo) {
+  ATOMIC_IMPL(CAS, static_cast<morder>(mo), static_cast<morder>(fmo), a, c, v);
 }
 #  endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
+void __tsan_atomic_thread_fence(int mo) {
+  ATOMIC_IMPL(Fence, static_cast<morder>(mo));
+}
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_signal_fence(morder mo) {}
+void __tsan_atomic_signal_fence(int mo) {}
 }  // extern "C"
 
 #else  // #if !SANITIZER_GO
@@ -897,69 +901,69 @@ void __tsan_atomic_signal_fence(morder mo) {}
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Load, *(a32 *)(a + 8), mo_acquire, *(a32 **)a);
+  ATOMIC_RET(Load, *(a32 *)(a + 8), morder::acquire, *(a32 **)a);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Load, *(a64 *)(a + 8), mo_acquire, *(a64 **)a);
+  ATOMIC_RET(Load, *(a64 *)(a + 8), morder::acquire, *(a64 **)a);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC(Store, mo_release, *(a32 **)a, *(a32 *)(a + 8));
+  ATOMIC(Store, morder::release, *(a32 **)a, *(a32 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC(Store, mo_release, *(a64 **)a, *(a64 *)(a + 8));
+  ATOMIC(Store, morder::release, *(a64 **)a, *(a64 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAdd, *(a32 *)(a + 16), mo_acq_rel, *(a32 **)a,
+  ATOMIC_RET(FetchAdd, *(a32 *)(a + 16), morder::acq_rel, *(a32 **)a,
              *(a32 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAdd, *(a64 *)(a + 16), mo_acq_rel, *(a64 **)a,
+  ATOMIC_RET(FetchAdd, *(a64 *)(a + 16), morder::acq_rel, *(a64 **)a,
              *(a64 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), mo_acq_rel, *(a32 **)a,
+  ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), morder::acq_rel, *(a32 **)a,
              *(a32 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), mo_acq_rel, *(a64 **)a,
+  ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), morder::acq_rel, *(a64 **)a,
              *(a64 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchOr, *(a32 *)(a + 16), mo_acq_rel, *(a32 **)a,
+  ATOMIC_RET(FetchOr, *(a32 *)(a + 16), morder::acq_rel, *(a32 **)a,
              *(a32 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(FetchOr, *(a64 *)(a + 16), mo_acq_rel, *(a64 **)a,
+  ATOMIC_RET(FetchOr, *(a64 *)(a + 16), morder::acq_rel, *(a64 **)a,
              *(a64 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Exchange, *(a32 *)(a + 16), mo_acq_rel, *(a32 **)a,
+  ATOMIC_RET(Exchange, *(a32 *)(a + 16), morder::acq_rel, *(a32 **)a,
              *(a32 *)(a + 8));
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
-  ATOMIC_RET(Exchange, *(a64 *)(a + 16), mo_acq_rel, *(a64 **)a,
+  ATOMIC_RET(Exchange, *(a64 *)(a + 16), morder::acq_rel, *(a64 **)a,
              *(a64 *)(a + 8));
 }
 
@@ -968,7 +972,7 @@ void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                          u8 *a) {
   a32 cur = 0;
   a32 cmp = *(a32 *)(a + 8);
-  ATOMIC_RET(CAS, cur, mo_acq_rel, mo_acquire, *(a32 **)a, cmp,
+  ATOMIC_RET(CAS, cur, morder::acq_rel, morder::acquire, *(a32 **)a, cmp,
              *(a32 *)(a + 12));
   *(bool *)(a + 16) = (cur == cmp);
 }
@@ -978,7 +982,7 @@ void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                          u8 *a) {
   a64 cur = 0;
   a64 cmp = *(a64 *)(a + 8);
-  ATOMIC_RET(CAS, cur, mo_acq_rel, mo_acquire, *(a64 **)a, cmp,
+  ATOMIC_RET(CAS, cur, morder::acq_rel, morder::acquire, *(a64 **)a, cmp,
              *(a64 *)(a + 16));
   *(bool *)(a + 24) = (cur == cmp);
 }
