[libcxx-commits] [libcxx] [libc++] Implement C++20 atomic_ref (PR #76647)

Damien L-G via libcxx-commits libcxx-commits at lists.llvm.org
Mon Jan 1 08:25:40 PST 2024


https://github.com/dalg24 updated https://github.com/llvm/llvm-project/pull/76647

From 8efcc01851aa0d8ff20068af3f1f7aff7aa4267d Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sat, 30 Dec 2023 21:40:37 -0500
Subject: [PATCH 01/12] [libc++][atomic_ref] Refactor atomic_base_impl class
 and friends

so that it can be reused to implement atomic_ref; a minimal sketch of the
resulting traits-based dispatch follows the diffstat below.
Salvaged and adapted from https://reviews.llvm.org/D72240

Co-Authored-By: Benjamin Trapani
---
 libcxx/include/__atomic/cxx_atomic_impl.h | 409 ++++------------------
 libcxx/include/__config                   |   4 +-
 2 files changed, 66 insertions(+), 347 deletions(-)
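
The gist of the refactor is replacing the duplicated volatile/non-volatile
overloads with a single set of function templates that dispatch through a small
traits class keyed on the contained type. The following standalone sketch
illustrates that approach under simplified, made-up names (it mirrors the patch
but is not the libc++ code itself); the __atomic_store_n builtin stands in for
the full set of GCC/Clang atomic builtins:

  #include <type_traits>

  // Stand-in for __cxx_atomic_base_impl: it may contain a value (atomic<T>)
  // or a reference (the backing storage of atomic_ref<T>).
  template <class T>
  struct atomic_base_impl {
    using contained_t = T; // mirrors __contained_t in the patch
    T a_value;
  };

  // Detect whether a type is an instantiation of a given class template.
  template <class T, template <class> class Template>
  struct is_instantiation_of : std::false_type {};
  template <class T, template <class> class Template>
  struct is_instantiation_of<Template<T>, Template> : std::true_type {};

  // Recover the underlying value type whether the impl holds T or T&.
  template <class Impl>
  struct atomic_base_impl_traits {
    static constexpr bool is_value_ref = std::is_reference<typename Impl::contained_t>::value;
    using underlying_t = typename std::remove_volatile<
        typename std::remove_reference<typename Impl::contained_t>::type>::type;
  };

  // One function template replaces the old volatile/non-volatile overload pair.
  template <class Impl>
  void store_relaxed(Impl* a, typename atomic_base_impl_traits<Impl>::underlying_t v) {
    __atomic_store_n(&a->a_value, v, __ATOMIC_RELAXED); // GCC/Clang builtin
  }

  int main() {
    static_assert(is_instantiation_of<atomic_base_impl<int>, atomic_base_impl>::value, "");

    atomic_base_impl<int> a{0};   // atomic<int>-style: contains an int
    store_relaxed(&a, 42);

    int x = 0;
    atomic_base_impl<int&> r{x};  // atomic_ref<int>-style: refers to x
    store_relaxed(&r, 7);         // the same template handles both cases

    return (a.a_value == 42 && x == 7) ? 0 : 1;
  }

In the actual patch, __cxx_atomic_base_impl_traits additionally records
volatility and pointer-ness so that the volatile-assign init path and the
__skip_amt scaling for fetch_add/fetch_sub keep working.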

diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 1a0b808a0cb1c4..18c85aec4eef27 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -15,8 +15,12 @@
 #include <__memory/addressof.h>
 #include <__type_traits/conditional.h>
 #include <__type_traits/is_assignable.h>
+#include <__type_traits/is_pointer.h>
+#include <__type_traits/is_reference.h>
 #include <__type_traits/is_trivially_copyable.h>
+#include <__type_traits/is_volatile.h>
 #include <__type_traits/remove_const.h>
+#include <__type_traits/remove_reference.h>
 #include <cstddef>
 #include <cstring>
 
@@ -58,9 +62,27 @@ struct __cxx_atomic_base_impl {
   }
 #  endif // _LIBCPP_CXX03_LANG
   _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
+  using __contained_t = _Tp;
   _Tp __a_value;
 };
 
+template <typename _Tp, template <typename> class _TemplateTp>
+struct __is_instantiation_of : false_type {};
+
+template <typename _Tp, template <typename> class _TemplateTp>
+struct __is_instantiation_of<_TemplateTp<_Tp>, _TemplateTp> : true_type {};
+
+template <typename _Tp,
+          typename = typename enable_if<__is_instantiation_of<typename remove_volatile<typename _Tp::__base>::type,
+                                                              __cxx_atomic_base_impl>::value,
+                                        bool>::type>
+struct __cxx_atomic_base_impl_traits {
+  static constexpr bool __is_value_volatile = is_volatile<_Tp>::value;
+  static constexpr bool __is_value_ref      = is_reference<typename _Tp::__contained_t>::value;
+  using __underlying_t = typename remove_volatile<typename remove_reference<typename _Tp::__contained_t>::type>::type;
+  static constexpr bool __is_value_pointer = is_pointer<__underlying_t>::value;
+};
+
 _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
   // Avoid switch statement to make this a constexpr.
   return __order == memory_order_relaxed
@@ -87,13 +109,15 @@ _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory
                                 : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
 }
 
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+template <typename _Tp, typename enable_if<__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
   __cxx_atomic_assign_volatile(__a->__a_value, __val);
 }
 
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+template <typename _Tp, typename enable_if<!__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
   __a->__a_value = __val;
 }
 
@@ -107,53 +131,28 @@ _LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order
 
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+__cxx_atomic_store(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val, memory_order __order) {
   __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
-  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
-  _Tp __ret;
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_load(const _Tp* __a, memory_order __order) {
+  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
   __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
   return __ret;
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load_inplace(
+    const _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __dst, memory_order __order) {
   __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
-  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
-  _Tp __ret;
-  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
-  return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
-  _Tp __ret;
-  __atomic_exchange(
-      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
-  return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
-  _Tp __ret;
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_exchange(
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value, memory_order __order) {
+  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
   __atomic_exchange(
       std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
   return __ret;
@@ -161,9 +160,9 @@ _LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a
 
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
-    volatile __cxx_atomic_base_impl<_Tp>* __a,
-    _Tp* __expected,
-    _Tp __value,
+    _Tp* __a,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
     memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(
@@ -175,23 +174,11 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
       __to_gcc_failure_order(__failure));
 }
 
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
-  return __atomic_compare_exchange(
-      std::addressof(__a->__a_value),
-      __expected,
-      std::addressof(__value),
-      false,
-      __to_gcc_order(__success),
-      __to_gcc_failure_order(__failure));
-}
-
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
-    volatile __cxx_atomic_base_impl<_Tp>* __a,
-    _Tp* __expected,
-    _Tp __value,
+    _Tp* __a,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
     memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(
@@ -203,18 +190,6 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
       __to_gcc_failure_order(__failure));
 }
 
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
-  return __atomic_compare_exchange(
-      std::addressof(__a->__a_value),
-      __expected,
-      std::addressof(__value),
-      true,
-      __to_gcc_order(__success),
-      __to_gcc_failure_order(__failure));
-}
-
 template <typename _Tp>
 struct __skip_amt {
   enum { value = 1 };
@@ -233,302 +208,44 @@ template <typename _Tp, int n>
 struct __skip_amt<_Tp[n]> {};
 
 template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_add(_Tp* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_add(
+      std::addressof(__a->__a_value),
+      __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
+      __to_gcc_order(__order));
 }
 
 template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_sub(_Tp* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_sub(
+      std::addressof(__a->__a_value),
+      __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
+      __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_and(
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
   return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_or(
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
   return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_xor(
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
   return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 #  define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
 
-#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
-
-template <typename _Tp>
-struct __cxx_atomic_base_impl {
-  _LIBCPP_HIDE_FROM_ABI
-#  ifndef _LIBCPP_CXX03_LANG
-  __cxx_atomic_base_impl() _NOEXCEPT = default;
-#  else
-  __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {
-  }
-#  endif // _LIBCPP_CXX03_LANG
-  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp __value) _NOEXCEPT : __a_value(__value) {}
-  _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
-};
-
-#  define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
-
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
-  __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
-  __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
-  __c11_atomic_init(std::addressof(__a->__a_value), __val);
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) _NOEXCEPT {
-  __c11_atomic_init(std::addressof(__a->__a_value), __val);
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
-  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) _NOEXCEPT {
-  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
-  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
-  return __c11_atomic_load(
-      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
-  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
-  return __c11_atomic_load(
-      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const volatile* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
-  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
-  *__dst           = __c11_atomic_load(
-      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
-  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
-  *__dst           = __c11_atomic_load(
-      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_exchange(
-      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_exchange(
-      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
-  // Avoid switch statement to make this a constexpr.
-  return __order == memory_order_release
-           ? memory_order_relaxed
-           : (__order == memory_order_acq_rel ? memory_order_acquire : __order);
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
-    __cxx_atomic_base_impl<_Tp> volatile* __a,
-    _Tp* __expected,
-    _Tp __value,
-    memory_order __success,
-    memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_strong(
-      std::addressof(__a->__a_value),
-      __expected,
-      __value,
-      static_cast<__memory_order_underlying_t>(__success),
-      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
-    _NOEXCEPT {
-  return __c11_atomic_compare_exchange_strong(
-      std::addressof(__a->__a_value),
-      __expected,
-      __value,
-      static_cast<__memory_order_underlying_t>(__success),
-      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
-    __cxx_atomic_base_impl<_Tp> volatile* __a,
-    _Tp* __expected,
-    _Tp __value,
-    memory_order __success,
-    memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_weak(
-      std::addressof(__a->__a_value),
-      __expected,
-      __value,
-      static_cast<__memory_order_underlying_t>(__success),
-      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
-    _NOEXCEPT {
-  return __c11_atomic_compare_exchange_weak(
-      std::addressof(__a->__a_value),
-      __expected,
-      __value,
-      static_cast<__memory_order_underlying_t>(__success),
-      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
-__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
-__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
-__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp*
-__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(
-      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_and(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_and(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_or(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_or(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_xor(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_xor(
-      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
-
+#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP
 #ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
 
 template <typename _Tp>
@@ -813,7 +530,7 @@ template <typename _Tp,
 template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
 #endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
 struct __cxx_atomic_impl : public _Base {
-  static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
+  using __base = _Base;
 
   _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
   _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
diff --git a/libcxx/include/__config b/libcxx/include/__config
index adff13e714cb64..03e2eb729cb708 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1128,7 +1128,9 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c
 
 #  if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic)
 #    define _LIBCPP_HAS_C_ATOMIC_IMP
-#  elif defined(_LIBCPP_COMPILER_GCC)
+#  endif
+
+#  if defined(_LIBCPP_COMPILER_GCC) || (__has_builtin(__atomic_load) && __has_builtin(__atomic_store) && __has_builtin(__atomic_exchange) && __has_builtin(__atomic_compare_exchange))
 #    define _LIBCPP_HAS_GCC_ATOMIC_IMP
 #  endif
 

From 3b478a423a4beb228a08e5768997ba25eac683ef Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sat, 30 Dec 2023 21:47:43 -0500
Subject: [PATCH 02/12] [libc++][atomic_ref] Enable atomic load/exchange for
 non-default-constructible types

---
 libcxx/include/__atomic/cxx_atomic_impl.h | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
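
The point of this patch: the previous code default-constructed a _Tp to receive
the result of __atomic_load/__atomic_exchange, which atomic_ref cannot do for
types that are trivially copyable but not default-constructible. Loading into
suitably aligned raw storage and copying out of it sidesteps that requirement.
A minimal sketch of the same technique using the generic __atomic_load builtin
directly (the Widget type is invented for illustration):

  #include <type_traits>

  struct Widget { // trivially copyable, but not default-constructible
    int value;
    explicit Widget(int v) : value(v) {}
  };
  static_assert(std::is_trivially_copyable<Widget>::value, "");

  // Load *src atomically without requiring T to be default-constructible:
  // the builtin writes the bytes into aligned raw storage, and the bytes of a
  // trivially copyable T are then copied out as a T (this mirrors the patch).
  template <class T>
  T atomic_load_no_default_ctor(const T* src) {
    alignas(T) unsigned char mem[sizeof(T)];
    __atomic_load(src, reinterpret_cast<T*>(mem), __ATOMIC_SEQ_CST); // GCC/Clang builtin
    return *reinterpret_cast<T*>(mem);
  }

  int main() {
    Widget w(42);
    Widget copy = atomic_load_no_default_ctor(&w);
    return copy.value == 42 ? 0 : 1;
  }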

diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 18c85aec4eef27..56cd703c258944 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -138,9 +138,11 @@ __cxx_atomic_store(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__unde
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
 __cxx_atomic_load(const _Tp* __a, memory_order __order) {
-  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
-  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
-  return __ret;
+  using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
+  alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
+  __atomic_load(
+      std::addressof(__a->__a_value), std::addressof(*reinterpret_cast<_Ret*>(__mem)), __to_gcc_order(__order));
+  return *reinterpret_cast<_Ret*>(__mem);
 }
 
 template <typename _Tp>
@@ -152,10 +154,14 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load_inplace(
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_exchange(
     _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value, memory_order __order) {
-  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
+  using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
+  alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
   __atomic_exchange(
-      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
-  return __ret;
+      std::addressof(__a->__a_value),
+      std::addressof(__value),
+      std::addressof(*reinterpret_cast<_Ret*>(__mem)),
+      __to_gcc_order(__order));
+  return *reinterpret_cast<_Ret*>(__mem);
 }
 
 template <typename _Tp>

From 1992da2c536ab287bb5be4f76854f48e56ac7f91 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sat, 30 Dec 2023 21:50:09 -0500
Subject: [PATCH 03/12] [libc++][atomic_ref] Add
 _LIBCPP_CHECK_WAIT_MEMORY_ORDER macro

---
 libcxx/include/__atomic/check_memory_order.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libcxx/include/__atomic/check_memory_order.h b/libcxx/include/__atomic/check_memory_order.h
index 3012aec0521b38..536f764a619026 100644
--- a/libcxx/include/__atomic/check_memory_order.h
+++ b/libcxx/include/__atomic/check_memory_order.h
@@ -27,4 +27,8 @@
   _LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel,                                 \
                            "memory order argument to atomic operation is invalid")
 
+#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m)                                                                           \
+  _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel,                                 \
+                           "memory order argument to atomic operation is invalid")
+
 #endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H
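
For context, _LIBCPP_DIAGNOSE_WARNING is typically built on clang's diagnose_if
attribute, so attaching the new macro to a declaration turns an invalid memory
order into a compile-time warning at the call site whenever the argument is a
constant. A rough standalone sketch of the mechanism (not libc++ code; wait_like
is a made-up stand-in for the wait() member the next patch adds):

  #include <atomic>

  // The diagnose_if condition is evaluated at the call site when the argument
  // is a constant, producing the same kind of warning the libc++ macro emits.
  void wait_like(std::memory_order m)
      __attribute__((diagnose_if(m == std::memory_order_release || m == std::memory_order_acq_rel,
                                 "memory order argument to atomic operation is invalid",
                                 "warning")));
  void wait_like(std::memory_order) {}

  int main() {
    wait_like(std::memory_order_acquire);    // fine
    // wait_like(std::memory_order_release); // would trigger the warning above
    return 0;
  }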

From 78e13717fb90b27ecf7bb278caed39f931472cb3 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sat, 30 Dec 2023 21:51:40 -0500
Subject: [PATCH 04/12] [libc++][atomic_ref] Implement C++20 atomic_ref

---
 libcxx/include/CMakeLists.txt        |   1 +
 libcxx/include/__atomic/atomic_ref.h | 238 +++++++++++++++++++++++++++
 libcxx/include/atomic                |   1 +
 3 files changed, 240 insertions(+)
 create mode 100644 libcxx/include/__atomic/atomic_ref.h
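
Before the diffs, a short usage sketch of the facility being added:
std::atomic_ref performs atomic operations on an ordinary, non-atomic object
for as long as any atomic_ref to it exists. This reflects standard C++20
semantics rather than code from the patch:

  #include <atomic>
  #include <cassert>
  #include <thread>

  int main() {
    int counter = 0; // a plain int, not std::atomic<int>

    {
      // While any atomic_ref to `counter` exists, all access must go through
      // atomic_ref objects; concurrent modification is then well defined.
      std::thread t1([&] {
        std::atomic_ref<int> ref(counter);
        for (int i = 0; i < 1000; ++i)
          ref.fetch_add(1, std::memory_order_relaxed);
      });
      std::thread t2([&] {
        std::atomic_ref<int> ref(counter);
        for (int i = 0; i < 1000; ++i)
          ++ref; // the integral specialization provides ++/--/+= and friends
      });
      t1.join();
      t2.join();
    }

    // Once the last atomic_ref is gone, ordinary access is allowed again.
    assert(counter == 2000);
    return 0;
  }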

diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 0fe3ab44d2466e..5d7e4ca98b1f75 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -236,6 +236,7 @@ set(files
   __atomic/atomic_flag.h
   __atomic/atomic_init.h
   __atomic/atomic_lock_free.h
+  __atomic/atomic_ref.h
   __atomic/atomic_sync.h
   __atomic/check_memory_order.h
   __atomic/contention_t.h
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
new file mode 100644
index 00000000000000..b4299804eccc31
--- /dev/null
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -0,0 +1,238 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//                        Kokkos v. 4.0
+//       Copyright (2022) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
+#define _LIBCPP___ATOMIC_ATOMIC_REF_H
+
+#include <__assert>
+#include <__atomic/check_memory_order.h>
+#include <__atomic/cxx_atomic_impl.h>
+#include <__atomic/is_always_lock_free.h>
+#include <__config>
+#include <__memory/addressof.h>
+#include <__type_traits/is_floating_point.h>
+#include <__type_traits/is_function.h>
+#include <__type_traits/is_nothrow_constructible.h>
+#include <__type_traits/is_same.h>
+#include <cinttypes>
+#include <concepts>
+#include <cstddef>
+#include <limits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Tp, bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>, bool = is_floating_point_v<_Tp>>
+struct __atomic_ref_base {
+  mutable __cxx_atomic_impl<_Tp&> __a_;
+
+  using value_type = _Tp;
+
+  static constexpr size_t required_alignment = alignof(_Tp);
+
+  static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<_Tp>::__value;
+
+  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __cxx_atomic_is_lock_free(sizeof(_Tp)); }
+
+  _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
+      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
+    _LIBCPP_ASSERT_UNCATEGORIZED(
+        __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
+        "memory order argument to atomic store operation is invalid");
+    __cxx_atomic_store(&__a_, __desired, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
+    store(__desired);
+    return __desired;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
+      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
+    _LIBCPP_ASSERT_UNCATEGORIZED(
+        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+            __order == memory_order::seq_cst,
+        "memory order argument to atomic load operation is invalid");
+    return __cxx_atomic_load(&__a_, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+    return __cxx_atomic_exchange(&__a_, __desired, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+    _LIBCPP_ASSERT_UNCATEGORIZED(
+        __failure == memory_order::relaxed || __failure == memory_order::consume ||
+            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+        "failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+    return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __success, __failure);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+    _LIBCPP_ASSERT_UNCATEGORIZED(
+        __failure == memory_order::relaxed || __failure == memory_order::consume ||
+            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+        "failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+    return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __success, __failure);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+    return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __order, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+    return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __order, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
+      _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
+    _LIBCPP_ASSERT_UNCATEGORIZED(
+        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+            __order == memory_order::seq_cst,
+        "memory order argument to atomic wait operation is invalid");
+    __cxx_atomic_wait(addressof(__a_), __old, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { __cxx_atomic_notify_one(addressof(__a_)); }
+  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { __cxx_atomic_notify_all(addressof(__a_)); }
+
+  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __a_(__obj) {}
+};
+
+template <class _Tp>
+struct __atomic_ref_base<_Tp, /*_IsIntegral=*/true, /*_IsFloatingPoint=*/false>
+    : public __atomic_ref_base<_Tp, false, false> {
+  using __base = __atomic_ref_base<_Tp, false, false>;
+
+  using difference_type = __base::value_type;
+
+  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __base(__obj) {}
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_and(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_or(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_xor(&this->__a_, __arg, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
+};
+
+template <class _Tp>
+struct __atomic_ref_base<_Tp, /*_IsIntegral=*/false, /*_IsFloatingPoint=*/true>
+    : public __atomic_ref_base<_Tp, false, false> {
+  using __base = __atomic_ref_base<_Tp, false, false>;
+
+  using difference_type = __base::value_type;
+
+  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __base(__obj) {}
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+template <class _Tp>
+struct atomic_ref : public __atomic_ref_base<_Tp> {
+  static_assert(is_trivially_copyable<_Tp>::value, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");
+
+  using __base = __atomic_ref_base<_Tp>;
+
+  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+    _LIBCPP_ASSERT_UNCATEGORIZED((uintptr_t)addressof(__obj) % __base::required_alignment == 0,
+                                 "atomic_ref ctor: referenced object must be aligned to required_alignment");
+  }
+
+  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+  atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+template <class _Tp>
+struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
+  using __base = __atomic_ref_base<_Tp*>;
+
+  using difference_type = ptrdiff_t;
+
+  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+
+  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
+
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }
+
+  atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 2e8f5b521a55eb..b71033b8a43f3b 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -594,6 +594,7 @@ template <class T>
 #include <__atomic/atomic_flag.h>
 #include <__atomic/atomic_init.h>
 #include <__atomic/atomic_lock_free.h>
+#include <__atomic/atomic_ref.h>
 #include <__atomic/atomic_sync.h>
 #include <__atomic/check_memory_order.h>
 #include <__atomic/contention_t.h>

From 45d0e1538d73de5f6447a52e00ccc61f234a5f01 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sat, 30 Dec 2023 21:54:12 -0500
Subject: [PATCH 05/12] [libc++][atomic_ref] Add tests for atomic_ref

---
 .../assert.compare_exchange_strong.pass.cpp   |  63 +++++++++
 .../assert.compare_exchange_weak.pass.cpp     |  63 +++++++++
 .../atomics/atomics.ref/assert.ctor.pass.cpp  |  38 +++++
 .../atomics/atomics.ref/assert.load.pass.cpp  |  60 ++++++++
 .../atomics/atomics.ref/assert.store.pass.cpp |  68 +++++++++
 .../atomics/atomics.ref/assert.wait.pass.cpp  |  60 ++++++++
 .../std/atomics/atomics.ref/assign.pass.cpp   |  50 +++++++
 .../atomics.ref/bitwise_and_assign.pass.cpp   |  47 +++++++
 .../atomics.ref/bitwise_or_assign.pass.cpp    |  47 +++++++
 .../atomics.ref/bitwise_xor_assign.pass.cpp   |  47 +++++++
 .../compare_exchange_strong.pass.cpp          |  83 +++++++++++
 .../compare_exchange_weak.pass.cpp            |  84 +++++++++++
 .../std/atomics/atomics.ref/convert.pass.cpp  |  47 +++++++
 .../atomics.ref/ctor.explicit.verify.cpp      |  34 +++++
 .../std/atomics/atomics.ref/ctor.pass.cpp     |  46 ++++++
 .../atomics/atomics.ref/deduction.pass.cpp    |  39 ++++++
 .../std/atomics/atomics.ref/exchange.pass.cpp |  48 +++++++
 .../atomics/atomics.ref/fetch_add.pass.cpp    |  75 ++++++++++
 .../atomics/atomics.ref/fetch_and.pass.cpp    |  56 ++++++++
 .../std/atomics/atomics.ref/fetch_or.pass.cpp |  54 +++++++
 .../atomics/atomics.ref/fetch_sub.pass.cpp    |  75 ++++++++++
 .../atomics/atomics.ref/fetch_xor.pass.cpp    |  54 +++++++
 .../atomics.ref/increment_decrement.pass.cpp  |  77 ++++++++++
 .../atomics.ref/is_always_lock_free.pass.cpp  |  47 +++++++
 .../std/atomics/atomics.ref/load.pass.cpp     |  48 +++++++
 .../atomics/atomics.ref/member_types.pass.cpp | 132 ++++++++++++++++++
 .../atomics/atomics.ref/notify_all.pass.cpp   |  86 ++++++++++++
 .../atomics/atomics.ref/notify_one.pass.cpp   |  54 +++++++
 .../operator_minus_equals.pass.cpp            |  64 +++++++++
 .../atomics.ref/operator_plus_equals.pass.cpp |  64 +++++++++
 .../atomics.ref/required_alignment.pass.cpp   |  34 +++++
 .../std/atomics/atomics.ref/store.pass.cpp    |  50 +++++++
 .../std/atomics/atomics.ref/type.verify.cpp   |  26 ++++
 .../std/atomics/atomics.ref/wait.pass.cpp     |  65 +++++++++
 34 files changed, 1985 insertions(+)
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
 create mode 100644 libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/ctor.explicit.verify.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/load.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/store.pass.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/type.verify.cpp
 create mode 100644 libcxx/test/std/atomics/atomics.ref/wait.pass.cpp

diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
new file mode 100644
index 00000000000000..3a991c9351cd85
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
@@ -0,0 +1,63 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_strong(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+template <typename T>
+void test_compare_exchange_strong_invalid_memory_order() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+    T t(T(2));
+    a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        T t(T(2));
+        a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+      }()),
+      "memory order argument to strong atomic compare-and-exchange operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        T t(T(2));
+        a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+      }()),
+      "memory order argument to strong atomic compare-and-exchange operation is invalid");
+}
+
+int main(int, char**) {
+  test_compare_exchange_strong_invalid_memory_order<int>();
+  test_compare_exchange_strong_invalid_memory_order<float>();
+  test_compare_exchange_strong_invalid_memory_order<int*>();
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_compare_exchange_strong_invalid_memory_order<X>();
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
new file mode 100644
index 00000000000000..c9506f556129ee
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
@@ -0,0 +1,63 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_weak(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+template <typename T>
+void test_compare_exchange_weak_invalid_memory_order() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+    T t(T(2));
+    a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        T t(T(2));
+        a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+      }()),
+      "memory order argument to weak atomic compare-and-exchange operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        T t(T(2));
+        a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+      }()),
+      "memory order argument to weak atomic compare-and-exchange operation is invalid");
+}
+
+int main(int, char**) {
+  test_compare_exchange_weak_invalid_memory_order<int>();
+  test_compare_exchange_weak_invalid_memory_order<float>();
+  test_compare_exchange_weak_invalid_memory_order<int*>();
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_compare_exchange_weak_invalid_memory_order<X>();
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
new file mode 100644
index 00000000000000..3705167181519c
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
@@ -0,0 +1,38 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+
+// <atomic>
+
+// atomic_ref(T& obj);
+//
+// Preconditions: The referenced object is aligned to required_alignment.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+int main(int, char**) {
+  {
+    char c[8];
+    float* f = new (c) float(3.14f);
+    [[maybe_unused]] std::atomic_ref<float> r(*f);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        char c[8];
+        float* f = new (c + 1) float(3.14f);
+        [[maybe_unused]] std::atomic_ref<float> r(*f);
+      }()),
+      "atomic_ref ctor: referenced object must be aligned to required_alignment");
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
new file mode 100644
index 00000000000000..4181b1c12c7db4
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
@@ -0,0 +1,60 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// T load(memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+template <typename T>
+void test_load_invalid_memory_order() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+    (void)a.load(std::memory_order_relaxed);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        (void)a.load(std::memory_order_release);
+      }()),
+      "memory order argument to atomic load operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        (void)a.load(std::memory_order_acq_rel);
+      }()),
+      "memory order argument to atomic load operation is invalid");
+}
+
+int main(int, char**) {
+  test_load_invalid_memory_order<int>();
+  test_load_invalid_memory_order<float>();
+  test_load_invalid_memory_order<int*>();
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_load_invalid_memory_order<X>();
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
new file mode 100644
index 00000000000000..f543bcc35295f7
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
@@ -0,0 +1,68 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void store(T desired, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::release, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+template <typename T>
+void test_store_invalid_memory_order() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+    a.store(T(2), std::memory_order_relaxed);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        a.store(T(2), std::memory_order_consume);
+      }()),
+      "memory order argument to atomic store operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        a.store(T(2), std::memory_order_acquire);
+      }()),
+      "memory order argument to atomic store operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        a.store(T(2), std::memory_order_acq_rel);
+      }()),
+      "memory order argument to atomic store operation is invalid");
+}
+
+int main(int, char**) {
+  test_store_invalid_memory_order<int>();
+  test_store_invalid_memory_order<float>();
+  test_store_invalid_memory_order<int*>();
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_store_invalid_memory_order<X>();
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
new file mode 100644
index 00000000000000..2b1c9208527471
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
@@ -0,0 +1,60 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none
+// XFAIL: availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "check_assertion.h"
+
+template <typename T>
+void test_wait_invalid_memory_order() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+    a.wait(T(2), std::memory_order_relaxed);
+  }
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        a.wait(T(2), std::memory_order_release);
+      }()),
+      "memory order argument to atomic wait operation is invalid");
+
+  TEST_LIBCPP_ASSERT_FAILURE(
+      ([] {
+        T x(T(1));
+        std::atomic_ref<T> a(x);
+        a.wait(T(2), std::memory_order_acq_rel);
+      }()),
+      "memory order argument to atomic wait operation is invalid");
+}
+
+int main(int, char**) {
+  test_wait_invalid_memory_order<int>();
+  test_wait_invalid_memory_order<float>();
+  test_wait_invalid_memory_order<int*>();
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_wait_invalid_memory_order<X>();
+
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
new file mode 100644
index 00000000000000..95d29df70fe160
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
@@ -0,0 +1,50 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// T operator=(T) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_assign() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  a = T(2);
+  assert(x == T(2));
+
+  ASSERT_NOEXCEPT(a = T(0));
+  static_assert(std::is_nothrow_assignable_v<std::atomic_ref<T>, T>);
+
+  static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+}
+
+void test() {
+  test_assign<int>();
+
+  test_assign<float>();
+
+  test_assign<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_assign<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
new file mode 100644
index 00000000000000..68c64b9ab34589
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator&=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_and_assign = requires { std::declval<T const>() &= std::declval<T>(); };
+
+static_assert(!has_bitwise_and_assign<std::atomic_ref<float>>);
+static_assert(!has_bitwise_and_assign<std::atomic_ref<int*>>);
+static_assert(!has_bitwise_and_assign<std::atomic_ref<const int*>>);
+static_assert(!has_bitwise_and_assign<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_bitwise_and_assign<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert((a &= T(2)) == T(0));
+  assert(x == T(0));
+  ASSERT_NOEXCEPT(a &= T(0));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
new file mode 100644
index 00000000000000..20ec80697c70ed
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator|=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_or_assign = requires { std::declval<T const>() |= std::declval<T>(); };
+
+static_assert(!has_bitwise_or_assign<std::atomic_ref<float>>);
+static_assert(!has_bitwise_or_assign<std::atomic_ref<int*>>);
+static_assert(!has_bitwise_or_assign<std::atomic_ref<const int*>>);
+static_assert(!has_bitwise_or_assign<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_bitwise_or_assign<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert((a |= T(2)) == T(3));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a |= T(0));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
new file mode 100644
index 00000000000000..2d41bf01f95629
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator^=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_xor_assign = requires { std::declval<T const>() ^= std::declval<T>(); };
+
+static_assert(!has_bitwise_xor_assign<std::atomic_ref<float>>);
+static_assert(!has_bitwise_xor_assign<std::atomic_ref<int*>>);
+static_assert(!has_bitwise_xor_assign<std::atomic_ref<const int*>>);
+static_assert(!has_bitwise_xor_assign<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_bitwise_xor_assign<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert((a ^= T(2)) == T(3));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a ^= T(0));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
new file mode 100644
index 00000000000000..b9dc84c04f7668
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
@@ -0,0 +1,83 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// bool compare_exchange_strong(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_strong(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_compare_exchange_strong() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_strong(t, T(2)) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_strong(t, T(3)) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2)));
+  }
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_strong(t, T(3), std::memory_order_seq_cst) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst));
+  }
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_strong(t, T(3), std::memory_order_release, std::memory_order_relaxed) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+  }
+}
+
+void test() {
+  test_compare_exchange_strong<int>();
+
+  test_compare_exchange_strong<float>();
+
+  test_compare_exchange_strong<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_compare_exchange_strong<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
new file mode 100644
index 00000000000000..3d16be5e3a1499
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
@@ -0,0 +1,84 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// bool compare_exchange_weak(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_weak(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_compare_exchange_weak() {
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_weak(t, T(2)) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_weak(t, T(3)) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2)));
+  }
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_weak(t, T(3), std::memory_order_seq_cst) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst));
+  }
+  {
+    T x(T(1));
+    std::atomic_ref<T> a(x);
+
+    T t(T(1));
+    assert(a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed) == true);
+    assert(a == T(2));
+    assert(t == T(1));
+    assert(a.compare_exchange_weak(t, T(3), std::memory_order_release, std::memory_order_relaxed) == false);
+    assert(a == T(2));
+    assert(t == T(2));
+
+    ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+  }
+}
+
+void test() {
+  test_compare_exchange_weak<int>();
+
+  test_compare_exchange_weak<float>();
+
+  test_compare_exchange_weak<int*>();
+
+  struct X {
+    int i;
+    //X() = default;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_compare_exchange_weak<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
new file mode 100644
index 00000000000000..cdfd3442eac806
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// operator T() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_convert() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a == T(1));
+
+  ASSERT_NOEXCEPT(T(a));
+  static_assert(std::is_nothrow_convertible_v<std::atomic_ref<T>, T>);
+}
+
+void test() {
+  test_convert<int>();
+
+  test_convert<float>();
+
+  test_convert<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_convert<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/ctor.explicit.verify.cpp b/libcxx/test/std/atomics/atomics.ref/ctor.explicit.verify.cpp
new file mode 100644
index 00000000000000..3f1c133c643d65
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/ctor.explicit.verify.cpp
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+
+template <class T>
+void test(std::atomic_ref<T>) {}
+
+void explicit_ctor() {
+  int i = 0;
+  // expected-error-re@*:* {{{{.*}}no matching function for call to 'test'}}
+  test<int>(i);
+
+  float f = 0.f;
+  // expected-error-re@*:* {{{{.*}}no matching function for call to 'test'}}
+  test<float>(f);
+
+  int* p = &i;
+  // expected-error-re@*:* {{{{.*}}no matching function for call to 'test'}}
+  test<int*>(p);
+
+  struct X {
+  } x;
+  // expected-error-re@*:* {{{{.*}}no matching function for call to 'test'}}
+  test<X>(x);
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
new file mode 100644
index 00000000000000..42890f4a537fc7
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
@@ -0,0 +1,46 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+auto makeAtomicRef(T& obj) {
+  // check that the constructor is explicit
+  static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+  static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+  return std::atomic_ref<T>(obj);
+}
+
+void test() {
+  int i = 0;
+  (void)makeAtomicRef(i);
+
+  float f = 0.f;
+  (void)makeAtomicRef(f);
+
+  int* p = &i;
+  (void)makeAtomicRef(p);
+
+  struct X {
+  } x;
+  (void)makeAtomicRef(x);
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
new file mode 100644
index 00000000000000..62cfcc08aa0420
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
@@ -0,0 +1,39 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS: -Wno-ctad-maybe-unsupported
+
+// <atomic>
+
+#include <atomic>
+#include <type_traits>
+
+void test() {
+  int i = 0;
+  std::atomic_ref a0(i);
+  static_assert(std::is_same_v<decltype(a0), std::atomic_ref<int>>);
+
+  float f = 0.f;
+  std::atomic_ref a1(f);
+  static_assert(std::is_same_v<decltype(a1), std::atomic_ref<float>>);
+
+  int* p = &i;
+  std::atomic_ref a2(p);
+  static_assert(std::is_same_v<decltype(a2), std::atomic_ref<int*>>);
+
+  struct X {
+  } x;
+  std::atomic_ref a3(x);
+  static_assert(std::is_same_v<decltype(a3), std::atomic_ref<X>>);
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
new file mode 100644
index 00000000000000..75055bd8679bc3
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
@@ -0,0 +1,48 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// T exchange(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_exchange() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.exchange(T(2)) == T(1));
+  ASSERT_NOEXCEPT(a.exchange(T(2)));
+
+  assert(a.exchange(T(3), std::memory_order_seq_cst) == T(2));
+  ASSERT_NOEXCEPT(a.exchange(T(3), std::memory_order_seq_cst));
+}
+
+void test() {
+  test_exchange<int>();
+
+  test_exchange<float>();
+
+  test_exchange<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_exchange<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
new file mode 100644
index 00000000000000..439d5a305dd6b7
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
@@ -0,0 +1,75 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type fetch_add(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_add(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_add(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_add = requires {
+  std::declval<T const>().fetch_add(std::declval<T>());
+  std::declval<T const>().fetch_add(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+static_assert(!has_fetch_add<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_fetch_add<std::atomic_ref<X>>);
+
+template <typename T>
+void test_arithmetic() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.fetch_add(T(2)) == T(1));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.fetch_add(T(0)));
+
+  assert(a.fetch_add(T(4), std::memory_order_relaxed) == T(3));
+  assert(x == T(7));
+  ASSERT_NOEXCEPT(a.fetch_add(T(0), std::memory_order_relaxed));
+}
+
+template <typename T>
+void test_pointer() {
+  using X = std::remove_pointer_t<T>;
+  X t[9]  = {};
+  T p{&t[1]};
+  std::atomic_ref<T> a(p);
+
+  assert(a.fetch_add(2) == &t[1]);
+  assert(a == &t[3]);
+  ASSERT_NOEXCEPT(a.fetch_add(0));
+
+  assert(a.fetch_add(4, std::memory_order_relaxed) == &t[3]);
+  assert(a == &t[7]);
+  ASSERT_NOEXCEPT(a.fetch_add(0, std::memory_order_relaxed));
+}
+
+void test() {
+  test_arithmetic<int>();
+  test_arithmetic<float>();
+
+  test_pointer<int*>();
+  test_pointer<const int*>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
new file mode 100644
index 00000000000000..d837bc8f423c98
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
@@ -0,0 +1,56 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type fetch_and(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_and = requires {
+  std::declval<T const>().fetch_and(std::declval<T>());
+  std::declval<T const>().fetch_and(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+static_assert(!has_fetch_and<std::atomic_ref<float>>);
+static_assert(!has_fetch_and<std::atomic_ref<int*>>);
+static_assert(!has_fetch_and<std::atomic_ref<const int*>>);
+static_assert(!has_fetch_and<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_fetch_and<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.fetch_and(T(2)) == T(1));
+  assert(x == T(0));
+  ASSERT_NOEXCEPT(a.fetch_and(T(0)));
+
+  x = T(1);
+
+  assert(a.fetch_and(T(2), std::memory_order_relaxed) == T(1));
+  assert(x == T(0));
+  ASSERT_NOEXCEPT(a.fetch_and(T(0), std::memory_order_relaxed));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
new file mode 100644
index 00000000000000..88a836810b002e
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type fetch_or(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_or = requires {
+  std::declval<T const>().fetch_or(std::declval<T>());
+  std::declval<T const>().fetch_or(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+static_assert(!has_fetch_or<std::atomic_ref<float>>);
+static_assert(!has_fetch_or<std::atomic_ref<int*>>);
+static_assert(!has_fetch_or<std::atomic_ref<const int*>>);
+static_assert(!has_fetch_or<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_fetch_or<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.fetch_or(T(2)) == T(1));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.fetch_or(T(0)));
+
+  assert(a.fetch_or(T(2), std::memory_order_relaxed) == T(3));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.fetch_or(T(0), std::memory_order_relaxed));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
new file mode 100644
index 00000000000000..e9fc7ea7cb5ff1
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
@@ -0,0 +1,75 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type fetch_sub(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_sub(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_sub(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_sub = requires {
+  std::declval<T const>().fetch_sub(std::declval<T>());
+  std::declval<T const>().fetch_sub(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+static_assert(!has_fetch_sub<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_fetch_sub<std::atomic_ref<X>>);
+
+template <typename T>
+void test_arithmetic() {
+  T x(T(7));
+  std::atomic_ref<T> a(x);
+
+  assert(a.fetch_sub(T(4)) == T(7));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.fetch_sub(T(0)));
+
+  assert(a.fetch_sub(T(2), std::memory_order_relaxed) == T(3));
+  assert(x == T(1));
+  ASSERT_NOEXCEPT(a.fetch_sub(T(0), std::memory_order_relaxed));
+}
+
+template <typename T>
+void test_pointer() {
+  using X = std::remove_pointer_t<T>;
+  X t[9]  = {};
+  T p{&t[7]};
+  std::atomic_ref<T> a(p);
+
+  assert(a.fetch_sub(4) == &t[7]);
+  assert(a == &t[3]);
+  ASSERT_NOEXCEPT(a.fetch_sub(0));
+
+  assert(a.fetch_sub(2, std::memory_order_relaxed) == &t[3]);
+  assert(a == &t[1]);
+  ASSERT_NOEXCEPT(a.fetch_sub(0, std::memory_order_relaxed));
+}
+
+void test() {
+  test_arithmetic<int>();
+  test_arithmetic<float>();
+
+  test_pointer<int*>();
+  test_pointer<const int*>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
new file mode 100644
index 00000000000000..2e2f913e9e242a
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type fetch_xor(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_xor = requires {
+  std::declval<T const>().fetch_xor(std::declval<T>());
+  std::declval<T const>().fetch_xor(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+static_assert(!has_fetch_xor<std::atomic_ref<float>>);
+static_assert(!has_fetch_xor<std::atomic_ref<int*>>);
+static_assert(!has_fetch_xor<std::atomic_ref<const int*>>);
+static_assert(!has_fetch_xor<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_fetch_xor<std::atomic_ref<X>>);
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.fetch_xor(T(2)) == T(1));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.fetch_xor(T(0)));
+
+  assert(a.fetch_xor(T(2), std::memory_order_relaxed) == T(3));
+  assert(x == T(1));
+  ASSERT_NOEXCEPT(a.fetch_xor(T(0), std::memory_order_relaxed));
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
new file mode 100644
index 00000000000000..dd1bcaa6f554f0
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
@@ -0,0 +1,77 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator++(int) const noexcept;
+// integral-type operator--(int) const noexcept;
+// integral-type operator++() const noexcept;
+// integral-type operator--() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_pre_increment_operator = requires { ++std::declval<T const>(); };
+
+template <typename T>
+concept has_post_increment_operator = requires { std::declval<T const>()++; };
+
+template <typename T>
+concept has_pre_decrement_operator = requires { --std::declval<T const>(); };
+
+template <typename T>
+concept has_post_decrement_operator = requires { std::declval<T const>()--; };
+
+template <typename T>
+constexpr bool does_not_have_increment_nor_decrement_operators() {
+  return !has_pre_increment_operator<T> && !has_pre_decrement_operator<T> && !has_post_increment_operator<T> &&
+         !has_post_decrement_operator<T>;
+}
+
+static_assert(does_not_have_increment_nor_decrement_operators<float>());
+static_assert(does_not_have_increment_nor_decrement_operators<int*>());
+static_assert(does_not_have_increment_nor_decrement_operators<const int*>());
+static_assert(does_not_have_increment_nor_decrement_operators<bool>());
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(does_not_have_increment_nor_decrement_operators<X>());
+
+template <typename T>
+void test_integral() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(++a == T(2));
+  assert(x == T(2));
+  ASSERT_NOEXCEPT(++a);
+
+  assert(--a == T(1));
+  assert(x == T(1));
+  ASSERT_NOEXCEPT(--a);
+
+  assert(a++ == T(1));
+  assert(x == T(2));
+  ASSERT_NOEXCEPT(a++);
+
+  assert(a-- == T(2));
+  assert(x == T(1));
+  ASSERT_NOEXCEPT(a--);
+}
+
+void test() { test_integral<int>(); }
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
new file mode 100644
index 00000000000000..7f0548d2512481
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// static constexpr bool is_always_lock_free;
+// bool is_lock_free() const noexcept;
+
+#include <atomic>
+#include <cassert>
+
+#include "test_macros.h"
+
+template <typename T>
+void checkAlwaysLockFree(std::atomic_ref<T> a) {
+  if (std::atomic_ref<T>::is_always_lock_free) {
+    assert(a.is_lock_free());
+  }
+  ASSERT_NOEXCEPT(a.is_lock_free());
+}
+
+void test() {
+  int i = 0;
+  checkAlwaysLockFree(std::atomic_ref<int>(i));
+
+  float f = 0.f;
+  checkAlwaysLockFree(std::atomic_ref<float>(f));
+
+  int* p = &i;
+  checkAlwaysLockFree(std::atomic_ref<int*>(p));
+
+  struct X {
+  } x;
+  checkAlwaysLockFree(std::atomic_ref<X>(x));
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/load.pass.cpp b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
new file mode 100644
index 00000000000000..fe3aaaf1edcf8a
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
@@ -0,0 +1,48 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// T load(memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_load() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.load() == T(1));
+  ASSERT_NOEXCEPT(a.load());
+
+  assert(a.load(std::memory_order_seq_cst) == T(1));
+  ASSERT_NOEXCEPT(a.load(std::memory_order_seq_cst));
+}
+
+void test() {
+  test_load<int>();
+
+  test_load<float>();
+
+  test_load<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_load<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
new file mode 100644
index 00000000000000..d4e2f0126d6216
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
@@ -0,0 +1,132 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+//    using value_type = T;
+//    using difference_type = value_type;      // only for atomic_ref<Integral> and
+//                                             // atomic_ref<Floating> specializations
+//    using difference_type = std::ptrdiff_t;  // only for atomic_ref<T*> specializations
+//
+//    explicit atomic_ref(T&);
+//    atomic_ref(const atomic_ref&) noexcept;
+//    atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
+#include <atomic>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <class T>
+concept has_difference_type = requires { typename T::difference_type; };
+
+template <class T>
+void check_member_types() {
+  if constexpr ((std::is_integral_v<T> && !std::is_same_v<T, bool>) || std::is_floating_point_v<T>) {
+    ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+    ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, T);
+  } else if constexpr (std::is_pointer_v<T>) {
+    ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+    ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, std::ptrdiff_t);
+  } else {
+    ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+    static_assert(!has_difference_type<std::atomic_ref<T>>);
+  }
+}
+
+template <class T>
+void test() {
+  // value_type and difference_type (except for primary template)
+  check_member_types<T>();
+
+  static_assert(std::is_nothrow_copy_constructible_v<std::atomic_ref<T>>);
+
+  static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+
+  // explicit constructor
+  static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+  static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+}
+
+void testall() {
+  // Primary template
+  struct Empty {};
+  test<Empty>();
+  struct Trivial {
+    int a;
+    float b;
+  };
+  test<Trivial>();
+  test<bool>();
+
+  // Partial specialization for pointer types
+  test<void*>();
+
+  // Specialization for integral types
+  // + character types
+  test<char>();
+  test<char8_t>();
+  test<char16_t>();
+  test<char32_t>();
+  test<wchar_t>();
+  // + standard signed integer types
+  test<signed char>();
+  test<short>();
+  test<int>();
+  test<long>();
+  test<long long>();
+  // + standard unsigned integer types
+  test<unsigned char>();
+  test<unsigned short>();
+  test<unsigned int>();
+  test<unsigned long>();
+  test<unsigned long long>();
+  // + any other types needed by the typedefs in the header <cstdint>
+  test<int8_t>();
+  test<int16_t>();
+  test<int32_t>();
+  test<int64_t>();
+  test<int_fast8_t>();
+  test<int_fast16_t>();
+  test<int_fast32_t>();
+  test<int_fast64_t>();
+  test<int_least8_t>();
+  test<int_least16_t>();
+  test<int_least32_t>();
+  test<int_least64_t>();
+  test<intmax_t>();
+  test<intptr_t>();
+  test<uint8_t>();
+  test<uint16_t>();
+  test<uint32_t>();
+  test<uint64_t>();
+  test<uint_fast8_t>();
+  test<uint_fast16_t>();
+  test<uint_fast32_t>();
+  test<uint_fast64_t>();
+  test<uint_least8_t>();
+  test<uint_least16_t>();
+  test<uint_least32_t>();
+  test<uint_least64_t>();
+  test<uintmax_t>();
+  test<uintptr_t>();
+
+  // Specialization for floating-point types
+  // + floating-point types
+  test<float>();
+  test<double>();
+  test<long double>();
+  // + TODO extended floating-point types
+}
+
+int main(int, char**) { testall(); return 0; }
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
new file mode 100644
index 00000000000000..3ea9de240a192a
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
@@ -0,0 +1,86 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// void notify_all() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+void test_notify_all() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  bool done                      = false;
+  std::atomic<int> started_num   = 0;
+  std::atomic<int> wait_done_num = 0;
+
+  constexpr auto number_of_threads = 8;
+  std::vector<std::thread> threads;
+  threads.reserve(number_of_threads);
+
+  for (auto j = 0; j < number_of_threads; ++j) {
+    threads.push_back(support::make_test_thread([&a, &started_num, &done, &wait_done_num] {
+      started_num.fetch_add(1, std::memory_order::relaxed);
+
+      a.wait(T(1));
+      wait_done_num.fetch_add(1, std::memory_order::relaxed);
+
+      // likely to fail if wait did not block
+      assert(done);
+    }));
+  }
+
+  while (started_num.load(std::memory_order::relaxed) != number_of_threads) {
+    std::this_thread::yield();
+  }
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+  done = true;
+  a.store(T(3));
+  a.notify_all();
+
+  // notify_all should unblock all the threads so that the loop below won't get stuck
+  while (wait_done_num.load(std::memory_order::relaxed) != number_of_threads) {
+    std::this_thread::yield();
+  }
+
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  ASSERT_NOEXCEPT(a.notify_all());
+}
+
+void test() {
+  test_notify_all<int>();
+
+  test_notify_all<float>();
+
+  test_notify_all<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_notify_all<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
new file mode 100644
index 00000000000000..a3ae251ab770c8
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// void notify_one() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+void test_notify_one() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  std::thread t = support::make_test_thread([&]() {
+    a.store(T(3));
+    a.notify_one();
+  });
+  a.wait(T(1));
+  assert(a.load() == T(3));
+  t.join();
+  ASSERT_NOEXCEPT(a.notify_one());
+}
+
+void test() {
+  test_notify_one<int>();
+
+  test_notify_one<float>();
+
+  test_notify_one<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_notify_one<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
new file mode 100644
index 00000000000000..2570d07da0f582
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
@@ -0,0 +1,64 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator-=(integral-type) const noexcept;
+// floating-point-type operator-=(floating-point-type) const noexcept;
+// T* operator-=(difference_type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_minus_equals = requires { std::declval<T const>() -= std::declval<T>(); };
+
+static_assert(!has_operator_minus_equals<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_operator_minus_equals<std::atomic_ref<X>>);
+
+template <typename T>
+void test_arithmetic() {
+  T x(T(3));
+  std::atomic_ref<T> a(x);
+
+  assert((a -= T(2)) == T(1));
+  assert(x == T(1));
+  ASSERT_NOEXCEPT(a -= T(0));
+}
+
+template <typename T>
+void test_pointer() {
+  using X = std::remove_pointer_t<T>;
+  X t[9]  = {};
+  T p{&t[3]};
+  std::atomic_ref<T> a(p);
+
+  assert((a -= 2) == &t[1]);
+  assert(a == &t[1]);
+  ASSERT_NOEXCEPT(a -= 0);
+}
+
+void test() {
+  test_arithmetic<int>();
+  test_arithmetic<float>();
+
+  test_pointer<int*>();
+  test_pointer<const int*>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
new file mode 100644
index 00000000000000..7c097e568349c7
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
@@ -0,0 +1,64 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// integral-type operator+=(integral-type) const noexcept;
+// floating-point-type operator+=(floating-point-type) const noexcept;
+// T* operator+=(difference_type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_plus_equals = requires { std::declval<T const>() += std::declval<T>(); };
+
+static_assert(!has_operator_plus_equals<std::atomic_ref<bool>>);
+struct X {
+  int i;
+  X(int ii) noexcept : i(ii) {}
+  bool operator==(X o) const { return i == o.i; }
+};
+static_assert(!has_operator_plus_equals<std::atomic_ref<X>>);
+
+template <typename T>
+void test_arithmetic() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert((a += T(2)) == T(3));
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a += T(0));
+}
+
+template <typename T>
+void test_pointer() {
+  using X = std::remove_pointer_t<T>;
+  X t[9]  = {};
+  T p{&t[1]};
+  std::atomic_ref<T> a(p);
+
+  assert((a += 2) == &t[3]);
+  assert(a == &t[3]);
+  ASSERT_NOEXCEPT(a += 0);
+}
+
+void test() {
+  test_arithmetic<int>();
+  test_arithmetic<float>();
+
+  test_pointer<int*>();
+  test_pointer<const int*>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
new file mode 100644
index 00000000000000..341c6d18707cdd
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// static constexpr size_t required_alignment;
+
+#include <atomic>
+
+template <typename T>
+void checkRequiredAlignment() {
+  static_assert(std::atomic_ref<T>::required_alignment >= alignof(T));
+}
+
+void test() {
+  checkRequiredAlignment<int>();
+  checkRequiredAlignment<float>();
+  checkRequiredAlignment<int*>();
+  struct Empty {};
+  checkRequiredAlignment<Empty>();
+  struct Trivial {
+    int a;
+  };
+  checkRequiredAlignment<Trivial>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/store.pass.cpp b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
new file mode 100644
index 00000000000000..102311bb43b236
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
@@ -0,0 +1,50 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// void store(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <typename T>
+void test_store() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  a.store(T(2));
+  assert(x == T(2));
+  ASSERT_NOEXCEPT(a.store(T(1)));
+
+  a.store(T(3), std::memory_order_seq_cst);
+  assert(x == T(3));
+  ASSERT_NOEXCEPT(a.store(T(0), std::memory_order_seq_cst));
+}
+
+void test() {
+  test_store<int>();
+
+  test_store<float>();
+
+  test_store<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_store<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/type.verify.cpp b/libcxx/test/std/atomics/atomics.ref/type.verify.cpp
new file mode 100644
index 00000000000000..9a8b036ffd1f8c
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/type.verify.cpp
@@ -0,0 +1,26 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// template<class T>
+// class atomic_ref;
+
+// The program is ill-formed if is_trivially_copyable_v<T> is false.
+
+#include <atomic>
+
+void trivially_copyable() {
+  struct X {
+    X() = default;
+    X(X const&) {} // -> not trivially copyable
+  } x;
+  // expected-error-re@*:* {{static assertion failed {{.*}}atomic_ref<T> requires that 'T' be a trivially copyable type}}
+  std::atomic_ref<X> r(x);
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
new file mode 100644
index 00000000000000..51d3a79dd59a5e
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
@@ -0,0 +1,65 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// void wait(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+void test_wait() {
+  T x(T(1));
+  std::atomic_ref<T> a(x);
+
+  assert(a.load() == T(1));
+  a.wait(T(0));
+  std::thread t1 = support::make_test_thread([&]() {
+    a.store(T(3));
+    a.notify_one();
+  });
+  a.wait(T(1));
+  assert(a.load() == T(3));
+  t1.join();
+  ASSERT_NOEXCEPT(a.wait(T(0)));
+
+  assert(a.load() == T(3));
+  a.wait(T(0), std::memory_order_seq_cst);
+  std::thread t2 = support::make_test_thread([&]() {
+    a.store(T(5));
+    a.notify_one();
+  });
+  a.wait(T(3), std::memory_order_seq_cst);
+  assert(a.load() == T(5));
+  t2.join();
+  ASSERT_NOEXCEPT(a.wait(T(0), std::memory_order_seq_cst));
+}
+
+void test() {
+  test_wait<int>();
+
+  test_wait<float>();
+
+  test_wait<int*>();
+
+  struct X {
+    int i;
+    X(int ii) noexcept : i(ii) {}
+    bool operator==(X o) const { return i == o.i; }
+  };
+  test_wait<X>();
+}
+
+int main(int, char**) {
+  test();
+  return 0;
+}

>From 3147ebfd8afe77a7ca79f6d32a62d15040cacf8e Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sun, 31 Dec 2023 08:41:09 -0500
Subject: [PATCH 06/12] [libc++][atomic_ref] Fix shadow warning
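
The tests touched below each define a file-scope helper struct X for the
negative has_* checks, and their test_pointer() then introduces a local
type alias with the same name, which -Wshadow flags. A minimal sketch of
the shadowing pattern, with names taken from fetch_add.pass.cpp as it
stands before this patch (not new code):

  #include <type_traits>

  // file-scope helper used by the has_fetch_add static_asserts
  struct X {
    int i;
    X(int ii) noexcept : i(ii) {}
    bool operator==(X o) const { return i == o.i; }
  };

  template <typename T>
  void test_pointer() {
    using X = std::remove_pointer_t<T>; // -Wshadow: hides the struct X above
    X t[9] = {};                        // here X means remove_pointer_t<T>, not the helper
    (void)t;
  }

Renaming the local alias to U keeps the helper visible and silences the
warning without changing behavior.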

---
 libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp        | 4 ++--
 libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp        | 4 ++--
 .../std/atomics/atomics.ref/operator_minus_equals.pass.cpp    | 4 ++--
 .../std/atomics/atomics.ref/operator_plus_equals.pass.cpp     | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
index 439d5a305dd6b7..84ad4f54a6b5cd 100644
--- a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
@@ -47,8 +47,8 @@ void test_arithmetic() {
 
 template <typename T>
 void test_pointer() {
-  using X = std::remove_pointer_t<T>;
-  X t[9]  = {};
+  using U = std::remove_pointer_t<T>;
+  U t[9]  = {};
   T p{&t[1]};
   std::atomic_ref<T> a(p);
 
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
index e9fc7ea7cb5ff1..031f6f78a37c0c 100644
--- a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
@@ -47,8 +47,8 @@ void test_arithmetic() {
 
 template <typename T>
 void test_pointer() {
-  using X = std::remove_pointer_t<T>;
-  X t[9]  = {};
+  using U = std::remove_pointer_t<T>;
+  U t[9]  = {};
   T p{&t[7]};
   std::atomic_ref<T> a(p);
 
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
index 2570d07da0f582..e7e25efb7acd27 100644
--- a/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
@@ -40,8 +40,8 @@ void test_arithmetic() {
 
 template <typename T>
 void test_pointer() {
-  using X = std::remove_pointer_t<T>;
-  X t[9]  = {};
+  using U = std::remove_pointer_t<T>;
+  U t[9]  = {};
   T p{&t[3]};
   std::atomic_ref<T> a(p);
 
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
index 7c097e568349c7..1aeadffafaef84 100644
--- a/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
@@ -40,8 +40,8 @@ void test_arithmetic() {
 
 template <typename T>
 void test_pointer() {
-  using X = std::remove_pointer_t<T>;
-  X t[9]  = {};
+  using U = std::remove_pointer_t<T>;
+  U t[9]  = {};
   T p{&t[1]};
   std::atomic_ref<T> a(p);
 

>From 00de27857a3a9743ddc48e5721b7d918795f074d Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sun, 31 Dec 2023 21:51:25 -0500
Subject: [PATCH 07/12] [libc++][atomic_ref] Reimplement atomic_ref in terms of
 the GCC __atomic builtins
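
The generic __atomic builtins (available in both GCC and Clang) take
pointers and accept any trivially copyable type, so the wrapper can hold a
plain _Tp* instead of going through __cxx_atomic_impl; operations that
return a value, such as load and exchange, write their result into a
suitably aligned local buffer. A minimal standalone sketch of that pattern,
outside the patch (the Widget type and relaxed_load helper are illustrative
only):

  struct Widget { int a; float b; };  // stand-in for any trivially copyable type

  Widget relaxed_load(Widget* p) {
    alignas(Widget) unsigned char mem[sizeof(Widget)];
    auto* ret = reinterpret_cast<Widget*>(mem);
    __atomic_load(p, ret, __ATOMIC_RELAXED);  // generic builtin writes the value through ret
    return *ret;
  }

The compare-exchange overloads map onto __atomic_compare_exchange, whose
fourth argument selects the weak (true) or strong (false) form.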

---
 libcxx/include/__atomic/atomic_ref.h | 77 ++++++++++++++++++++--------
 1 file changed, 55 insertions(+), 22 deletions(-)

diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
index b4299804eccc31..aaf8b1a6420f02 100644
--- a/libcxx/include/__atomic/atomic_ref.h
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -19,7 +19,6 @@
 
 #include <__assert>
 #include <__atomic/check_memory_order.h>
-#include <__atomic/cxx_atomic_impl.h>
 #include <__atomic/is_always_lock_free.h>
 #include <__config>
 #include <__memory/addressof.h>
@@ -45,7 +44,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 template <class _Tp, bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>, bool = is_floating_point_v<_Tp>>
 struct __atomic_ref_base {
-  mutable __cxx_atomic_impl<_Tp&> __a_;
+  _Tp* __ptr_;
 
   using value_type = _Tp;
 
@@ -60,7 +59,7 @@ struct __atomic_ref_base {
     _LIBCPP_ASSERT_UNCATEGORIZED(
         __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
         "memory order argument to atomic store operation is invalid");
-    __cxx_atomic_store(&__a_, __desired, __order);
+    __atomic_store(__ptr_, std::addressof(__desired), __to_gcc_order(__order));
   }
 
   _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
@@ -74,13 +73,19 @@ struct __atomic_ref_base {
         __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
             __order == memory_order::seq_cst,
         "memory order argument to atomic load operation is invalid");
-    return __cxx_atomic_load(&__a_, __order);
+    alignas(_Tp) unsigned char __mem[sizeof(_Tp)];
+    auto* __ret = reinterpret_cast<_Tp*>(__mem);
+    __atomic_load(__ptr_, __ret, __to_gcc_order(__order));
+    return *__ret;
   }
 
   _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }
 
   _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
-    return __cxx_atomic_exchange(&__a_, __desired, __order);
+    alignas(_Tp) unsigned char __mem[sizeof(_Tp)];
+    auto* __ret = reinterpret_cast<_Tp*>(__mem);
+    __atomic_exchange(__ptr_, std::addressof(__desired), __ret, __to_gcc_order(__order));
+    return *__ret;
   }
   _LIBCPP_HIDE_FROM_ABI bool
   compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
@@ -89,7 +94,13 @@ struct __atomic_ref_base {
         __failure == memory_order::relaxed || __failure == memory_order::consume ||
             __failure == memory_order::acquire || __failure == memory_order::seq_cst,
         "failure memory order argument to weak atomic compare-and-exchange operation is invalid");
-    return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __success, __failure);
+    return __atomic_compare_exchange(
+        __ptr_,
+        std::addressof(__expected),
+        std::addressof(__desired),
+        true,
+        __to_gcc_order(__success),
+        __to_gcc_order(__failure));
   }
   _LIBCPP_HIDE_FROM_ABI bool
   compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
@@ -98,16 +109,34 @@ struct __atomic_ref_base {
         __failure == memory_order::relaxed || __failure == memory_order::consume ||
             __failure == memory_order::acquire || __failure == memory_order::seq_cst,
         "failure memory order argument to strong atomic compare-and-exchange operation is invalid");
-    return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __success, __failure);
+    return __atomic_compare_exchange(
+        __ptr_,
+        std::addressof(__expected),
+        std::addressof(__desired),
+        false,
+        __to_gcc_order(__success),
+        __to_gcc_order(__failure));
   }
 
   _LIBCPP_HIDE_FROM_ABI bool
   compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
-    return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __order, __order);
+    return __atomic_compare_exchange(
+        __ptr_,
+        std::addressof(__expected),
+        std::addressof(__desired),
+        true,
+        __to_gcc_order(__order),
+        __to_gcc_failure_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI bool
   compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
-    return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __order, __order);
+    return __atomic_compare_exchange(
+        __ptr_,
+        std::addressof(__expected),
+        std::addressof(__desired),
+        false,
+        __to_gcc_order(__order),
+        __to_gcc_failure_order(__order));
   }
 
   _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
@@ -116,12 +145,16 @@ struct __atomic_ref_base {
         __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
             __order == memory_order::seq_cst,
         "memory order argument to atomic wait operation is invalid");
-    __cxx_atomic_wait(addressof(__a_), __old, __order);
+    // FIXME
+  }
+  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept {
+    // FIXME
+  }
+  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept {
+    // FIXME
   }
-  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { __cxx_atomic_notify_one(addressof(__a_)); }
-  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { __cxx_atomic_notify_all(addressof(__a_)); }
 
-  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __a_(__obj) {}
+  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(&__obj) {}
 };
 
 template <class _Tp>
@@ -136,19 +169,19 @@ struct __atomic_ref_base<_Tp, /*_IsIntegral=*/true, /*_IsFloatingPoint=*/false>
   _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
 
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+    return __atomic_fetch_add(this->__ptr_, __arg, __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+    return __atomic_fetch_sub(this->__ptr_, __arg, __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_and(&this->__a_, __arg, __order);
+    return __atomic_fetch_and(this->__ptr_, __arg, __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_or(&this->__a_, __arg, __order);
+    return __atomic_fetch_or(this->__ptr_, __arg, __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_xor(&this->__a_, __arg, __order);
+    return __atomic_fetch_xor(this->__ptr_, __arg, __to_gcc_order(__order));
   }
 
   _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
@@ -174,10 +207,10 @@ struct __atomic_ref_base<_Tp, /*_IsIntegral=*/false, /*_IsFloatingPoint=*/true>
   _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
 
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+    return __atomic_fetch_add(this->__ptr_, __arg, __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+    return __atomic_fetch_sub(this->__ptr_, __arg, __to_gcc_order(__order));
   }
 
   _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
@@ -209,10 +242,10 @@ struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
   using difference_type = ptrdiff_t;
 
   _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+    return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), __to_gcc_order(__order));
   }
   _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
-    return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+    return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), __to_gcc_order(__order));
   }
 
   _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }

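The reimplementation above leans entirely on the generic GCC/Clang __atomic builtins, which operate through pointers to the referenced object; that is why __atomic_ref_base only needs to hold a _Tp* __ptr_. A rough, self-contained sketch of the compare-exchange mapping it relies on (assuming a toolchain that provides these builtins):

    #include <cassert>

    int main() {
      int value    = 0;
      int expected = 0;
      int desired  = 1;
      // The 4th argument selects weak (true) vs strong (false) compare-exchange.
      bool ok = __atomic_compare_exchange(&value, &expected, &desired,
                                          /*weak=*/false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      assert(ok && value == 1); // on failure, expected holds the observed value
      return 0;
    }
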
>From 67614969d284db3c6b1f92205f9d657e55753b54 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sun, 31 Dec 2023 21:53:56 -0500
Subject: [PATCH 08/12] [libc++][atomic_ref] Revert all changes to
 __atomic/cxx_atomic_impl.h

---
 libcxx/include/__atomic/cxx_atomic_impl.h | 423 ++++++++++++++++++----
 1 file changed, 350 insertions(+), 73 deletions(-)

diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 56cd703c258944..1a0b808a0cb1c4 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -15,12 +15,8 @@
 #include <__memory/addressof.h>
 #include <__type_traits/conditional.h>
 #include <__type_traits/is_assignable.h>
-#include <__type_traits/is_pointer.h>
-#include <__type_traits/is_reference.h>
 #include <__type_traits/is_trivially_copyable.h>
-#include <__type_traits/is_volatile.h>
 #include <__type_traits/remove_const.h>
-#include <__type_traits/remove_reference.h>
 #include <cstddef>
 #include <cstring>
 
@@ -62,27 +58,9 @@ struct __cxx_atomic_base_impl {
   }
 #  endif // _LIBCPP_CXX03_LANG
   _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
-  using __contained_t = _Tp;
   _Tp __a_value;
 };
 
-template <typename _Tp, template <typename> class _TemplateTp>
-struct __is_instantiation_of : false_type {};
-
-template <typename _Tp, template <typename> class _TemplateTp>
-struct __is_instantiation_of<_TemplateTp<_Tp>, _TemplateTp> : true_type {};
-
-template <typename _Tp,
-          typename = typename enable_if<__is_instantiation_of<typename remove_volatile<typename _Tp::__base>::type,
-                                                              __cxx_atomic_base_impl>::value,
-                                        bool>::type>
-struct __cxx_atomic_base_impl_traits {
-  static constexpr bool __is_value_volatile = is_volatile<_Tp>::value;
-  static constexpr bool __is_value_ref      = is_reference<typename _Tp::__contained_t>::value;
-  using __underlying_t = typename remove_volatile<typename remove_reference<typename _Tp::__contained_t>::type>::type;
-  static constexpr bool __is_value_pointer = is_pointer<__underlying_t>::value;
-};
-
 _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
   // Avoid switch statement to make this a constexpr.
   return __order == memory_order_relaxed
@@ -109,15 +87,13 @@ _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory
                                 : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
 }
 
-template <typename _Tp, typename enable_if<__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
   __cxx_atomic_assign_volatile(__a->__a_value, __val);
 }
 
-template <typename _Tp, typename enable_if<!__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
   __a->__a_value = __val;
 }
 
@@ -131,44 +107,63 @@ _LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order
 
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val, memory_order __order) {
+__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
   __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
-__cxx_atomic_load(const _Tp* __a, memory_order __order) {
-  using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
-  alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
-  __atomic_load(
-      std::addressof(__a->__a_value), std::addressof(*reinterpret_cast<_Ret*>(__mem)), __to_gcc_order(__order));
-  return *reinterpret_cast<_Ret*>(__mem);
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load_inplace(
-    const _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __dst, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+  _Tp __ret;
+  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
+  return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
   __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_exchange(
-    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value, memory_order __order) {
-  using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
-  alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+  _Tp __ret;
+  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
+  return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
+  _Tp __ret;
   __atomic_exchange(
-      std::addressof(__a->__a_value),
-      std::addressof(__value),
-      std::addressof(*reinterpret_cast<_Ret*>(__mem)),
-      __to_gcc_order(__order));
-  return *reinterpret_cast<_Ret*>(__mem);
+      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
+  return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
+  _Tp __ret;
+  __atomic_exchange(
+      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
+  return __ret;
 }
 
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
-    _Tp* __a,
-    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
-    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
+    volatile __cxx_atomic_base_impl<_Tp>* __a,
+    _Tp* __expected,
+    _Tp __value,
     memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(
@@ -180,11 +175,23 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
       __to_gcc_failure_order(__failure));
 }
 
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+  return __atomic_compare_exchange(
+      std::addressof(__a->__a_value),
+      __expected,
+      std::addressof(__value),
+      false,
+      __to_gcc_order(__success),
+      __to_gcc_failure_order(__failure));
+}
+
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
-    _Tp* __a,
-    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
-    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
+    volatile __cxx_atomic_base_impl<_Tp>* __a,
+    _Tp* __expected,
+    _Tp __value,
     memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(
@@ -196,6 +203,18 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
       __to_gcc_failure_order(__failure));
 }
 
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+  return __atomic_compare_exchange(
+      std::addressof(__a->__a_value),
+      __expected,
+      std::addressof(__value),
+      true,
+      __to_gcc_order(__success),
+      __to_gcc_failure_order(__failure));
+}
+
 template <typename _Tp>
 struct __skip_amt {
   enum { value = 1 };
@@ -214,44 +233,302 @@ template <typename _Tp, int n>
 struct __skip_amt<_Tp[n]> {};
 
 template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
-__cxx_atomic_fetch_add(_Tp* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_add(
-      std::addressof(__a->__a_value),
-      __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
-      __to_gcc_order(__order));
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
 }
 
 template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
-__cxx_atomic_fetch_sub(_Tp* __a, _Td __delta, memory_order __order) {
-  return __atomic_fetch_sub(
-      std::addressof(__a->__a_value),
-      __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
-      __to_gcc_order(__order));
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_and(
-    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
   return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_or(
-    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
   return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_fetch_xor(
-    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
   return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
 }
 
 #  define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
 
-#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP
+#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
+
+template <typename _Tp>
+struct __cxx_atomic_base_impl {
+  _LIBCPP_HIDE_FROM_ABI
+#  ifndef _LIBCPP_CXX03_LANG
+  __cxx_atomic_base_impl() _NOEXCEPT = default;
+#  else
+  __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {
+  }
+#  endif // _LIBCPP_CXX03_LANG
+  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp __value) _NOEXCEPT : __a_value(__value) {}
+  _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
+};
+
+#  define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
+
+_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
+  __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
+  __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
+  __c11_atomic_init(std::addressof(__a->__a_value), __val);
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) _NOEXCEPT {
+  __c11_atomic_init(std::addressof(__a->__a_value), __val);
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
+  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) _NOEXCEPT {
+  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  return __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  return __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const volatile* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  *__dst           = __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+  *__dst           = __c11_atomic_load(
+      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_exchange(
+      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_exchange(
+      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
+  // Avoid switch statement to make this a constexpr.
+  return __order == memory_order_release
+           ? memory_order_relaxed
+           : (__order == memory_order_acq_rel ? memory_order_acquire : __order);
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+    __cxx_atomic_base_impl<_Tp> volatile* __a,
+    _Tp* __expected,
+    _Tp __value,
+    memory_order __success,
+    memory_order __failure) _NOEXCEPT {
+  return __c11_atomic_compare_exchange_strong(
+      std::addressof(__a->__a_value),
+      __expected,
+      __value,
+      static_cast<__memory_order_underlying_t>(__success),
+      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
+    _NOEXCEPT {
+  return __c11_atomic_compare_exchange_strong(
+      std::addressof(__a->__a_value),
+      __expected,
+      __value,
+      static_cast<__memory_order_underlying_t>(__success),
+      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+    __cxx_atomic_base_impl<_Tp> volatile* __a,
+    _Tp* __expected,
+    _Tp __value,
+    memory_order __success,
+    memory_order __failure) _NOEXCEPT {
+  return __c11_atomic_compare_exchange_weak(
+      std::addressof(__a->__a_value),
+      __expected,
+      __value,
+      static_cast<__memory_order_underlying_t>(__success),
+      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure)
+    _NOEXCEPT {
+  return __c11_atomic_compare_exchange_weak(
+      std::addressof(__a->__a_value),
+      __expected,
+      __value,
+      static_cast<__memory_order_underlying_t>(__success),
+      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_add(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_add(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp*
+__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_add(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp*
+__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_add(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_sub(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_sub(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp*
+__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_sub(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp*
+__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_sub(
+      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_and(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_and(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_or(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_or(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_xor(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+  return __c11_atomic_fetch_xor(
+      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
+
 #ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
 
 template <typename _Tp>
@@ -536,7 +813,7 @@ template <typename _Tp,
 template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
 #endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
 struct __cxx_atomic_impl : public _Base {
-  using __base = _Base;
+  static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
 
   _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
   _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}

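One detail worth noting in the restored file: __skip_amt scales the delta by sizeof(T) for pointer specializations because the __atomic_fetch_add/__atomic_fetch_sub builtins do plain byte arithmetic on pointer operands, the same reason atomic_ref<_Tp*>::fetch_add above multiplies by sizeof(_Tp). A minimal illustration (assuming the GCC/Clang builtins):

    #include <cassert>

    int main() {
      int arr[4] = {};
      int* p     = arr;
      // The builtin adds raw bytes, so the caller scales by sizeof(int)
      // to get element-wise pointer arithmetic.
      __atomic_fetch_add(&p, 2 * sizeof(int), __ATOMIC_SEQ_CST);
      assert(p == &arr[2]);
      return 0;
    }
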
>From 3ab86e5bc2ea6a6875cc8512e7b5b0e13fabb1a2 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Sun, 31 Dec 2023 23:05:18 -0500
Subject: [PATCH 09/12] [libc++][atomic_ref] Forgot to revert changes to
 __config

---
 libcxx/include/__config | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/libcxx/include/__config b/libcxx/include/__config
index 03e2eb729cb708..adff13e714cb64 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1128,9 +1128,7 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c
 
 #  if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic)
 #    define _LIBCPP_HAS_C_ATOMIC_IMP
-#  endif
-
-#  if defined(_LIBCPP_COMPILER_GCC) || (__has_builtin(__atomic_load) && __has_builtin(__atomic_store) && __has_builtin(__atomic_exchange) && __has_builtin(__atomic_compare_exchange))
+#  elif defined(_LIBCPP_COMPILER_GCC)
 #    define _LIBCPP_HAS_GCC_ATOMIC_IMP
 #  endif
 

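The net effect of restoring the #elif is that the two backend macros are mutually exclusive again, so __atomic/cxx_atomic_impl.h selects exactly one implementation. Roughly (a sketch of the dispatch, not a literal excerpt):

    #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
      // __cxx_atomic_base_impl stores a plain _Tp and calls the __atomic_* builtins
    #elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
      // __cxx_atomic_base_impl stores _Atomic(_Tp) and calls the __c11_atomic_* builtins
    #endif
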
>From 8927c8eee8145e75873ad5ceb718d92ae50fe4e7 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Mon, 1 Jan 2024 11:20:44 -0500
Subject: [PATCH 10/12] [libc++][atomic_ref] Move __to_gcc_[failure_]order to
 its own header file

---
 libcxx/include/CMakeLists.txt             |  1 +
 libcxx/include/__atomic/atomic_ref.h      |  1 +
 libcxx/include/__atomic/cxx_atomic_impl.h | 27 +-----------
 libcxx/include/__atomic/to_gcc_order.h    | 53 +++++++++++++++++++++++
 libcxx/include/module.modulemap.in        |  1 +
 5 files changed, 57 insertions(+), 26 deletions(-)
 create mode 100644 libcxx/include/__atomic/to_gcc_order.h

diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 5d7e4ca98b1f75..2a83dda25b2f38 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -245,6 +245,7 @@ set(files
   __atomic/is_always_lock_free.h
   __atomic/kill_dependency.h
   __atomic/memory_order.h
+  __atomic/to_gcc_order.h
   __availability
   __bit/bit_cast.h
   __bit/bit_ceil.h
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
index aaf8b1a6420f02..9a2659c8ed1cf9 100644
--- a/libcxx/include/__atomic/atomic_ref.h
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -20,6 +20,7 @@
 #include <__assert>
 #include <__atomic/check_memory_order.h>
 #include <__atomic/is_always_lock_free.h>
+#include <__atomic/to_gcc_order.h>
 #include <__config>
 #include <__memory/addressof.h>
 #include <__type_traits/is_floating_point.h>
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 1a0b808a0cb1c4..be5c1497873cca 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -11,6 +11,7 @@
 
 #include <__atomic/is_always_lock_free.h>
 #include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
 #include <__config>
 #include <__memory/addressof.h>
 #include <__type_traits/conditional.h>
@@ -61,32 +62,6 @@ struct __cxx_atomic_base_impl {
   _Tp __a_value;
 };
 
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
-  // Avoid switch statement to make this a constexpr.
-  return __order == memory_order_relaxed
-           ? __ATOMIC_RELAXED
-           : (__order == memory_order_acquire
-                  ? __ATOMIC_ACQUIRE
-                  : (__order == memory_order_release
-                         ? __ATOMIC_RELEASE
-                         : (__order == memory_order_seq_cst
-                                ? __ATOMIC_SEQ_CST
-                                : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
-  // Avoid switch statement to make this a constexpr.
-  return __order == memory_order_relaxed
-           ? __ATOMIC_RELAXED
-           : (__order == memory_order_acquire
-                  ? __ATOMIC_ACQUIRE
-                  : (__order == memory_order_release
-                         ? __ATOMIC_RELAXED
-                         : (__order == memory_order_seq_cst
-                                ? __ATOMIC_SEQ_CST
-                                : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
-}
-
 template <typename _Tp>
 _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
   __cxx_atomic_assign_volatile(__a->__a_value, __val);
diff --git a/libcxx/include/__atomic/to_gcc_order.h b/libcxx/include/__atomic/to_gcc_order.h
new file mode 100644
index 00000000000000..550c495a7185b1
--- /dev/null
+++ b/libcxx/include/__atomic/to_gcc_order.h
@@ -0,0 +1,53 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+#define _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) &&                             \
+    defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST)
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
+  // Avoid switch statement to make this a constexpr.
+  return __order == memory_order_relaxed
+           ? __ATOMIC_RELAXED
+           : (__order == memory_order_acquire
+                  ? __ATOMIC_ACQUIRE
+                  : (__order == memory_order_release
+                         ? __ATOMIC_RELEASE
+                         : (__order == memory_order_seq_cst
+                                ? __ATOMIC_SEQ_CST
+                                : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
+  // Avoid switch statement to make this a constexpr.
+  return __order == memory_order_relaxed
+           ? __ATOMIC_RELAXED
+           : (__order == memory_order_acquire
+                  ? __ATOMIC_ACQUIRE
+                  : (__order == memory_order_release
+                         ? __ATOMIC_RELAXED
+                         : (__order == memory_order_seq_cst
+                                ? __ATOMIC_SEQ_CST
+                                : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
+}
+
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC_TO_GCC_ORDER_H
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index d10670d4faaffc..adb3eed0cb0c0b 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -1103,6 +1103,7 @@ module std_private_atomic_fence               [system] { header "__atomic/fence.
 module std_private_atomic_is_always_lock_free [system] { header "__atomic/is_always_lock_free.h" }
 module std_private_atomic_kill_dependency     [system] { header "__atomic/kill_dependency.h" }
 module std_private_atomic_memory_order        [system] { header "__atomic/memory_order.h" }
+module std_private_atomic_to_gcc_order        [system] { header "__atomic/to_gcc_order.h" }
 
 module std_private_bit_bit_cast       [system] { header "__bit/bit_cast.h" }
 module std_private_bit_bit_ceil       [system] { header "__bit/bit_ceil.h" }

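The less obvious of the two helpers is __to_gcc_failure_order: a failed compare-exchange performs no store, so any release component is stripped. A self-contained sketch of the intended mapping (to_gcc_failure_order_sketch is a hypothetical local copy for illustration; the real helper is libc++-internal):

    #include <atomic>

    constexpr int to_gcc_failure_order_sketch(std::memory_order order) {
      return order == std::memory_order_release ? __ATOMIC_RELAXED
           : order == std::memory_order_acq_rel ? __ATOMIC_ACQUIRE
           : order == std::memory_order_relaxed ? __ATOMIC_RELAXED
           : order == std::memory_order_acquire ? __ATOMIC_ACQUIRE
           : order == std::memory_order_seq_cst ? __ATOMIC_SEQ_CST
                                                : __ATOMIC_CONSUME;
    }
    static_assert(to_gcc_failure_order_sketch(std::memory_order_release) == __ATOMIC_RELAXED, "");
    static_assert(to_gcc_failure_order_sketch(std::memory_order_acq_rel) == __ATOMIC_ACQUIRE, "");
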
>From bc6a18760e2bc9f58b275611b076033884c4ded1 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Mon, 1 Jan 2024 11:22:34 -0500
Subject: [PATCH 11/12] [libc++][atomic_ref] Fixup is[_always]_lock_free

---
 libcxx/include/__atomic/atomic_ref.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
index 9a2659c8ed1cf9..febe1d8d78adad 100644
--- a/libcxx/include/__atomic/atomic_ref.h
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -19,7 +19,6 @@
 
 #include <__assert>
 #include <__atomic/check_memory_order.h>
-#include <__atomic/is_always_lock_free.h>
 #include <__atomic/to_gcc_order.h>
 #include <__config>
 #include <__memory/addressof.h>
@@ -51,9 +50,9 @@ struct __atomic_ref_base {
 
   static constexpr size_t required_alignment = alignof(_Tp);
 
-  static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<_Tp>::__value;
+  static constexpr bool is_always_lock_free = __atomic_always_lock_free(sizeof(_Tp), 0);
 
-  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __cxx_atomic_is_lock_free(sizeof(_Tp)); }
+  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }
 
   _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
       _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {

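The distinction between the two builtins used here: __atomic_always_lock_free(sizeof(_Tp), 0) is a compile-time answer that ignores the actual address, while __atomic_is_lock_free(sizeof(_Tp), __ptr_) may also take the referenced object's runtime address and alignment into account. Illustrative usage (assuming C++20 std::atomic_ref and a typical target where an aligned int is lock-free):

    #include <atomic>
    #include <cassert>

    int main() {
      int i = 0;
      std::atomic_ref<int> r(i);
      static_assert(std::atomic_ref<int>::is_always_lock_free, "sketch assumes a typical target");
      assert(r.is_lock_free()); // the runtime query sees the actual pointer
      return 0;
    }
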
>From 838bcc18addcf5a47e719e265eb7eddf3e1f9d49 Mon Sep 17 00:00:00 2001
From: Damien L-G <dalg24 at gmail.com>
Date: Mon, 1 Jan 2024 11:24:20 -0500
Subject: [PATCH 12/12] [libc++][atomic_ref] Fixup unused-variable warnings in
 wait

---
 libcxx/include/__atomic/atomic_ref.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
index febe1d8d78adad..830fade76c7d2f 100644
--- a/libcxx/include/__atomic/atomic_ref.h
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -146,6 +146,8 @@ struct __atomic_ref_base {
             __order == memory_order::seq_cst,
         "memory order argument to atomic wait operation is invalid");
     // FIXME
+    (void)__old;
+    (void)__order;
   }
   _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept {
     // FIXME

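Not part of this patch, but for the remaining FIXME in wait(): a naive spin-based fallback would poll load() and compare value representations until they differ. Purely illustrative sketch (poll_wait_sketch is a hypothetical helper; a real implementation should reuse the __cxx_atomic_wait machinery or platform futexes instead of spinning):

    #include <cstring>
    #include <memory>

    template <class T>
    void poll_wait_sketch(T* ptr, T old_value, int gcc_order) {
      for (;;) {
        T current;
        __atomic_load(ptr, std::addressof(current), gcc_order);
        // wait() compares value representations, hence memcmp.
        if (std::memcmp(std::addressof(current), std::addressof(old_value), sizeof(T)) != 0)
          return;
      }
    }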

