[libcxx-commits] [libcxx] de7fbfe - [libc++] Floating Point Atomic (#67799)
via libcxx-commits
libcxx-commits at lists.llvm.org
Wed Nov 22 03:48:54 PST 2023
Author: Hui
Date: 2023-11-22T11:48:49Z
New Revision: de7fbfeef5984ed3dc178957fbfaf4b1bb95fa94
URL: https://github.com/llvm/llvm-project/commit/de7fbfeef5984ed3dc178957fbfaf4b1bb95fa94
DIFF: https://github.com/llvm/llvm-project/commit/de7fbfeef5984ed3dc178957fbfaf4b1bb95fa94.diff
LOG: [libc++] Floating Point Atomic (#67799)
- Implement P0020R6: Floating Point Atomic
Differential Revision: https://reviews.llvm.org/D153981
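For context (not part of the commit): P0020R6 adds fetch_add, fetch_sub and the compound assignment operators to std::atomic for floating-point types. Below is a minimal usage sketch, together with the compare-exchange fallback pattern that __rmw_op in this patch relies on when no native read-modify-write builtin is available. fetch_add_via_cas is a hypothetical helper written for illustration only; it is not part of the patch.

    #include <atomic>
    #include <cassert>

    // Hypothetical helper: emulate fetch_add with a compare-exchange loop,
    // the same fallback strategy the patch uses when the builtin is unavailable.
    template <class T>
    T fetch_add_via_cas(std::atomic<T>& a, T operand,
                        std::memory_order order = std::memory_order_seq_cst) {
      T old_val = a.load(std::memory_order_relaxed);
      // On failure, compare_exchange_weak stores the current value into old_val,
      // so each retry recomputes the desired value from a fresh snapshot.
      while (!a.compare_exchange_weak(old_val, old_val + operand, order,
                                      std::memory_order_relaxed)) {
      }
      return old_val; // value observed immediately before the successful add
    }

    int main() {
      // Basic C++20 usage enabled by P0020R6.
      std::atomic<double> d(1.5);
      double prev = d.fetch_add(2.5); // returns the value held before the add
      assert(prev == 1.5);
      assert(d.load() == 4.0);

      d -= 1.0; // operator-= is specified in terms of fetch_sub
      assert(d.load() == 3.0);

      // The CAS-based emulation yields the same observable behaviour.
      prev = fetch_add_via_cas(d, 0.5);
      assert(prev == 3.0);
      assert(d.load() == 3.5);
      return 0;
    }

The loop works because compare_exchange_weak reloads the current value into its first argument on failure; the patch additionally works around a Clang bug (https://github.com/llvm/llvm-project/issues/47978) where that reload does not happen for x87 long double.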
Added:
libcxx/test/libcxx/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/assign.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_strong.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_weak.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/copy.compile.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/ctor.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/exchange.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_add.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_sub.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/load.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_all.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_one.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.float.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.minus_equals.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.plus_equals.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/store.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/test_helper.h
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/wait.pass.cpp
Modified:
libcxx/docs/ReleaseNotes/18.rst
libcxx/docs/Status/Cxx20Papers.csv
libcxx/include/__atomic/atomic.h
libcxx/include/__atomic/cxx_atomic_impl.h
libcxx/include/atomic
libcxx/utils/libcxx/test/features.py
Removed:
################################################################################
diff --git a/libcxx/docs/ReleaseNotes/18.rst b/libcxx/docs/ReleaseNotes/18.rst
index 5f5ff83ca551240..c28a5b73f7ec68b 100644
--- a/libcxx/docs/ReleaseNotes/18.rst
+++ b/libcxx/docs/ReleaseNotes/18.rst
@@ -49,6 +49,7 @@ Implemented Papers
- P2614R2 - Deprecate ``numeric_limits::has_denorm``
- P0053R7 - C++ Synchronized Buffered Ostream (in the experimental library)
- P2467R1 - Support exclusive mode for fstreams
+- P0020R6 - Floating Point Atomic
Improvements and New Features
diff --git a/libcxx/docs/Status/Cxx20Papers.csv b/libcxx/docs/Status/Cxx20Papers.csv
index 07f53ee995ea16b..7aff860c68cf98e 100644
--- a/libcxx/docs/Status/Cxx20Papers.csv
+++ b/libcxx/docs/Status/Cxx20Papers.csv
@@ -2,7 +2,7 @@
"`P0463R1 <https://wg21.link/P0463R1>`__","LWG","Endian just Endian","Toronto","|Complete|","7.0"
"`P0674R1 <https://wg21.link/P0674R1>`__","LWG","Extending make_shared to Support Arrays","Toronto","|Complete|","15.0"
"","","","","","",""
-"`P0020R6 <https://wg21.link/P0020R6>`__","LWG","Floating Point Atomic","Albuquerque","",""
+"`P0020R6 <https://wg21.link/P0020R6>`__","LWG","Floating Point Atomic","Albuquerque","|Complete|","18.0"
"`P0053R7 <https://wg21.link/P0053R7>`__","LWG","C++ Synchronized Buffered Ostream","Albuquerque","|Complete|","18.0"
"`P0202R3 <https://wg21.link/P0202R3>`__","LWG","Add constexpr modifiers to functions in <algorithm> and <utility> Headers","Albuquerque","|Complete|","12.0"
"`P0415R1 <https://wg21.link/P0415R1>`__","LWG","Constexpr for ``std::complex``\ ","Albuquerque","|Complete|","16.0"
diff --git a/libcxx/include/__atomic/atomic.h b/libcxx/include/__atomic/atomic.h
index 47de6b958a96c1b..449802a2e30405e 100644
--- a/libcxx/include/__atomic/atomic.h
+++ b/libcxx/include/__atomic/atomic.h
@@ -14,11 +14,17 @@
#include <__atomic/cxx_atomic_impl.h>
#include <__atomic/memory_order.h>
#include <__config>
+#include <__functional/operations.h>
#include <__memory/addressof.h>
+#include <__type_traits/is_floating_point.h>
#include <__type_traits/is_function.h>
#include <__type_traits/is_same.h>
+#include <__type_traits/remove_const.h>
#include <__type_traits/remove_pointer.h>
+#include <__type_traits/remove_volatile.h>
+#include <__utility/forward.h>
#include <cstddef>
+#include <cstring>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -136,6 +142,136 @@ struct atomic<_Tp*>
atomic& operator=(const atomic&) volatile = delete;
};
+#if _LIBCPP_STD_VER >= 20
+template <class _Tp>
+ requires is_floating_point_v<_Tp>
+struct atomic<_Tp> : __atomic_base<_Tp> {
+ private:
+ _LIBCPP_HIDE_FROM_ABI static constexpr bool __is_fp80_long_double() {
+ // Only x87-fp80 long double has 64-bit mantissa
+ return __LDBL_MANT_DIG__ == 64 && std::is_same_v<_Tp, long double>;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI static constexpr bool __has_rmw_builtin() {
+# ifndef _LIBCPP_COMPILER_CLANG_BASED
+ return false;
+# else
+ // The builtin __cxx_atomic_fetch_add errors during compilation for
+ // long double on platforms with fp80 format.
+ // For more details, see
+ // lib/Sema/SemaChecking.cpp function IsAllowedValueType
+ // LLVM Parser does not allow atomicrmw with x86_fp80 type.
+ // if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
+ // &Context.getTargetInfo().getLongDoubleFormat() ==
+ // &llvm::APFloat::x87DoubleExtended())
+ // For more info
+ // https://github.com/llvm/llvm-project/issues/68602
+ // https://reviews.llvm.org/D53965
+ return !__is_fp80_long_double();
+# endif
+ }
+
+ template <class _This, class _Operation, class _BuiltinOp>
+ _LIBCPP_HIDE_FROM_ABI static _Tp
+ __rmw_op(_This&& __self, _Tp __operand, memory_order __m, _Operation __operation, _BuiltinOp __builtin_op) {
+ if constexpr (__has_rmw_builtin()) {
+ return __builtin_op(std::addressof(std::forward<_This>(__self).__a_), __operand, __m);
+ } else {
+ _Tp __old = __self.load(memory_order_relaxed);
+ _Tp __new = __operation(__old, __operand);
+ while (!__self.compare_exchange_weak(__old, __new, __m, memory_order_relaxed)) {
+# ifdef _LIBCPP_COMPILER_CLANG_BASED
+ if constexpr (__is_fp80_long_double()) {
+ // https://github.com/llvm/llvm-project/issues/47978
+ // clang bug: __old is not updated on failure for atomic<long double>::compare_exchange_weak
+ // Note __old = __self.load(memory_order_relaxed) will not work
+ std::__cxx_atomic_load_inplace(std::addressof(__self.__a_), &__old, memory_order_relaxed);
+ }
+# endif
+ __new = __operation(__old, __operand);
+ }
+ return __old;
+ }
+ }
+
+ template <class _This>
+ _LIBCPP_HIDE_FROM_ABI static _Tp __fetch_add(_This&& __self, _Tp __operand, memory_order __m) {
+ auto __builtin_op = [](auto __a, auto __builtin_operand, auto __order) {
+ return std::__cxx_atomic_fetch_add(__a, __builtin_operand, __order);
+ };
+ return __rmw_op(std::forward<_This>(__self), __operand, __m, std::plus<>{}, __builtin_op);
+ }
+
+ template <class _This>
+ _LIBCPP_HIDE_FROM_ABI static _Tp __fetch_sub(_This&& __self, _Tp __operand, memory_order __m) {
+ auto __builtin_op = [](auto __a, auto __builtin_operand, auto __order) {
+ return std::__cxx_atomic_fetch_sub(__a, __builtin_operand, __order);
+ };
+ return __rmw_op(std::forward<_This>(__self), __operand, __m, std::minus<>{}, __builtin_op);
+ }
+
+ public:
+ using __base = __atomic_base<_Tp>;
+ using value_type = _Tp;
+ using difference_type = value_type;
+
+ _LIBCPP_HIDE_FROM_ABI constexpr atomic() noexcept = default;
+ _LIBCPP_HIDE_FROM_ABI constexpr atomic(_Tp __d) noexcept : __base(__d) {}
+
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __d) volatile noexcept
+ requires __base::is_always_lock_free
+ {
+ __base::store(__d);
+ return __d;
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __d) noexcept {
+ __base::store(__d);
+ return __d;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept
+ requires __base::is_always_lock_free
+ {
+ return __fetch_add(*this, __op, __m);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
+ return __fetch_add(*this, __op, __m);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept
+ requires __base::is_always_lock_free
+ {
+ return __fetch_sub(*this, __op, __m);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept {
+ return __fetch_sub(*this, __op, __m);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile noexcept
+ requires __base::is_always_lock_free
+ {
+ return fetch_add(__op) + __op;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) noexcept { return fetch_add(__op) + __op; }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile noexcept
+ requires __base::is_always_lock_free
+ {
+ return fetch_sub(__op) - __op;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) noexcept { return fetch_sub(__op) - __op; }
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
// atomic_is_lock_free
template <class _Tp>
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index d670fddc3934cdd..5d724669fee875a 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -128,6 +128,18 @@ _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
return __ret;
}
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+ __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+ __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI
_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
@@ -362,6 +374,21 @@ _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __ord
const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const volatile* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+ using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+ *__dst = __c11_atomic_load(
+ const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(__cxx_atomic_base_impl<_Tp> const* __a, _Tp* __dst, memory_order __order) _NOEXCEPT {
+ using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
+ *__dst = __c11_atomic_load(
+ const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
+}
+
template<class _Tp>
_LIBCPP_HIDE_FROM_ABI
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
@@ -558,6 +585,16 @@ struct __cxx_atomic_lock_impl {
__unlock();
return __old;
}
+ _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const volatile {
+ __lock();
+ __cxx_atomic_assign_volatile(*__dst, __a_value);
+ __unlock();
+ }
+ _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const {
+ __lock();
+ *__dst = __a_value;
+ __unlock();
+ }
};
template <typename _Tp>
@@ -597,6 +634,16 @@ _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
return __a->__read();
}
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
+ __a->__read_inplace(__dst);
+}
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
+ __a->__read_inplace(__dst);
+}
+
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI
_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 2f122a707bdc33a..7bed8fd8bacfc5c 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -262,6 +262,72 @@ struct atomic<T*>
void notify_all() noexcept;
};
+template<>
+struct atomic<floating-point-type> { // since C++20
+ using value_type = floating-point-type;
+ using difference_type = value_type;
+
+ static constexpr bool is_always_lock_free = implementation-defined;
+ bool is_lock_free() const volatile noexcept;
+ bool is_lock_free() const noexcept;
+
+ constexpr atomic() noexcept;
+ constexpr atomic(floating-point-type) noexcept;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ void store(floating-point-type, memory_order = memory_order::seq_cst) volatile noexcept;
+ void store(floating-point-type, memory_order = memory_order::seq_cst) noexcept;
+ floating-point-type operator=(floating-point-type) volatile noexcept;
+ floating-point-type operator=(floating-point-type) noexcept;
+ floating-point-type load(memory_order = memory_order::seq_cst) volatile noexcept;
+ floating-point-type load(memory_order = memory_order::seq_cst) noexcept;
+ operator floating-point-type() volatile noexcept;
+ operator floating-point-type() noexcept;
+
+ floating-point-type exchange(floating-point-type,
+ memory_order = memory_order::seq_cst) volatile noexcept;
+ floating-point-type exchange(floating-point-type,
+ memory_order = memory_order::seq_cst) noexcept;
+ bool compare_exchange_weak(floating-point-type&, floating-point-type,
+ memory_order, memory_order) volatile noexcept;
+ bool compare_exchange_weak(floating-point-type&, floating-point-type,
+ memory_order, memory_order) noexcept;
+ bool compare_exchange_strong(floating-point-type&, floating-point-type,
+ memory_order, memory_order) volatile noexcept;
+ bool compare_exchange_strong(floating-point-type&, floating-point-type,
+ memory_order, memory_order) noexcept;
+ bool compare_exchange_weak(floating-point-type&, floating-point-type,
+ memory_order = memory_order::seq_cst) volatile noexcept;
+ bool compare_exchange_weak(floating-point-type&, floating-point-type,
+ memory_order = memory_order::seq_cst) noexcept;
+ bool compare_exchange_strong(floating-point-type&, floating-point-type,
+ memory_order = memory_order::seq_cst) volatile noexcept;
+ bool compare_exchange_strong(floating-point-type&, floating-point-type,
+ memory_order = memory_order::seq_cst) noexcept;
+
+ floating-point-type fetch_add(floating-point-type,
+ memory_order = memory_order::seq_cst) volatile noexcept;
+ floating-point-type fetch_add(floating-point-type,
+ memory_order = memory_order::seq_cst) noexcept;
+ floating-point-type fetch_sub(floating-point-type,
+ memory_order = memory_order::seq_cst) volatile noexcept;
+ floating-point-type fetch_sub(floating-point-type,
+ memory_order = memory_order::seq_cst) noexcept;
+
+ floating-point-type operator+=(floating-point-type) volatile noexcept;
+ floating-point-type operator+=(floating-point-type) noexcept;
+ floating-point-type operator-=(floating-point-type) volatile noexcept;
+ floating-point-type operator-=(floating-point-type) noexcept;
+
+ void wait(floating-point-type, memory_order = memory_order::seq_cst) const volatile noexcept;
+ void wait(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+ void notify_one() volatile noexcept;
+ void notify_one() noexcept;
+ void notify_all() volatile noexcept;
+ void notify_all() noexcept;
+};
// [atomics.nonmembers], non-member functions
template<class T>
diff --git a/libcxx/test/libcxx/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp b/libcxx/test/libcxx/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
new file mode 100644
index 000000000000000..46511530c7c7582
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
@@ -0,0 +1,51 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// static constexpr bool is_always_lock_free = implementation-defined;
+// bool is_lock_free() const volatile noexcept;
+// bool is_lock_free() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+
+#include "test_macros.h"
+
+template <class T>
+void test() {
+ // static constexpr bool is_always_lock_free = implementation-defined;
+ {
+ bool r = std::atomic<T>::is_always_lock_free;
+ assert(r == __atomic_always_lock_free(sizeof(std::__cxx_atomic_impl<T>), 0));
+ }
+
+ // bool is_lock_free() const volatile noexcept;
+ {
+ const volatile std::atomic<T> a;
+ bool r = a.is_lock_free();
+ assert(r == __cxx_atomic_is_lock_free(sizeof(std::__cxx_atomic_impl<T>)));
+ }
+
+ // bool is_lock_free() const noexcept;
+ {
+ const std::atomic<T> a;
+ bool r = a.is_lock_free();
+ assert(r == __cxx_atomic_is_lock_free(sizeof(std::__cxx_atomic_impl<T>)));
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/assign.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/assign.pass.cpp
new file mode 100644
index 000000000000000..8efb556cb5d99a5
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/assign.pass.cpp
@@ -0,0 +1,62 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type operator=(floating-point-type) volatile noexcept;
+// floating-point-type operator=(floating-point-type) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T>
+concept HasVolatileAssign = requires(volatile std::atomic<T>& a, T t) { a = t; };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ static_assert(HasVolatileAssign<T> == std::atomic<T>::is_always_lock_free);
+
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>() = (T(0))));
+
+ // assignment
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = (a = T(1.2));
+ assert(a.load() == T(1.2));
+ assert(r == T(1.2));
+ }
+
+ // memory_order::seq_cst
+ {
+ auto assign = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x = new_val; };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(assign, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_strong.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_strong.pass.cpp
new file mode 100644
index 000000000000000..839d79d3a411052
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_strong.pass.cpp
@@ -0,0 +1,226 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// bool compare_exchange_strong(T& expected, T desired,
+// memory_order success, memory_order failure) volatile noexcept;
+// bool compare_exchange_strong(T& expected, T desired,
+// memory_order success, memory_order failure) noexcept;
+// bool compare_exchange_strong(T& expected, T desired,
+// memory_order order = memory_order::seq_cst) volatile noexcept;
+// bool compare_exchange_strong(T& expected, T desired,
+// memory_order order = memory_order::seq_cst) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T, class... Args>
+concept HasVolatileCompareExchangeStrong =
+ requires(volatile std::atomic<T>& a, T t, Args... args) { a.compare_exchange_strong(t, t, args...); };
+
+template <class T, template <class> class MaybeVolatile, class... Args>
+concept HasNoexceptCompareExchangeStrong = requires(MaybeVolatile<std::atomic<T>>& a, T t, Args... args) {
+ { a.compare_exchange_strong(t, t, args...) } noexcept;
+};
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t, class... MemoryOrder>
+void testBasic(MemoryOrder... memory_order) {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileCompareExchangeStrong<T, MemoryOrder...> == std::atomic<T>::is_always_lock_free);
+ static_assert(HasNoexceptCompareExchangeStrong<T, MaybeVolatile, MemoryOrder...>);
+
+ // compare pass
+ {
+ MaybeVolatile<std::atomic<T>> a(T(1.2));
+ T expected(1.2);
+ const T desired(2.3);
+ std::same_as<bool> decltype(auto) r = a.compare_exchange_strong(expected, desired, memory_order...);
+
+ assert(r);
+ assert(a.load() == desired);
+ assert(expected == T(1.2));
+ }
+
+ // compare fail
+ {
+ MaybeVolatile<std::atomic<T>> a(T(1.2));
+ T expected(1.5);
+ const T desired(2.3);
+ std::same_as<bool> decltype(auto) r = a.compare_exchange_strong(expected, desired, memory_order...);
+
+ assert(!r);
+ assert(a.load() == T(1.2));
+ assert(expected == T(1.2));
+ }
+}
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ testBasic<T, MaybeVolatile>();
+ testBasic<T, MaybeVolatile>(std::memory_order::relaxed);
+ testBasic<T, MaybeVolatile>(std::memory_order::relaxed, std::memory_order_relaxed);
+
+ // test success memory order release
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release, std::memory_order_relaxed);
+ assert(r);
+ };
+
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release);
+ assert(r);
+ };
+ test_acquire_release<T, MaybeVolatile>(store_one_arg, load);
+ }
+
+ // test success memory order acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg);
+ }
+
+ // test success memory order acq_rel
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel, std::memory_order_relaxed);
+ assert(r);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel);
+ assert(r);
+ };
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store_one_arg, load_one_arg);
+ }
+
+ // test success memory order seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst, std::memory_order_relaxed);
+ assert(r);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst, std::memory_order_relaxed);
+ assert(r);
+ };
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T, MaybeVolatile>(store_one_arg, load_one_arg);
+ }
+
+ // test fail memory order acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order_relaxed, std::memory_order_acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order_acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg);
+
+ // acq_rel replaced by acquire
+ auto load_one_arg_acq_rel = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order_acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg_acq_rel);
+ }
+
+ // test fail memory order seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order_relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T, MaybeVolatile>(store, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ // https://github.com/llvm/llvm-project/issues/47978
+ // test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_weak.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_weak.pass.cpp
new file mode 100644
index 000000000000000..097210cc4a9db26
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/compare_exchange_weak.pass.cpp
@@ -0,0 +1,244 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// bool compare_exchange_weak(T& expected, T desired,
+// memory_order success, memory_order failure) volatile noexcept;
+// bool compare_exchange_weak(T& expected, T desired,
+// memory_order success, memory_order failure) noexcept;
+// bool compare_exchange_weak(T& expected, T desired,
+// memory_order order = memory_order::seq_cst) volatile noexcept;
+// bool compare_exchange_weak(T& expected, T desired,
+// memory_order order = memory_order::seq_cst) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T, class... Args>
+concept HasVolatileCompareExchangeWeak =
+ requires(volatile std::atomic<T>& a, T t, Args... args) { a.compare_exchange_weak(t, t, args...); };
+
+template <class T, template <class> class MaybeVolatile, class... Args>
+concept HasNoexceptCompareExchangeWeak = requires(MaybeVolatile<std::atomic<T>>& a, T t, Args... args) {
+ { a.compare_exchange_weak(t, t, args...) } noexcept;
+};
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t, class... MemoryOrder>
+void testBasic(MemoryOrder... memory_order) {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileCompareExchangeWeak<T, MemoryOrder...> == std::atomic<T>::is_always_lock_free);
+ static_assert(HasNoexceptCompareExchangeWeak<T, MaybeVolatile, MemoryOrder...>);
+
+ // compare pass
+ {
+ MaybeVolatile<std::atomic<T>> a(T(1.2));
+ T expected(1.2);
+ const T desired(2.3);
+ std::same_as<bool> decltype(auto) r = a.compare_exchange_weak(expected, desired, memory_order...);
+
+ // could be false spuriously
+ if (r) {
+ assert(a.load() == desired);
+ }
+ // if r is true, expected should be unmodified (1.2)
+ // if r is false, the original value of a (1.2) is written to expected
+ assert(expected == T(1.2));
+ }
+
+ // compare fail
+ {
+ MaybeVolatile<std::atomic<T>> a(T(1.2));
+ T expected(1.5);
+ const T desired(2.3);
+ std::same_as<bool> decltype(auto) r = a.compare_exchange_weak(expected, desired, memory_order...);
+
+ assert(!r);
+ assert(a.load() == T(1.2));
+
+ // bug
+ // https://github.com/llvm/llvm-project/issues/47978
+ if constexpr (!std::same_as<T, long double>) {
+ assert(expected == T(1.2));
+ }
+ }
+}
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ testBasic<T, MaybeVolatile>();
+ testBasic<T, MaybeVolatile>(std::memory_order::relaxed);
+ testBasic<T, MaybeVolatile>(std::memory_order::relaxed, std::memory_order_relaxed);
+
+ // test success memory order release
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release, std::memory_order_relaxed)) {
+ }
+ };
+
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release)) {
+ }
+ };
+ test_acquire_release<T, MaybeVolatile>(store_one_arg, load);
+ }
+
+ // test success memory order acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg);
+ }
+
+ // test success memory order acq_rel
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel, std::memory_order_relaxed)) {
+ }
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel)) {
+ }
+ };
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T, MaybeVolatile>(store_one_arg, load_one_arg);
+ }
+
+ // test success memory order seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T, MaybeVolatile>(store, load);
+
+ auto store_one_arg = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ };
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst, std::memory_order_relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T, MaybeVolatile>(store_one_arg, load_one_arg);
+ }
+
+ // test fail memory order acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order_relaxed, std::memory_order_acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+
+ auto load_one_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order_acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg);
+
+ // acq_rel replaced by acquire
+ auto load_one_arg_acq_rel = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order_acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load_one_arg_acq_rel);
+ }
+
+ // test fail memory order seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(-9999.99);
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order_relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T, MaybeVolatile>(store, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+
+// https://github.com/llvm/llvm-project/issues/47978
+#ifndef TEST_COMPILER_CLANG
+ test<long double>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/copy.compile.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/copy.compile.pass.cpp
new file mode 100644
index 000000000000000..3984d621ad3ab04
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/copy.compile.pass.cpp
@@ -0,0 +1,29 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// atomic(const atomic&) = delete;
+// atomic& operator=(const atomic&) = delete;
+// atomic& operator=(const atomic&) volatile = delete;
+
+#include <atomic>
+#include <type_traits>
+
+template <class T>
+void test() {
+ static_assert(!std::is_copy_assignable_v<std::atomic<T>>);
+ static_assert(!std::is_copy_constructible_v<std::atomic<T>>);
+ static_assert(!std::is_move_constructible_v<std::atomic<T>>);
+ static_assert(!std::is_move_assignable_v<std::atomic<T>>);
+ static_assert(!std::is_assignable_v<volatile std::atomic<T>&, const std::atomic<T>&>);
+}
+
+template void test<float>();
+template void test<double>();
+template void test<long double>();
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/ctor.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/ctor.pass.cpp
new file mode 100644
index 000000000000000..febabb4f2678261
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/ctor.pass.cpp
@@ -0,0 +1,68 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// constexpr atomic() noexcept;
+// constexpr atomic(floating-point-type) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <class T>
+constinit std::atomic<T> a1 = T();
+
+template <class T>
+constinit std::atomic<T> a2 = T(5.2);
+
+template <class T>
+constexpr void testOne() {
+ static_assert(std::is_nothrow_constructible_v<std::atomic<T>>);
+ static_assert(std::is_nothrow_constructible_v<std::atomic<T>, T>);
+
+ // constexpr atomic() noexcept;
+ {
+ std::atomic<T> a = {};
+ if (!TEST_IS_CONSTANT_EVALUATED) {
+ assert(a.load() == T(0));
+ }
+ }
+
+ // constexpr atomic(floating-point-type) noexcept;
+ {
+ std::atomic<T> a = T(5.2);
+ if (!TEST_IS_CONSTANT_EVALUATED) {
+ assert(a.load() == T(5.2));
+ }
+ }
+
+ // test constinit
+ if (!TEST_IS_CONSTANT_EVALUATED) {
+ assert(a1<T> == T(0.0));
+ assert(a2<T> == T(5.2));
+ }
+}
+
+constexpr bool test() {
+ testOne<float>();
+ testOne<double>();
+ testOne<long double>();
+ return true;
+}
+
+int main(int, char**) {
+ test();
+ static_assert(test());
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/exchange.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/exchange.pass.cpp
new file mode 100644
index 000000000000000..4bd50396213d8b6
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/exchange.pass.cpp
@@ -0,0 +1,77 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan, msan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// T exchange(T, memory_order = memory_order::seq_cst) volatile noexcept;
+// T exchange(T, memory_order = memory_order::seq_cst) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T>
+concept HasVolatileExchange = requires(volatile std::atomic<T>& a, T t) { a.exchange(t); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileExchange<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>() = (T(0))));
+
+ // exchange
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = a.exchange(T(1.2), std::memory_order::relaxed);
+ assert(a.load() == T(1.2));
+ assert(r == T(3.1));
+ }
+
+ // memory_order::release
+ {
+ auto exchange = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) {
+ x.exchange(new_val, std::memory_order::release);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(exchange, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto exchange_no_arg = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.exchange(new_val); };
+ auto exchange_with_order = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) {
+ x.exchange(new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(exchange_no_arg, load);
+ test_seq_cst<T, MaybeVolatile>(exchange_with_order, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_add.pass.cpp
new file mode 100644
index 000000000000000..bb0b7c5b5847290
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_add.pass.cpp
@@ -0,0 +1,118 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// XFAIL: LIBCXX-AIX-FIXME
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type fetch_add(floating-point-type,
+// memory_order = memory_order::seq_cst) volatile noexcept;
+// floating-point-type fetch_add(floating-point-type,
+// memory_order = memory_order::seq_cst) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+template <class T>
+concept HasVolatileFetchAdd = requires(volatile std::atomic<T>& a, T t) { a.fetch_add(t); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ static_assert(HasVolatileFetchAdd<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().fetch_add(T(0))));
+
+ // fetch_add
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = a.fetch_add(T(1.2), std::memory_order::relaxed);
+ assert(r == T(3.1));
+ assert(a.load() == T(3.1) + T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // fetch_add concurrent
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at;
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at]() {
+ for (auto j = 0; j < loop; ++j) {
+ at.fetch_add(T(1.234), std::memory_order::relaxed);
+ }
+ }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ const auto times = [](T t, int n) {
+ T res(0);
+ for (auto i = 0; i < n; ++i) {
+ res += t;
+ }
+ return res;
+ };
+
+ assert(at.load() == times(1.234, number_of_threads * loop));
+ }
+#endif
+
+ // memory_order::release
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ x.fetch_add(new_val - old_val, std::memory_order::release);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_add = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) { x.fetch_add(new_val - old_value); };
+ auto fetch_add_with_order = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) {
+ x.fetch_add(new_val - old_value, std::memory_order::seq_cst);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(fetch_add, load);
+ test_seq_cst<T, MaybeVolatile>(fetch_add_with_order, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_sub.pass.cpp
new file mode 100644
index 000000000000000..b847cedf404a1c4
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/fetch_sub.pass.cpp
@@ -0,0 +1,119 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// XFAIL: LIBCXX-AIX-FIXME
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type fetch_sub(floating-point-type,
+// memory_order = memory_order::seq_cst) volatile noexcept;
+// floating-point-type fetch_sub(floating-point-type,
+// memory_order = memory_order::seq_cst) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatileFetchSub = requires(volatile std::atomic<T>& a, T t) { a.fetch_sub(t); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ static_assert(HasVolatileFetchSub<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().fetch_sub(T(0))));
+
+ // fetch_sub
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = a.fetch_sub(T(1.2), std::memory_order::relaxed);
+ assert(r == T(3.1));
+ assert(a.load() == T(3.1) - T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // fetch_sub concurrent
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at;
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at]() {
+ for (auto j = 0; j < loop; ++j) {
+ at.fetch_sub(T(1.234), std::memory_order::relaxed);
+ }
+ }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ const auto accu_neg = [](T t, int n) {
+ T res(0);
+ for (auto i = 0; i < n; ++i) {
+ res -= t;
+ }
+ return res;
+ };
+
+ assert(at.load() == accu_neg(1.234, number_of_threads * loop));
+ }
+#endif
+
+ // memory_order::release
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T old_val, T new_val) {
+ x.fetch_sub(old_val - new_val, std::memory_order::release);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_sub = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) { x.fetch_sub(old_value - new_val); };
+ auto fetch_sub_with_order = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) {
+ x.fetch_sub(old_value - new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](auto& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(fetch_sub, load);
+ test_seq_cst<T, MaybeVolatile>(fetch_sub_with_order, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/load.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/load.pass.cpp
new file mode 100644
index 000000000000000..fa085275de2995b
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/load.pass.cpp
@@ -0,0 +1,140 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type load(memory_order = memory_order::seq_cst) volatile noexcept;
+// floating-point-type load(memory_order = memory_order::seq_cst) noexcept;
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <ranges>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatileLoad = requires(volatile std::atomic<T>& a, T t) { a.load(); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileLoad<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().load()));
+
+ // load
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ a.store(T(1.2));
+ std::same_as<T> decltype(auto) r = a.load(std::memory_order::relaxed);
+ assert(r == T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // memory_order::relaxed
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at(T(-1.0));
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at, i]() {
+ for (auto j = 0; j < loop; ++j) {
+ at.store(T(i));
+ }
+ }));
+ }
+
+ while (at.load(std::memory_order::relaxed) == T(-1.0)) {
+ std::this_thread::yield();
+ }
+
+ for (auto i = 0; i < loop; ++i) {
+ auto r = at.load(std::memory_order_relaxed);
+ assert(std::ranges::any_of(std::views::iota(0, number_of_threads), [r](auto j) { return r == T(j); }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+
+ // memory_order::consume
+ {
+ std::unique_ptr<T> p = std::make_unique<T>(T(0.0));
+ MaybeVolatile<std::atomic<T>> at(T(0.0));
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at, &p] {
+ while (at.load(std::memory_order::consume) == T(0.0)) {
+ std::this_thread::yield();
+ }
+ assert(*p == T(1.0)); // the write from the other thread should be visible
+ }));
+ }
+
+ *p = T(1.0);
+ at.store(*p, std::memory_order_release);
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+#endif
+
+ // memory_order::acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val); };
+ auto load_no_arg = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ auto load_with_order = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::seq_cst); };
+ test_seq_cst<T, MaybeVolatile>(store, load_no_arg);
+ test_seq_cst<T, MaybeVolatile>(store, load_with_order);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
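Each of these tests runs its body twice through the MaybeVolatile template-template parameter: the default std::type_identity_t leaves std::atomic<T> unqualified, while std::add_volatile_t exercises the volatile overloads, and only when the atomic is always lock-free. A minimal standalone sketch of that idiom, assuming C++20 (the names below are illustrative, not part of the patch):

    #include <atomic>
    #include <type_traits>

    // The alias template bound to MaybeVolatile decides whether the atomic
    // under test is plain or volatile-qualified.
    template <class T, template <class> class MaybeVolatile = std::type_identity_t>
    void exercise() {
      MaybeVolatile<std::atomic<T>> a(T(1.5));
      a.store(T(2.5));
      (void)a.load();
    }

    int main() {
      exercise<float>(); // std::atomic<float>
      if constexpr (std::atomic<float>::is_always_lock_free)
        exercise<float, std::add_volatile_t>(); // volatile std::atomic<float>
      return 0;
    }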
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
new file mode 100644
index 000000000000000..447e0f86500272b
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/lockfree.pass.cpp
@@ -0,0 +1,61 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// static constexpr bool is_always_lock_free = implementation-defined;
+// bool is_lock_free() const volatile noexcept;
+// bool is_lock_free() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <class T>
+concept isLockFreeNoexcept = requires(T t) {
+ { t.is_lock_free() } noexcept;
+};
+
+template <class T>
+void test() {
+ static_assert(isLockFreeNoexcept<const std::atomic<T>&>);
+ static_assert(isLockFreeNoexcept<const volatile std::atomic<T>&>);
+
+ // static constexpr bool is_always_lock_free = implementation-defined;
+ { [[maybe_unused]] constexpr std::same_as<const bool> decltype(auto) r = std::atomic<T>::is_always_lock_free; }
+
+ // bool is_lock_free() const volatile noexcept;
+ {
+ const volatile std::atomic<T> a;
+ std::same_as<bool> decltype(auto) r = a.is_lock_free();
+ if (std::atomic<T>::is_always_lock_free) {
+ assert(r);
+ }
+ }
+
+ // bool is_lock_free() const noexcept;
+ {
+ const std::atomic<T> a;
+ std::same_as<bool> decltype(auto) r = a.is_lock_free();
+ if (std::atomic<T>::is_always_lock_free) {
+ assert(r);
+ }
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
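The isLockFreeNoexcept concept above uses a compound requirement with a trailing noexcept, which is satisfied only when the expression is known not to throw. A small self-contained illustration of that mechanism, assuming C++20 (ThrowingIsLockFree is a hypothetical type, not part of the patch):

    #include <atomic>

    template <class T>
    concept isLockFreeNoexcept = requires(T t) {
      { t.is_lock_free() } noexcept; // rejected if the call may throw
    };

    struct ThrowingIsLockFree {
      bool is_lock_free() const { return true; } // not marked noexcept
    };

    static_assert(isLockFreeNoexcept<const std::atomic<float>&>);
    static_assert(!isLockFreeNoexcept<ThrowingIsLockFree>);

    int main() { return 0; }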
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_all.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_all.pass.cpp
new file mode 100644
index 000000000000000..e2320ed0b70ed13
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_all.pass.cpp
@@ -0,0 +1,98 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// void notify_all() volatile noexcept;
+// void notify_all() noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <concepts>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T>
+concept HasVolatileNotifyAll = requires(volatile std::atomic<T>& a, T t) { a.notify_all(); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileNotifyAll<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().notify_all()));
+
+  // TODO: wait() can also fail for long double; it is unclear whether
+  // x87 80-bit long double is expected to work at all.
+ if constexpr (!std::same_as<T, long double>) {
+ for (auto i = 0; i < 100; ++i) {
+ const T old = 3.1;
+ MaybeVolatile<std::atomic<T>> a(old);
+
+ bool done = false;
+ std::atomic<int> started_num = 0;
+ std::atomic<int> wait_done_num = 0;
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&a, &started_num, old, &done, &wait_done_num] {
+ started_num.fetch_add(1, std::memory_order::relaxed);
+
+ a.wait(old);
+ wait_done_num.fetch_add(1, std::memory_order::relaxed);
+
+ // likely to fail if wait did not block
+ assert(done);
+ }));
+ }
+
+ while (started_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+ done = true;
+ a.store(T(9.9));
+ a.notify_all();
+
+      // notify_all should unblock all the threads so that the loop below won't get stuck
+ while (wait_done_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
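The notify tests follow a common handshake: a waiter blocks in wait(old), and the other thread first stores a different value and then notifies. A minimal sketch of that handshake on a floating-point atomic, assuming C++20 and a platform where std::atomic<float> supports wait/notify:

    #include <atomic>
    #include <thread>

    int main() {
      std::atomic<float> a(3.1f);

      std::thread waiter([&a] {
        a.wait(3.1f); // blocks until the stored value is no longer 3.1f
      });

      a.store(9.9f);  // change the value first...
      a.notify_all(); // ...then wake every thread blocked in wait()
      waiter.join();
      return 0;
    }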
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_one.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_one.pass.cpp
new file mode 100644
index 000000000000000..058424f5cee0593
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/notify_one.pass.cpp
@@ -0,0 +1,82 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// void notify_one() volatile noexcept;
+// void notify_one() noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <concepts>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T>
+concept HasVolatileNotifyOne = requires(volatile std::atomic<T>& a, T t) { a.notify_one(); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileNotifyOne<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().notify_one()));
+
+  // TODO: wait() can also fail for long double; it is unclear whether
+  // x87 80-bit long double is expected to work at all.
+ if constexpr (!std::same_as<T, long double>) {
+ for (auto i = 0; i < 100; ++i) {
+ const T old = 3.1;
+ MaybeVolatile<std::atomic<T>> a(old);
+
+ std::atomic_bool started = false;
+ bool done = false;
+
+ auto t = support::make_test_thread([&a, &started, old, &done] {
+ started.store(true, std::memory_order::relaxed);
+
+ a.wait(old);
+
+ // likely to fail if wait did not block
+ assert(done);
+ });
+
+ while (!started.load(std::memory_order::relaxed)) {
+ std::this_thread::yield();
+ }
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+ done = true;
+ a.store(T(9.9));
+ a.notify_one();
+ t.join();
+ }
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.float.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.float.pass.cpp
new file mode 100644
index 000000000000000..54c62ba8d091810
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.float.pass.cpp
@@ -0,0 +1,58 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// operator floating-point-type() volatile noexcept;
+// operator floating-point-type() noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(std::is_convertible_v<volatile std::atomic<T>&, T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(T(std::declval<MaybeVolatile<std::atomic<T>>&>())));
+
+ // operator float
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ T r = a;
+ assert(r == T(3.1));
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val); };
+ auto op_float = [](MaybeVolatile<std::atomic<T>>& x) -> T { return x; };
+ test_seq_cst<T, MaybeVolatile>(store, op_float);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.minus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.minus_equals.pass.cpp
new file mode 100644
index 000000000000000..18c42d7b9bb13cb
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.minus_equals.pass.cpp
@@ -0,0 +1,102 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// XFAIL: LIBCXX-AIX-FIXME
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type operator-=(floating-point-type) volatile noexcept;
+// floating-point-type operator-=(floating-point-type) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatileMinusEquals = requires(volatile std::atomic<T>& a, T t) { a -= t; };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ static_assert(HasVolatileMinusEquals<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>() -= T(0)));
+
+ // -=
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = a -= T(1.2);
+ assert(r == T(3.1) - T(1.2));
+ assert(a.load() == T(3.1) - T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // -= concurrent
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at;
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at]() {
+ for (auto j = 0; j < loop; ++j) {
+ at -= T(1.234);
+ }
+ }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ const auto accu_neg = [](T t, int n) {
+ T res(0);
+ for (auto i = 0; i < n; ++i) {
+ res -= t;
+ }
+ return res;
+ };
+
+ assert(at.load() == accu_neg(1.234, number_of_threads * loop));
+ }
+#endif
+
+ // memory_order::seq_cst
+ {
+ auto minus_equals = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) { x -= (old_value - new_val); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(minus_equals, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
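The concurrent check above compares against accu_neg, which repeats the subtraction n times rather than computing -1.234 * n: the atomic operator-= calls are serialized, so the final value is exactly the result of n chained floating-point subtractions, and chained subtraction generally differs from a single rounded multiplication. A small standalone illustration of that difference (not part of the patch):

    #include <cstdio>

    int main() {
      float chained = 0.0f;
      for (int i = 0; i < 4000; ++i)
        chained -= 1.234f;           // mirrors what the atomic operator-= loop computes

      float scaled = -1.234f * 4000; // a single rounded multiplication
      std::printf("chained=%f scaled=%f\n", chained, scaled); // typically not identical
      return 0;
    }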
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.plus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.plus_equals.pass.cpp
new file mode 100644
index 000000000000000..6026d2392d9377e
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/operator.plus_equals.pass.cpp
@@ -0,0 +1,102 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: target={{.+}}-windows-gnu
+// XFAIL: LIBCXX-AIX-FIXME
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// floating-point-type operator+=(floating-point-type) volatile noexcept;
+// floating-point-type operator+=(floating-point-type) noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatilePlusEquals = requires(volatile std::atomic<T>& a, T t) { a += t; };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ static_assert(HasVolatilePlusEquals<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>() += T(0)));
+
+ // +=
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ std::same_as<T> decltype(auto) r = a += T(1.2);
+ assert(r == T(3.1) + T(1.2));
+ assert(a.load() == T(3.1) + T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // += concurrent
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at;
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at]() {
+ for (auto j = 0; j < loop; ++j) {
+ at += T(1.234);
+ }
+ }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ const auto times = [](T t, int n) {
+ T res(0);
+ for (auto i = 0; i < n; ++i) {
+ res += t;
+ }
+ return res;
+ };
+
+ assert(at.load() == times(1.234, number_of_threads * loop));
+ }
+
+ // memory_order::seq_cst
+ {
+ auto plus_equals = [](MaybeVolatile<std::atomic<T>>& x, T old_value, T new_val) { x += (new_val - old_value); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(plus_equals, load);
+ }
+#endif
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
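Several assertions in these tests, like the one for operator+= above, use the pattern std::same_as<T> decltype(auto) r = expr;. The decltype(auto) placeholder deduces the exact type of the expression (including references), and the std::same_as<T> constraint turns any mismatch into a compile-time error. A minimal sketch, assuming C++20 (by_value is a hypothetical helper):

    #include <concepts>

    float by_value() { return 1.0f; }

    int main() {
      // Deduces exactly float; a deduced float& or double would fail to compile.
      std::same_as<float> decltype(auto) r = by_value();
      (void)r;
      return 0;
    }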
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/store.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/store.pass.cpp
new file mode 100644
index 000000000000000..0bb1f00840a890f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/store.pass.cpp
@@ -0,0 +1,114 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// void store(floating-point-type, memory_order = memory_order::seq_cst) volatile noexcept;
+// void store(floating-point-type, memory_order = memory_order::seq_cst) noexcept;
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <ranges>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatileStore = requires(volatile std::atomic<T>& a, T t) { a.store(t); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileStore<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().store(T(0))));
+
+ // store
+ {
+ MaybeVolatile<std::atomic<T>> a(3.1);
+ a.store(T(1.2), std::memory_order::relaxed);
+ assert(a.load() == T(1.2));
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+ // memory_order::relaxed
+ {
+ constexpr auto number_of_threads = 4;
+ constexpr auto loop = 1000;
+
+ MaybeVolatile<std::atomic<T>> at(T(-1.0));
+
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+ for (auto i = 0; i < number_of_threads; ++i) {
+ threads.push_back(support::make_test_thread([&at, i]() {
+ for (auto j = 0; j < loop; ++j) {
+ at.store(T(i), std::memory_order_relaxed);
+ }
+ }));
+ }
+
+ while (at.load() == T(-1.0)) {
+ std::this_thread::yield();
+ }
+
+ for (auto i = 0; i < loop; ++i) {
+ auto r = at.load();
+ assert(std::ranges::any_of(std::views::iota(0, number_of_threads), [r](auto j) { return r == T(j); }));
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+#endif
+
+ // memory_order::release
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store_no_arg = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val); };
+ auto store_with_order = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) {
+ x.store(new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) { return x.load(); };
+ test_seq_cst<T, MaybeVolatile>(store_no_arg, load);
+ test_seq_cst<T, MaybeVolatile>(store_with_order, load);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/test_helper.h b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/test_helper.h
new file mode 100644
index 000000000000000..b831b774fbd3073
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/test_helper.h
@@ -0,0 +1,122 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_STD_ATOMICS_ATOMICS_TYPES_FLOAT_TEST_HELPER_H
+#define TEST_STD_ATOMICS_ATOMICS_TYPES_FLOAT_TEST_HELPER_H
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <vector>
+
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+bool approximately_equals(T x, T y) {
+ T epsilon = 0.001;
+ return std::abs(x - y) < epsilon;
+}
+
+// Test that all threads see the exact same sequence of events.
+// The test passes 100% of the time if store_op and load_op
+// really access the memory with seq_cst ordering.
+template <class T, template <class> class MaybeVolatile, class StoreOp, class LoadOp>
+void test_seq_cst(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (int i = 0; i < 100; ++i) {
+ T old_value = 0.0;
+ T new_value = 1.0;
+
+ MaybeVolatile<std::atomic<T>> x(old_value);
+ MaybeVolatile<std::atomic<T>> y(old_value);
+
+ std::atomic_bool x_updated_first(false);
+ std::atomic_bool y_updated_first(false);
+
+ auto t1 = support::make_test_thread([&] { store_op(x, old_value, new_value); });
+
+ auto t2 = support::make_test_thread([&] { store_op(y, old_value, new_value); });
+
+ auto t3 = support::make_test_thread([&] {
+ while (!approximately_equals(load_op(x), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!approximately_equals(load_op(y), new_value)) {
+ x_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ auto t4 = support::make_test_thread([&] {
+ while (!approximately_equals(load_op(y), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!approximately_equals(load_op(x), new_value)) {
+ y_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ t1.join();
+ t2.join();
+ t3.join();
+ t4.join();
+    // thread 3 and thread 4 cannot see different orders of storing x and y
+ assert(!(x_updated_first && y_updated_first));
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+// Test that all writes made before the store are seen by other threads after the load.
+// The test passes 100% of the time if store_op and load_op
+// really access the memory with acquire-release ordering.
+template <class T, template <class> class MaybeVolatile, class StoreOp, class LoadOp>
+void test_acquire_release(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (auto i = 0; i < 100; ++i) {
+ T old_value = 0.0;
+ T new_value = 1.0;
+
+ MaybeVolatile<std::atomic<T>> at(old_value);
+ int non_atomic = 5;
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&at, &non_atomic, load_op, new_value] {
+ while (!approximately_equals(load_op(at), new_value)) {
+ std::this_thread::yield();
+ }
+ // Other thread's writes before the release store are visible
+ // in this thread's read after the acquire load
+ assert(non_atomic == 6);
+ }));
+ }
+
+ non_atomic = 6;
+ store_op(at, old_value, new_value);
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+#endif // TEST_STD_ATOMICS_ATOMICS_TYPES_FLOAT_TEST_HELPER_H
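test_seq_cst is an IRIW-style litmus test: two threads store into x and y, and two reader threads each record whether they saw their own variable updated before the other one; under sequential consistency the readers cannot observe the two stores in opposite orders, which is what the final assertion checks. test_acquire_release checks that a non-atomic write made before the release-style store is visible after the acquire-style load. A hedged usage sketch, assuming this header and the libc++ test support headers (test_macros.h, make_test_thread.h) are on the include path:

    #include <atomic>
    #include <type_traits>

    #include "test_helper.h"

    template <class T>
    void check_plain_store_load() {
      auto store = [](std::atomic<T>& x, T, T new_val) { x.store(new_val); };
      auto load  = [](std::atomic<T>& x) { return x.load(); };
      test_seq_cst<T, std::type_identity_t>(store, load);
      test_acquire_release<T, std::type_identity_t>(store, load);
    }

    int main() {
      check_plain_store_load<float>();
      return 0;
    }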
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
new file mode 100644
index 000000000000000..1a4e6dfe0b31553
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
@@ -0,0 +1,28 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// using value_type = floating-point-type;
+// using difference_type = value_type;
+// The atomic floating-point specializations are standard-layout structs. They each have a trivial destructor.
+
+#include <atomic>
+#include <type_traits>
+
+template <class T>
+void test() {
+ static_assert(std::is_same_v<typename std::atomic<T>::value_type, T>);
+  static_assert(std::is_same_v<typename std::atomic<T>::difference_type, T>);
+ static_assert(std::is_standard_layout_v<std::atomic<T>>);
+ static_assert(std::is_trivially_destructible_v<std::atomic<T>>);
+}
+
+template void test<float>();
+template void test<double>();
+template void test<long double>();
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/wait.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/wait.pass.cpp
new file mode 100644
index 000000000000000..ce730b792a21e6a
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/wait.pass.cpp
@@ -0,0 +1,124 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: availability-synchronization_library-missing
+// Clang's support for atomic operations on long double is broken. See https://github.com/llvm/llvm-project/issues/72893
+// XFAIL: tsan, msan
+// ADDITIONAL_COMPILE_FLAGS(has-latomic): -latomic
+
+// void wait(T old, memory_order order = memory_order::seq_cst) const volatile noexcept;
+// void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <concepts>
+#include <type_traits>
+#include <vector>
+
+#include "test_helper.h"
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+concept HasVolatileWait = requires(volatile std::atomic<T>& a, T t) { a.wait(T()); };
+
+template <class T, template <class> class MaybeVolatile = std::type_identity_t>
+void test_impl() {
+ // Uncomment the test after P1831R1 is implemented
+ // static_assert(HasVolatileWait<T> == std::atomic<T>::is_always_lock_free);
+ static_assert(noexcept(std::declval<MaybeVolatile<std::atomic<T>>&>().wait(T())));
+
+  // wait with different value
+ {
+ MaybeVolatile<std::atomic<T>> a(T(3.1));
+ a.wait(T(1.1), std::memory_order::relaxed);
+ }
+
+#ifndef TEST_HAS_NO_THREADS
+  // the value is equal to `old` at the beginning and is changed later
+  // TODO: wait() can also fail for long double; it is unclear whether
+  // x87 80-bit long double is expected to work at all.
+ if constexpr (!std::same_as<T, long double>) {
+ for (auto i = 0; i < 100; ++i) {
+ const T old = 3.1;
+ MaybeVolatile<std::atomic<T>> a(old);
+
+ std::atomic_bool started = false;
+ bool done = false;
+
+ auto t = support::make_test_thread([&a, &started, old, &done] {
+ started.store(true, std::memory_order::relaxed);
+
+ a.wait(old);
+
+ // likely to fail if wait did not block
+ assert(done);
+ });
+
+ while (!started.load(std::memory_order::relaxed)) {
+ std::this_thread::yield();
+ }
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+ done = true;
+ a.store(T(9.9));
+ a.notify_all();
+ t.join();
+ }
+ }
+#endif
+
+ // memory_order::acquire
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(9999.999), std::memory_order::acquire);
+ return result;
+ };
+ test_acquire_release<T, MaybeVolatile>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](MaybeVolatile<std::atomic<T>>& x, T, T new_val) { x.store(new_val); };
+ auto load_no_arg = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(9999.999));
+ return result;
+ };
+ auto load_with_order = [](MaybeVolatile<std::atomic<T>>& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(9999.999), std::memory_order::seq_cst);
+ return result;
+ };
+ test_seq_cst<T, MaybeVolatile>(store, load_no_arg);
+ test_seq_cst<T, MaybeVolatile>(store, load_with_order);
+ }
+}
+
+template <class T>
+void test() {
+ test_impl<T>();
+ if constexpr (std::atomic<T>::is_always_lock_free) {
+ test_impl<T, std::add_volatile_t>();
+ }
+}
+
+int main(int, char**) {
+ test<float>();
+ test<double>();
+ test<long double>();
+
+ return 0;
+}
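The ordering lambdas above pass T(9999.999), a value the atomic never holds, so wait() returns immediately instead of blocking; that lets the acquire and seq_cst helpers drive wait()'s memory-order parameter through the same machinery used for plain loads. A minimal sketch of that behaviour, assuming C++20 and wait/notify support for std::atomic<float>:

    #include <atomic>

    int main() {
      std::atomic<float> a(1.0f);
      a.wait(2.0f); // the stored value already differs from 2.0f, so this returns at once
      return 0;
    }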
diff --git a/libcxx/utils/libcxx/test/features.py b/libcxx/utils/libcxx/test/features.py
index 29822f55521360b..e854aed66513806 100644
--- a/libcxx/utils/libcxx/test/features.py
+++ b/libcxx/utils/libcxx/test/features.py
@@ -110,6 +110,16 @@ def _getAndroidDeviceApi(cfg):
name="verify-support",
when=lambda cfg: hasCompileFlag(cfg, "-Xclang -verify-ignore-unexpected"),
),
+ Feature(
+ name="has-latomic",
+ when=lambda cfg: sourceBuilds(
+ cfg,
+ """
+ int main(int, char**) { return 0; }
+ """,
+ ["-latomic"],
+ ),
+ ),
Feature(
name="non-lockfree-atomics",
when=lambda cfg: sourceBuilds(