[libcxx-commits] [libcxx] [libc++] Implement C++20 atomic_ref (PR #76647)
via libcxx-commits
libcxx-commits at lists.llvm.org
Sat Dec 30 19:31:52 PST 2023
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-libcxx
Author: Damien L-G (dalg24)
<details>
<summary>Changes</summary>
Implement `atomic_ref` class template by reusing `atomic_base_impl`.
Based on the work from https://reviews.llvm.org/D72240
---
Patch is 99.23 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/76647.diff
40 Files Affected:
- (modified) libcxx/include/CMakeLists.txt (+1)
- (added) libcxx/include/__atomic/atomic_ref.h (+286)
- (modified) libcxx/include/__atomic/check_memory_order.h (+4)
- (modified) libcxx/include/__atomic/cxx_atomic_impl.h (+73-350)
- (modified) libcxx/include/__config (+3-1)
- (modified) libcxx/include/atomic (+1)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp (+63)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp (+63)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp (+39)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp (+60)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp (+68)
- (added) libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp (+60)
- (added) libcxx/test/std/atomics/atomics.ref/assign.pass.cpp (+50)
- (added) libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp (+49)
- (added) libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp (+49)
- (added) libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp (+49)
- (added) libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp (+83)
- (added) libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp (+84)
- (added) libcxx/test/std/atomics/atomics.ref/convert.pass.cpp (+47)
- (added) libcxx/test/std/atomics/atomics.ref/ctor.explicit.verify.cpp (+34)
- (added) libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp (+46)
- (added) libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp (+39)
- (added) libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp (+48)
- (added) libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp (+75)
- (added) libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp (+56)
- (added) libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp (+54)
- (added) libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp (+75)
- (added) libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp (+54)
- (added) libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp (+85)
- (added) libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp (+47)
- (added) libcxx/test/std/atomics/atomics.ref/load.pass.cpp (+48)
- (added) libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp (+134)
- (added) libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp (+86)
- (added) libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp (+54)
- (added) libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp (+66)
- (added) libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp (+66)
- (added) libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp (+34)
- (added) libcxx/test/std/atomics/atomics.ref/store.pass.cpp (+50)
- (added) libcxx/test/std/atomics/atomics.ref/type.verify.cpp (+26)
- (added) libcxx/test/std/atomics/atomics.ref/wait.pass.cpp (+65)
``````````diff
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 0fe3ab44d2466e..5d7e4ca98b1f75 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -236,6 +236,7 @@ set(files
__atomic/atomic_flag.h
__atomic/atomic_init.h
__atomic/atomic_lock_free.h
+ __atomic/atomic_ref.h
__atomic/atomic_sync.h
__atomic/check_memory_order.h
__atomic/contention_t.h
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
new file mode 100644
index 00000000000000..6f2467cad5a24e
--- /dev/null
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -0,0 +1,286 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
+#define _LIBCPP___ATOMIC_ATOMIC_REF_H
+
+#include <__assert>
+#include <__atomic/check_memory_order.h>
+#include <__atomic/cxx_atomic_impl.h>
+#include <__atomic/is_always_lock_free.h>
+#include <__config>
+#include <__memory/addressof.h>
+#include <__type_traits/is_floating_point.h>
+#include <__type_traits/is_function.h>
+#include <__type_traits/is_nothrow_constructible.h>
+#include <__type_traits/is_same.h>
+#include <cinttypes>
+#include <concepts>
+#include <cstddef>
+#include <limits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Tp, bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>, bool = is_floating_point_v<_Tp>>
+struct __atomic_ref_base {
+ mutable __cxx_atomic_impl<_Tp&> __a_;
+
+ using value_type = _Tp;
+
+ static constexpr size_t required_alignment = alignof(_Tp);
+
+ static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<_Tp>::__value;
+
+ _LIBCPP_HIDE_FROM_ABI
+ bool is_lock_free() const noexcept { return __cxx_atomic_is_lock_free(sizeof(_Tp)); }
+
+ _LIBCPP_HIDE_FROM_ABI
+ void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_UNCATEGORIZED(
+ __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
+ "memory order argument to atomic store operation is invalid");
+ __cxx_atomic_store(&__a_, __desired, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator=(_Tp __desired) const noexcept {
+ store(__desired);
+ return __desired;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp load(memory_order __order = memory_order::seq_cst) const noexcept _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_UNCATEGORIZED(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "memory order argument to atomic load operation is invalid");
+ return __cxx_atomic_load(&__a_, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ operator _Tp() const noexcept { return load(); }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __cxx_atomic_exchange(&__a_, __desired, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_UNCATEGORIZED(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+ return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __success, __failure);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_UNCATEGORIZED(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+ return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __success, __failure);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __cxx_atomic_compare_exchange_weak(&__a_, &__expected, __desired, __order, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __cxx_atomic_compare_exchange_strong(&__a_, &__expected, __desired, __order, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_UNCATEGORIZED(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "memory order argument to atomic wait operation is invalid");
+ __cxx_atomic_wait(addressof(__a_), __old, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ void notify_one() const noexcept { __cxx_atomic_notify_one(addressof(__a_)); }
+ _LIBCPP_HIDE_FROM_ABI
+ void notify_all() const noexcept { __cxx_atomic_notify_all(addressof(__a_)); }
+
+ _LIBCPP_HIDE_FROM_ABI
+ __atomic_ref_base(_Tp& __obj) : __a_(__obj) {}
+};
+
+template <class _Tp>
+struct __atomic_ref_base<_Tp, /*_IsIntegral=*/true, /*_IsFloatingPoint=*/false>
+ : public __atomic_ref_base<_Tp, false, false> {
+ using __base = __atomic_ref_base<_Tp, false, false>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI
+ __atomic_ref_base(_Tp& __obj) : __base(__obj) {}
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_and(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_or(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_xor(&this->__a_, __arg, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
+};
+
+template <class _Tp>
+struct __atomic_ref_base<_Tp, /*_IsIntegral=*/false, /*_IsFloatingPoint=*/true>
+ : public __atomic_ref_base<_Tp, false, false> {
+ using __base = __atomic_ref_base<_Tp, false, false>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI
+ __atomic_ref_base(_Tp& __obj) : __base(__obj) {}
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+template <class _Tp>
+struct atomic_ref : public __atomic_ref_base<_Tp> {
+ static_assert(is_trivially_copyable<_Tp>::value, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");
+
+ using __base = __atomic_ref_base<_Tp>;
+
+ _LIBCPP_HIDE_FROM_ABI
+ explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_UNCATEGORIZED((uintptr_t)addressof(__obj) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+template <class _Tp>
+struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
+ using __base = __atomic_ref_base<_Tp*>;
+
+ using difference_type = ptrdiff_t;
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_add(&this->__a_, __arg, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __cxx_atomic_fetch_sub(&this->__a_, __arg, __order);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator++(int) const noexcept { return fetch_add(1); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator--(int) const noexcept { return fetch_sub(1); }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+
+ _LIBCPP_HIDE_FROM_ABI
+ explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
+
+ _LIBCPP_HIDE_FROM_ABI
+ _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H
diff --git a/libcxx/include/__atomic/check_memory_order.h b/libcxx/include/__atomic/check_memory_order.h
index 3012aec0521b38..536f764a619026 100644
--- a/libcxx/include/__atomic/check_memory_order.h
+++ b/libcxx/include/__atomic/check_memory_order.h
@@ -27,4 +27,8 @@
_LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \
"memory order argument to atomic operation is invalid")
+#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m) \
+ _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
#endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 1a0b808a0cb1c4..56cd703c258944 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -15,8 +15,12 @@
#include <__memory/addressof.h>
#include <__type_traits/conditional.h>
#include <__type_traits/is_assignable.h>
+#include <__type_traits/is_pointer.h>
+#include <__type_traits/is_reference.h>
#include <__type_traits/is_trivially_copyable.h>
+#include <__type_traits/is_volatile.h>
#include <__type_traits/remove_const.h>
+#include <__type_traits/remove_reference.h>
#include <cstddef>
#include <cstring>
@@ -58,9 +62,27 @@ struct __cxx_atomic_base_impl {
}
# endif // _LIBCPP_CXX03_LANG
_LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
+ using __contained_t = _Tp;
_Tp __a_value;
};
+template <typename _Tp, template <typename> class _TemplateTp>
+struct __is_instantiation_of : false_type {};
+
+template <typename _Tp, template <typename> class _TemplateTp>
+struct __is_instantiation_of<_TemplateTp<_Tp>, _TemplateTp> : true_type {};
+
+template <typename _Tp,
+ typename = typename enable_if<__is_instantiation_of<typename remove_volatile<typename _Tp::__base>::type,
+ __cxx_atomic_base_impl>::value,
+ bool>::type>
+struct __cxx_atomic_base_impl_traits {
+ static constexpr bool __is_value_volatile = is_volatile<_Tp>::value;
+ static constexpr bool __is_value_ref = is_reference<typename _Tp::__contained_t>::value;
+ using __underlying_t = typename remove_volatile<typename remove_reference<typename _Tp::__contained_t>::type>::type;
+ static constexpr bool __is_value_pointer = is_pointer<__underlying_t>::value;
+};
+
_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
// Avoid switch statement to make this a constexpr.
return __order == memory_order_relaxed
@@ -87,13 +109,15 @@ _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory
: (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+template <typename _Tp, typename enable_if<__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
__cxx_atomic_assign_volatile(__a->__a_value, __val);
}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+template <typename _Tp, typename enable_if<!__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
__a->__a_value = __val;
}
@@ -107,77 +131,46 @@ _LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+__cxx_atomic_store(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val, memory_order __order) {
__atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
- __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_load(const _Tp* __a, memory_order __order) {
+ using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
+ alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
+ __atomic_load(
+ std::addressof(__a->__a_value), std::addressof(*reinterpret_cast<_Ret*>(__mem)), __to_gcc_order(__order));
+ return *reinterpret_cast<_Ret*>(__mem);
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
- _Tp __ret;
- __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load_inplace(
+ const _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __dst, memory_order __order) {
__atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
}
template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
- __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
- _Tp __ret;
- __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
- _Tp __ret;
+_LIBCPP_HIDE_FROM_ABI typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __cxx_atomic_exchange(
+ _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value, memory_order __order) {
+ using _Ret = typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t;
+ alignas(alignof(_Ret)) unsigned char __mem[sizeof(_Ret)];
__atomic_exchange(
- std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
- _Tp __ret;
- __atomic_exchange(
- std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- volatile __cxx_atomic_base_impl<_Tp>* __a,
- _Tp* __expected,
- _Tp __value,
- memory_order __success,
- memory_order __failure) {
- return __atomic_compare_exchange(
std::addressof(__a->__a_value),
- __expected,
std::addressof(__value),
- false,
- __to_gcc_order(__success),
- __to_gcc_failure_order(__failure));
+ std::addressof(*reinterpret_cast<_Ret*>(__mem)),
+ __to_gcc_order(__order));
+ return *reinterpret_cast<_Ret*>(__mem);
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+ _Tp* __a,
+ typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
+ typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
+ memory_order __success,
+ memory_order __failure) {
return __atomic_compare_...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/76647
More information about the libcxx-commits
mailing list