[libcxx-commits] [libcxx] 3a63407 - [libc++] Make __atomic_base into an implementation detail of std::atomic (#115764)
via libcxx-commits
libcxx-commits at lists.llvm.org
Tue Nov 19 15:35:18 PST 2024
Author: Louis Dionne
Date: 2024-11-20T00:35:14+01:00
New Revision: 3a63407686313f46f9abc664fd10b01f4359ee27
URL: https://github.com/llvm/llvm-project/commit/3a63407686313f46f9abc664fd10b01f4359ee27
DIFF: https://github.com/llvm/llvm-project/commit/3a63407686313f46f9abc664fd10b01f4359ee27.diff
LOG: [libc++] Make __atomic_base into an implementation detail of std::atomic (#115764)
The __atomic_base base class is only useful for conditionalizing the
operations we provide inside std::atomic. It shouldn't be used from other
places in the library, which can use std::atomic directly instead.
Since we've granularized our includes, using std::atomic directly should
make little difference to compile times.
This patch starts using std::atomic directly from other classes like
std::barrier and std::latch. This change shouldn't be an ABI break, since
__atomic_base and std::atomic have the same size and layout.
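(For illustration only, not part of the commit: a minimal, libc++-specific
sketch of the size-and-layout claim. It relies on the internal name
std::__atomic_base, which is an implementation detail and is only reachable
through libc++'s <atomic>.)

    #include <atomic>
    #include <cstddef>
    #include <type_traits>

    int main() {
      using Internal = std::__atomic_base<std::ptrdiff_t>; // libc++ internal
      using Public   = std::atomic<std::ptrdiff_t>;

      // std::atomic<T> derives from __atomic_base<T> and adds no non-static
      // data members, so members of either type occupy identical storage.
      static_assert(std::is_base_of<Internal, Public>::value,
                    "atomic derives from __atomic_base");
      static_assert(sizeof(Internal) == sizeof(Public), "same size");
      static_assert(alignof(Internal) == alignof(Public), "same alignment");
    }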
This patch isolates other parts of the code base from implementation
details of std::atomic and simplifies the mental model for std::atomic's
layered implementation by making it clear that __atomic_base is only an
implementation detail of std::atomic.
Added:
Modified:
libcxx/include/CMakeLists.txt
libcxx/include/__atomic/atomic.h
libcxx/include/atomic
libcxx/include/barrier
libcxx/include/latch
libcxx/include/module.modulemap
libcxx/include/semaphore
libcxx/src/barrier.cpp
Removed:
libcxx/include/__atomic/atomic_base.h
################################################################################
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 83168d153a7f41..0ae031e5365aef 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -205,7 +205,6 @@ set(files
__assert
__atomic/aliases.h
__atomic/atomic.h
- __atomic/atomic_base.h
__atomic/atomic_flag.h
__atomic/atomic_init.h
__atomic/atomic_lock_free.h
diff --git a/libcxx/include/__atomic/atomic.h b/libcxx/include/__atomic/atomic.h
index 113475cb1f0079..ae0475693f22b4 100644
--- a/libcxx/include/__atomic/atomic.h
+++ b/libcxx/include/__atomic/atomic.h
@@ -9,9 +9,10 @@
#ifndef _LIBCPP___ATOMIC_ATOMIC_H
#define _LIBCPP___ATOMIC_ATOMIC_H
-#include <__atomic/atomic_base.h>
+#include <__atomic/atomic_sync.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/cxx_atomic_impl.h>
+#include <__atomic/is_always_lock_free.h>
#include <__atomic/memory_order.h>
#include <__config>
#include <__cstddef/ptrdiff_t.h>
@@ -21,6 +22,7 @@
#include <__type_traits/is_floating_point.h>
#include <__type_traits/is_function.h>
#include <__type_traits/is_integral.h>
+#include <__type_traits/is_nothrow_constructible.h>
#include <__type_traits/is_same.h>
#include <__type_traits/remove_const.h>
#include <__type_traits/remove_pointer.h>
@@ -34,6 +36,197 @@
_LIBCPP_BEGIN_NAMESPACE_STD
+template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
+struct __atomic_base // false
+{
+ mutable __cxx_atomic_impl<_Tp> __a_;
+
+#if _LIBCPP_STD_VER >= 17
+ static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value;
+#endif
+
+ _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT {
+ return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
+ }
+ _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const _NOEXCEPT {
+ return static_cast<__atomic_base const volatile*>(this)->is_lock_free();
+ }
+ _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
+ std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
+ std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
+ return std::__cxx_atomic_load(std::addressof(__a_), __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
+ return std::__cxx_atomic_load(std::addressof(__a_), __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI operator _Tp() const volatile _NOEXCEPT { return load(); }
+ _LIBCPP_HIDE_FROM_ABI operator _Tp() const _NOEXCEPT { return load(); }
+ _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+ return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+ return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+ return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+ return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+ }
+
+#if _LIBCPP_STD_VER >= 20
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const
+ volatile _NOEXCEPT {
+ std::__atomic_wait(*this, __v, __m);
+ }
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+ wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
+ std::__atomic_wait(*this, __v, __m);
+ }
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
+ std::__atomic_notify_one(*this);
+ }
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
+ std::__atomic_notify_all(*this);
+ }
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
+#endif // _LIBCPP_STD_VER >= 20
+
+#if _LIBCPP_STD_VER >= 20
+ _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
+#else
+ _LIBCPP_HIDE_FROM_ABI __atomic_base() _NOEXCEPT = default;
+#endif
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
+
+ __atomic_base(const __atomic_base&) = delete;
+};
+
+// atomic<Integral>
+
+template <class _Tp>
+struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
+ using __base = __atomic_base<_Tp, false>;
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __atomic_base() _NOEXCEPT = default;
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+ return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+ return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) volatile _NOEXCEPT { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) volatile _NOEXCEPT { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++() volatile _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--() volatile _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) volatile _NOEXCEPT { return fetch_and(__op) & __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) volatile _NOEXCEPT { return fetch_or(__op) | __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) volatile _NOEXCEPT { return fetch_xor(__op) ^ __op; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
+};
+
+// Here we need _IsIntegral because the default template argument is not enough
+// e.g __atomic_base<int> is __atomic_base<int, true>, which inherits from
+// __atomic_base<int, false> and the caller of the wait function is
+// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
+template <class _Tp, bool _IsIntegral>
+struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
+ static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
+ return __a.load(__order);
+ }
+
+ static _LIBCPP_HIDE_FROM_ABI _Tp
+ __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) {
+ return __this.load(__order);
+ }
+
+ static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
+ __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
+ return std::addressof(__a.__a_);
+ }
+
+ static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>*
+ __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) {
+ return std::addressof(__this.__a_);
+ }
+};
+
template <class _Tp>
struct atomic : public __atomic_base<_Tp> {
using __base = __atomic_base<_Tp>;
@@ -123,6 +316,9 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
atomic& operator=(const atomic&) volatile = delete;
};
+template <class _Tp>
+struct __atomic_waitable_traits<atomic<_Tp> > : __atomic_waitable_traits<__atomic_base<_Tp> > {};
+
#if _LIBCPP_STD_VER >= 20
template <class _Tp>
requires is_floating_point_v<_Tp>
diff --git a/libcxx/include/__atomic/atomic_base.h b/libcxx/include/__atomic/atomic_base.h
deleted file mode 100644
index 93f5c4cff0d1bc..00000000000000
--- a/libcxx/include/__atomic/atomic_base.h
+++ /dev/null
@@ -1,223 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ATOMIC_ATOMIC_BASE_H
-#define _LIBCPP___ATOMIC_ATOMIC_BASE_H
-
-#include <__atomic/atomic_sync.h>
-#include <__atomic/check_memory_order.h>
-#include <__atomic/cxx_atomic_impl.h>
-#include <__atomic/is_always_lock_free.h>
-#include <__atomic/memory_order.h>
-#include <__config>
-#include <__memory/addressof.h>
-#include <__type_traits/is_integral.h>
-#include <__type_traits/is_nothrow_constructible.h>
-#include <__type_traits/is_same.h>
-#include <version>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
-struct __atomic_base // false
-{
- mutable __cxx_atomic_impl<_Tp> __a_;
-
-#if _LIBCPP_STD_VER >= 17
- static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value;
-#endif
-
- _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT {
- return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
- }
- _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const _NOEXCEPT {
- return static_cast<__atomic_base const volatile*>(this)->is_lock_free();
- }
- _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
- _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
- std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
- }
- _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
- _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
- std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
- _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
- return std::__cxx_atomic_load(std::addressof(__a_), __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
- _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
- return std::__cxx_atomic_load(std::addressof(__a_), __m);
- }
- _LIBCPP_HIDE_FROM_ABI operator _Tp() const volatile _NOEXCEPT { return load(); }
- _LIBCPP_HIDE_FROM_ABI operator _Tp() const _NOEXCEPT { return load(); }
- _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
- _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
- return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
- }
- _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
- _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
- return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
- _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
- return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
- }
- _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
- _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
- return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
- }
- _LIBCPP_HIDE_FROM_ABI bool
- compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
- }
-
-#if _LIBCPP_STD_VER >= 20
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const
- volatile _NOEXCEPT {
- std::__atomic_wait(*this, __v, __m);
- }
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
- wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
- std::__atomic_wait(*this, __v, __m);
- }
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
- std::__atomic_notify_one(*this);
- }
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
- std::__atomic_notify_all(*this);
- }
- _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
-#endif // _LIBCPP_STD_VER >= 20
-
-#if _LIBCPP_STD_VER >= 20
- _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
-#else
- _LIBCPP_HIDE_FROM_ABI __atomic_base() _NOEXCEPT = default;
-#endif
-
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
-
- __atomic_base(const __atomic_base&) = delete;
-};
-
-// atomic<Integral>
-
-template <class _Tp>
-struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
- using __base = __atomic_base<_Tp, false>;
-
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __atomic_base() _NOEXCEPT = default;
-
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
-
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
- return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
- return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
- }
-
- _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) volatile _NOEXCEPT { return fetch_add(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) volatile _NOEXCEPT { return fetch_sub(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++() volatile _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--() volatile _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) volatile _NOEXCEPT { return fetch_and(__op) & __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) volatile _NOEXCEPT { return fetch_or(__op) | __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) volatile _NOEXCEPT { return fetch_xor(__op) ^ __op; }
- _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
-};
-
-// Here we need _IsIntegral because the default template argument is not enough
-// e.g __atomic_base<int> is __atomic_base<int, true>, which inherits from
-// __atomic_base<int, false> and the caller of the wait function is
-// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
-template <class _Tp, bool _IsIntegral>
-struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
- static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
- return __a.load(__order);
- }
-
- static _LIBCPP_HIDE_FROM_ABI _Tp
- __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) {
- return __this.load(__order);
- }
-
- static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
- __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
- return std::addressof(__a.__a_);
- }
-
- static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>*
- __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) {
- return std::addressof(__this.__a_);
- }
-};
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // _LIBCPP___ATOMIC_ATOMIC_BASE_H
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 716d198bc236bb..d4adf277c49c7a 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -591,7 +591,6 @@ template <class T>
#include <__atomic/aliases.h>
#include <__atomic/atomic.h>
-#include <__atomic/atomic_base.h>
#include <__atomic/atomic_flag.h>
#include <__atomic/atomic_init.h>
#include <__atomic/atomic_lock_free.h>
diff --git a/libcxx/include/barrier b/libcxx/include/barrier
index c7df0e9e6e8d43..980eae06ab140f 100644
--- a/libcxx/include/barrier
+++ b/libcxx/include/barrier
@@ -50,7 +50,7 @@ namespace std
#if _LIBCPP_HAS_THREADS
# include <__assert>
-# include <__atomic/atomic_base.h>
+# include <__atomic/atomic.h>
# include <__atomic/memory_order.h>
# include <__cstddef/ptrdiff_t.h>
# include <__memory/unique_ptr.h>
@@ -109,9 +109,9 @@ template <class _CompletionF>
class __barrier_base {
ptrdiff_t __expected_;
unique_ptr<__barrier_algorithm_base, void (*)(__barrier_algorithm_base*)> __base_;
- __atomic_base<ptrdiff_t> __expected_adjustment_;
+ atomic<ptrdiff_t> __expected_adjustment_;
_CompletionF __completion_;
- __atomic_base<__barrier_phase_t> __phase_;
+ atomic<__barrier_phase_t> __phase_;
public:
using arrival_token = __barrier_phase_t;
@@ -167,10 +167,10 @@ Two versions of this algorithm are provided:
template <class _CompletionF>
class __barrier_base {
- __atomic_base<ptrdiff_t> __expected;
- __atomic_base<ptrdiff_t> __arrived;
+ atomic<ptrdiff_t> __expected;
+ atomic<ptrdiff_t> __arrived;
_CompletionF __completion;
- __atomic_base<bool> __phase;
+ atomic<bool> __phase;
public:
using arrival_token = bool;
@@ -212,7 +212,7 @@ class __barrier_base<__empty_completion> {
static constexpr uint64_t __phase_bit = 1ull << 63;
static constexpr uint64_t __arrived_mask = (__phase_bit - 1) & ~__expected_mask;
- __atomic_base<uint64_t> __phase_arrived_expected;
+ atomic<uint64_t> __phase_arrived_expected;
static _LIBCPP_HIDE_FROM_ABI constexpr uint64_t __init(ptrdiff_t __count) _NOEXCEPT {
return ((uint64_t(1u << 31) - __count) << 32) | (uint64_t(1u << 31) - __count);
diff --git a/libcxx/include/latch b/libcxx/include/latch
index 90cca27c50c376..1860ed816c8562 100644
--- a/libcxx/include/latch
+++ b/libcxx/include/latch
@@ -45,7 +45,7 @@ namespace std
#if _LIBCPP_HAS_THREADS
# include <__assert>
-# include <__atomic/atomic_base.h>
+# include <__atomic/atomic.h>
# include <__atomic/atomic_sync.h>
# include <__atomic/memory_order.h>
# include <__cstddef/ptrdiff_t.h>
@@ -64,7 +64,7 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
class latch {
- __atomic_base<ptrdiff_t> __a_;
+ atomic<ptrdiff_t> __a_;
public:
static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { return numeric_limits<ptrdiff_t>::max(); }
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index 139c0a83666435..4e06a68c6a6b61 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -844,7 +844,6 @@ module std [system] {
module atomic {
module aliases { header "__atomic/aliases.h" }
- module atomic_base { header "__atomic/atomic_base.h" }
module atomic_flag { header "__atomic/atomic_flag.h" }
module atomic_init { header "__atomic/atomic_init.h" }
module atomic_lock_free { header "__atomic/atomic_lock_free.h" }
diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore
index 05c85bc810603e..c594df459c93fc 100644
--- a/libcxx/include/semaphore
+++ b/libcxx/include/semaphore
@@ -50,7 +50,7 @@ using binary_semaphore = counting_semaphore<1>; // since C++20
#if _LIBCPP_HAS_THREADS
# include <__assert>
-# include <__atomic/atomic_base.h>
+# include <__atomic/atomic.h>
# include <__atomic/atomic_sync.h>
# include <__atomic/memory_order.h>
# include <__chrono/time_point.h>
@@ -83,7 +83,7 @@ functions. It avoids contention against users' own use of those facilities.
# define _LIBCPP_SEMAPHORE_MAX (numeric_limits<ptrdiff_t>::max())
class __atomic_semaphore_base {
- __atomic_base<ptrdiff_t> __a_;
+ atomic<ptrdiff_t> __a_;
public:
_LIBCPP_HIDE_FROM_ABI constexpr explicit __atomic_semaphore_base(ptrdiff_t __count) : __a_(__count) {}
diff --git a/libcxx/src/barrier.cpp b/libcxx/src/barrier.cpp
index 69601bfeec0546..b97c7bd73b74cd 100644
--- a/libcxx/src/barrier.cpp
+++ b/libcxx/src/barrier.cpp
@@ -17,7 +17,7 @@ class __barrier_algorithm_base {
public:
struct alignas(64) /* naturally-align the heap state */ __state_t {
struct {
- __atomic_base<__barrier_phase_t> __phase{0};
+ atomic<__barrier_phase_t> __phase{0};
} __tickets[64];
};
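(Again illustrative rather than part of the patch: since the change is purely
internal, a user-level smoke test like the following, built as C++20 against
libc++, should compile and behave exactly as before.)

    #include <barrier>
    #include <latch>
    #include <thread>

    int main() {
      std::latch ready(1);
      std::barrier sync(2); // default-completion path touched by this patch

      std::thread worker([&] {
        ready.wait();           // blocks until main calls count_down()
        sync.arrive_and_wait(); // rendezvous with main
      });

      ready.count_down();
      sync.arrive_and_wait();
      worker.join();
    }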