[libcxx-commits] [libcxx] [libc++] Make __atomic_base into an implementation detail of std::atomic (PR #115764)

Louis Dionne via libcxx-commits libcxx-commits at lists.llvm.org
Mon Nov 11 12:35:32 PST 2024


https://github.com/ldionne created https://github.com/llvm/llvm-project/pull/115764

The __atomic_base base class exists only so that std::atomic can conditionally provide additional operations (e.g. arithmetic for integral types). It shouldn't be used directly from other places in the library, which can use std::atomic instead.

Since we've granularized our includes, using std::atomic directly should not make much of a difference in compile times.

This patch starts using std::atomic directly from other classes like std::barrier and std::latch. This shouldn't be an ABI break since __atomic_base<T> and std::atomic<T> have the same size and layout.
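
As a rough illustration of why (this is a sketch, not part of the patch, and spelling the internal name as std::__atomic_base relies on libc++'s inline namespace being reachable from user code): std::atomic<T> derives from __atomic_base<T> without adding any data members, so a sanity check along these lines would be expected to hold when built against libc++:

  #include <atomic>
  #include <cstddef>

  // Hypothetical libc++-only check: replacing a __atomic_base<ptrdiff_t> member
  // with std::atomic<ptrdiff_t> leaves the enclosing object's size and alignment
  // unchanged, because std::atomic adds no data members on top of its base.
  static_assert(sizeof(std::atomic<std::ptrdiff_t>) ==
                sizeof(std::__atomic_base<std::ptrdiff_t>), "");
  static_assert(alignof(std::atomic<std::ptrdiff_t>) ==
                alignof(std::__atomic_base<std::ptrdiff_t>), "");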

This patch has two benefits: it isolates other parts of the code base from the implementation details of std::atomic, and it simplifies the mental model for how std::atomic is layered by making it clear that __atomic_base is only an implementation detail of std::atomic.
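
To make the new usage pattern concrete, here is a minimal, self-contained sketch of a latch-style class holding a std::atomic<ptrdiff_t> member directly, the way std::latch does after this patch. It is not the libc++ implementation; the name toy_latch is invented for illustration, and it requires C++20 for atomic wait/notify:

  #include <atomic>
  #include <cstddef>

  class toy_latch {
    std::atomic<std::ptrdiff_t> counter_; // plain std::atomic member, no internal base class

  public:
    constexpr explicit toy_latch(std::ptrdiff_t expected) : counter_(expected) {}

    void count_down(std::ptrdiff_t n = 1) {
      // Release ordering publishes prior writes to whoever observes the new value.
      if (counter_.fetch_sub(n, std::memory_order_release) == n)
        counter_.notify_all(); // the decrement that reaches zero wakes all waiters
    }

    void wait() const {
      std::ptrdiff_t current = counter_.load(std::memory_order_acquire);
      while (current != 0) {
        counter_.wait(current); // block until the stored value changes (C++20)
        current = counter_.load(std::memory_order_acquire);
      }
    }
  };

A caller would construct toy_latch done(3), have each worker call done.count_down(), and block in done.wait() until the count reaches zero.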

From 923cba7323e101c3d5d52a6cf7d01ffb5aa38fcd Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Mon, 11 Nov 2024 15:24:51 -0500
Subject: [PATCH] [libc++] Make __atomic_base into an implementation detail of
 std::atomic

The __atomic_base base class exists only so that std::atomic can
conditionally provide additional operations (e.g. arithmetic for
integral types). It shouldn't be used directly from other places in
the library, which can use std::atomic instead.

Since we've granularized our includes, using std::atomic directly
should not make much of a difference in compile times.

This patch starts using std::atomic directly from other classes like
std::barrier and std::latch. This shouldn't be an ABI break since
__atomic_base<T> and std::atomic<T> have the same size and layout.

This patch has two benefits: it isolates other parts of the code base
from the implementation details of std::atomic, and it simplifies the
mental model for how std::atomic is layered by making it clear that
__atomic_base is only an implementation detail of std::atomic.
---
 libcxx/include/CMakeLists.txt         |   1 -
 libcxx/include/__atomic/atomic.h      | 198 ++++++++++++++++++++++-
 libcxx/include/__atomic/atomic_base.h | 223 --------------------------
 libcxx/include/atomic                 |   1 -
 libcxx/include/barrier                |  14 +-
 libcxx/include/latch                  |   4 +-
 libcxx/include/module.modulemap       |   1 -
 libcxx/include/semaphore              |   4 +-
 libcxx/src/barrier.cpp                |   2 +-
 9 files changed, 209 insertions(+), 239 deletions(-)
 delete mode 100644 libcxx/include/__atomic/atomic_base.h

diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 6dd392685c18ee..eee1644e8edf72 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -205,7 +205,6 @@ set(files
   __assert
   __atomic/aliases.h
   __atomic/atomic.h
-  __atomic/atomic_base.h
   __atomic/atomic_flag.h
   __atomic/atomic_init.h
   __atomic/atomic_lock_free.h
diff --git a/libcxx/include/__atomic/atomic.h b/libcxx/include/__atomic/atomic.h
index 113475cb1f0079..ae0475693f22b4 100644
--- a/libcxx/include/__atomic/atomic.h
+++ b/libcxx/include/__atomic/atomic.h
@@ -9,9 +9,10 @@
 #ifndef _LIBCPP___ATOMIC_ATOMIC_H
 #define _LIBCPP___ATOMIC_ATOMIC_H
 
-#include <__atomic/atomic_base.h>
+#include <__atomic/atomic_sync.h>
 #include <__atomic/check_memory_order.h>
 #include <__atomic/cxx_atomic_impl.h>
+#include <__atomic/is_always_lock_free.h>
 #include <__atomic/memory_order.h>
 #include <__config>
 #include <__cstddef/ptrdiff_t.h>
@@ -21,6 +22,7 @@
 #include <__type_traits/is_floating_point.h>
 #include <__type_traits/is_function.h>
 #include <__type_traits/is_integral.h>
+#include <__type_traits/is_nothrow_constructible.h>
 #include <__type_traits/is_same.h>
 #include <__type_traits/remove_const.h>
 #include <__type_traits/remove_pointer.h>
@@ -34,6 +36,197 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
+template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
+struct __atomic_base // false
+{
+  mutable __cxx_atomic_impl<_Tp> __a_;
+
+#if _LIBCPP_STD_VER >= 17
+  static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value;
+#endif
+
+  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT {
+    return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
+  }
+  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const _NOEXCEPT {
+    return static_cast<__atomic_base const volatile*>(this)->is_lock_free();
+  }
+  _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
+    std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
+    std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
+    return std::__cxx_atomic_load(std::addressof(__a_), __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
+    return std::__cxx_atomic_load(std::addressof(__a_), __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI operator _Tp() const volatile _NOEXCEPT { return load(); }
+  _LIBCPP_HIDE_FROM_ABI operator _Tp() const _NOEXCEPT { return load(); }
+  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
+      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
+    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI bool
+  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
+  }
+
+#if _LIBCPP_STD_VER >= 20
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const
+      volatile _NOEXCEPT {
+    std::__atomic_wait(*this, __v, __m);
+  }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
+  wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
+    std::__atomic_wait(*this, __v, __m);
+  }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
+    std::__atomic_notify_one(*this);
+  }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
+    std::__atomic_notify_all(*this);
+  }
+  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
+#endif //  _LIBCPP_STD_VER >= 20
+
+#if _LIBCPP_STD_VER >= 20
+  _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
+#else
+  _LIBCPP_HIDE_FROM_ABI __atomic_base() _NOEXCEPT = default;
+#endif
+
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
+
+  __atomic_base(const __atomic_base&) = delete;
+};
+
+// atomic<Integral>
+
+template <class _Tp>
+struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
+  using __base = __atomic_base<_Tp, false>;
+
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __atomic_base() _NOEXCEPT = default;
+
+  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
+
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
+    return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
+  }
+  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
+    return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) volatile _NOEXCEPT { return fetch_add(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) volatile _NOEXCEPT { return fetch_sub(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++() volatile _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--() volatile _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) volatile _NOEXCEPT { return fetch_and(__op) & __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) volatile _NOEXCEPT { return fetch_or(__op) | __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) volatile _NOEXCEPT { return fetch_xor(__op) ^ __op; }
+  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
+};
+
+// Here we need _IsIntegral because the default template argument is not enough:
+// e.g. __atomic_base<int> is __atomic_base<int, true>, which inherits from
+// __atomic_base<int, false>, and the caller of the wait function is
+// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work.
+template <class _Tp, bool _IsIntegral>
+struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
+  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
+    return __a.load(__order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI _Tp
+  __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) {
+    return __this.load(__order);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
+  __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
+    return std::addressof(__a.__a_);
+  }
+
+  static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>*
+  __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) {
+    return std::addressof(__this.__a_);
+  }
+};
+
 template <class _Tp>
 struct atomic : public __atomic_base<_Tp> {
   using __base          = __atomic_base<_Tp>;
@@ -123,6 +316,9 @@ struct atomic<_Tp*> : public __atomic_base<_Tp*> {
   atomic& operator=(const atomic&) volatile = delete;
 };
 
+template <class _Tp>
+struct __atomic_waitable_traits<atomic<_Tp> > : __atomic_waitable_traits<__atomic_base<_Tp> > {};
+
 #if _LIBCPP_STD_VER >= 20
 template <class _Tp>
   requires is_floating_point_v<_Tp>
diff --git a/libcxx/include/__atomic/atomic_base.h b/libcxx/include/__atomic/atomic_base.h
deleted file mode 100644
index 93f5c4cff0d1bc..00000000000000
--- a/libcxx/include/__atomic/atomic_base.h
+++ /dev/null
@@ -1,223 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ATOMIC_ATOMIC_BASE_H
-#define _LIBCPP___ATOMIC_ATOMIC_BASE_H
-
-#include <__atomic/atomic_sync.h>
-#include <__atomic/check_memory_order.h>
-#include <__atomic/cxx_atomic_impl.h>
-#include <__atomic/is_always_lock_free.h>
-#include <__atomic/memory_order.h>
-#include <__config>
-#include <__memory/addressof.h>
-#include <__type_traits/is_integral.h>
-#include <__type_traits/is_nothrow_constructible.h>
-#include <__type_traits/is_same.h>
-#include <version>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
-struct __atomic_base // false
-{
-  mutable __cxx_atomic_impl<_Tp> __a_;
-
-#if _LIBCPP_STD_VER >= 17
-  static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value;
-#endif
-
-  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT {
-    return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));
-  }
-  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const _NOEXCEPT {
-    return static_cast<__atomic_base const volatile*>(this)->is_lock_free();
-  }
-  _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
-    std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) {
-    std::__cxx_atomic_store(std::addressof(__a_), __d, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
-      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
-    return std::__cxx_atomic_load(std::addressof(__a_), __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
-      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) {
-    return std::__cxx_atomic_load(std::addressof(__a_), __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI operator _Tp() const volatile _NOEXCEPT { return load(); }
-  _LIBCPP_HIDE_FROM_ABI operator _Tp() const _NOEXCEPT { return load(); }
-  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_exchange(std::addressof(__a_), __d, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
-      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
-    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
-      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
-    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile _NOEXCEPT
-      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
-    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) _NOEXCEPT
-      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) {
-    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __s, __f);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_compare_exchange_weak(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI bool
-  compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_compare_exchange_strong(std::addressof(__a_), std::addressof(__e), __d, __m, __m);
-  }
-
-#if _LIBCPP_STD_VER >= 20
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const
-      volatile _NOEXCEPT {
-    std::__atomic_wait(*this, __v, __m);
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-  wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT {
-    std::__atomic_wait(*this, __v, __m);
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT {
-    std::__atomic_notify_one(*this);
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT {
-    std::__atomic_notify_all(*this);
-  }
-  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); }
-#endif //  _LIBCPP_STD_VER >= 20
-
-#if _LIBCPP_STD_VER >= 20
-  _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
-#else
-  _LIBCPP_HIDE_FROM_ABI __atomic_base() _NOEXCEPT = default;
-#endif
-
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
-
-  __atomic_base(const __atomic_base&) = delete;
-};
-
-// atomic<Integral>
-
-template <class _Tp>
-struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
-  using __base = __atomic_base<_Tp, false>;
-
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __atomic_base() _NOEXCEPT = default;
-
-  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
-
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_fetch_add(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_fetch_sub(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_fetch_and(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_fetch_or(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT {
-    return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
-  }
-  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT {
-    return std::__cxx_atomic_fetch_xor(std::addressof(this->__a_), __op, __m);
-  }
-
-  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) volatile _NOEXCEPT { return fetch_add(_Tp(1)); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) _NOEXCEPT { return fetch_add(_Tp(1)); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) volatile _NOEXCEPT { return fetch_sub(_Tp(1)); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) _NOEXCEPT { return fetch_sub(_Tp(1)); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator++() volatile _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator++() _NOEXCEPT { return fetch_add(_Tp(1)) + _Tp(1); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator--() volatile _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator--() _NOEXCEPT { return fetch_sub(_Tp(1)) - _Tp(1); }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) volatile _NOEXCEPT { return fetch_add(__op) + __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __op) _NOEXCEPT { return fetch_add(__op) + __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) volatile _NOEXCEPT { return fetch_sub(__op) - __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __op) _NOEXCEPT { return fetch_sub(__op) - __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) volatile _NOEXCEPT { return fetch_and(__op) & __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __op) _NOEXCEPT { return fetch_and(__op) & __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) volatile _NOEXCEPT { return fetch_or(__op) | __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __op) _NOEXCEPT { return fetch_or(__op) | __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) volatile _NOEXCEPT { return fetch_xor(__op) ^ __op; }
-  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; }
-};
-
-// Here we need _IsIntegral because the default template argument is not enough
-// e.g  __atomic_base<int> is __atomic_base<int, true>, which inherits from
-// __atomic_base<int, false> and the caller of the wait function is
-// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
-template <class _Tp, bool _IsIntegral>
-struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
-  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
-    return __a.load(__order);
-  }
-
-  static _LIBCPP_HIDE_FROM_ABI _Tp
-  __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) {
-    return __this.load(__order);
-  }
-
-  static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>*
-  __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) {
-    return std::addressof(__a.__a_);
-  }
-
-  static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>*
-  __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) {
-    return std::addressof(__this.__a_);
-  }
-};
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // _LIBCPP___ATOMIC_ATOMIC_BASE_H
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 716d198bc236bb..d4adf277c49c7a 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -591,7 +591,6 @@ template <class T>
 
 #include <__atomic/aliases.h>
 #include <__atomic/atomic.h>
-#include <__atomic/atomic_base.h>
 #include <__atomic/atomic_flag.h>
 #include <__atomic/atomic_init.h>
 #include <__atomic/atomic_lock_free.h>
diff --git a/libcxx/include/barrier b/libcxx/include/barrier
index c7df0e9e6e8d43..980eae06ab140f 100644
--- a/libcxx/include/barrier
+++ b/libcxx/include/barrier
@@ -50,7 +50,7 @@ namespace std
 #if _LIBCPP_HAS_THREADS
 
 #  include <__assert>
-#  include <__atomic/atomic_base.h>
+#  include <__atomic/atomic.h>
 #  include <__atomic/memory_order.h>
 #  include <__cstddef/ptrdiff_t.h>
 #  include <__memory/unique_ptr.h>
@@ -109,9 +109,9 @@ template <class _CompletionF>
 class __barrier_base {
   ptrdiff_t __expected_;
   unique_ptr<__barrier_algorithm_base, void (*)(__barrier_algorithm_base*)> __base_;
-  __atomic_base<ptrdiff_t> __expected_adjustment_;
+  atomic<ptrdiff_t> __expected_adjustment_;
   _CompletionF __completion_;
-  __atomic_base<__barrier_phase_t> __phase_;
+  atomic<__barrier_phase_t> __phase_;
 
 public:
   using arrival_token = __barrier_phase_t;
@@ -167,10 +167,10 @@ Two versions of this algorithm are provided:
 
 template <class _CompletionF>
 class __barrier_base {
-  __atomic_base<ptrdiff_t> __expected;
-  __atomic_base<ptrdiff_t> __arrived;
+  atomic<ptrdiff_t> __expected;
+  atomic<ptrdiff_t> __arrived;
   _CompletionF __completion;
-  __atomic_base<bool> __phase;
+  atomic<bool> __phase;
 
 public:
   using arrival_token = bool;
@@ -212,7 +212,7 @@ class __barrier_base<__empty_completion> {
   static constexpr uint64_t __phase_bit     = 1ull << 63;
   static constexpr uint64_t __arrived_mask  = (__phase_bit - 1) & ~__expected_mask;
 
-  __atomic_base<uint64_t> __phase_arrived_expected;
+  atomic<uint64_t> __phase_arrived_expected;
 
   static _LIBCPP_HIDE_FROM_ABI constexpr uint64_t __init(ptrdiff_t __count) _NOEXCEPT {
     return ((uint64_t(1u << 31) - __count) << 32) | (uint64_t(1u << 31) - __count);
diff --git a/libcxx/include/latch b/libcxx/include/latch
index 90cca27c50c376..1860ed816c8562 100644
--- a/libcxx/include/latch
+++ b/libcxx/include/latch
@@ -45,7 +45,7 @@ namespace std
 #if _LIBCPP_HAS_THREADS
 
 #  include <__assert>
-#  include <__atomic/atomic_base.h>
+#  include <__atomic/atomic.h>
 #  include <__atomic/atomic_sync.h>
 #  include <__atomic/memory_order.h>
 #  include <__cstddef/ptrdiff_t.h>
@@ -64,7 +64,7 @@ _LIBCPP_PUSH_MACROS
 _LIBCPP_BEGIN_NAMESPACE_STD
 
 class latch {
-  __atomic_base<ptrdiff_t> __a_;
+  atomic<ptrdiff_t> __a_;
 
 public:
   static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { return numeric_limits<ptrdiff_t>::max(); }
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index 2a4b9de436eab6..df57e90626bb10 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -844,7 +844,6 @@ module std [system] {
 
   module atomic {
     module aliases                { header "__atomic/aliases.h" }
-    module atomic_base            { header "__atomic/atomic_base.h" }
     module atomic_flag            { header "__atomic/atomic_flag.h" }
     module atomic_init            { header "__atomic/atomic_init.h" }
     module atomic_lock_free       { header "__atomic/atomic_lock_free.h" }
diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore
index 05c85bc810603e..c594df459c93fc 100644
--- a/libcxx/include/semaphore
+++ b/libcxx/include/semaphore
@@ -50,7 +50,7 @@ using binary_semaphore = counting_semaphore<1>; // since C++20
 #if _LIBCPP_HAS_THREADS
 
 #  include <__assert>
-#  include <__atomic/atomic_base.h>
+#  include <__atomic/atomic.h>
 #  include <__atomic/atomic_sync.h>
 #  include <__atomic/memory_order.h>
 #  include <__chrono/time_point.h>
@@ -83,7 +83,7 @@ functions. It avoids contention against users' own use of those facilities.
 #    define _LIBCPP_SEMAPHORE_MAX (numeric_limits<ptrdiff_t>::max())
 
 class __atomic_semaphore_base {
-  __atomic_base<ptrdiff_t> __a_;
+  atomic<ptrdiff_t> __a_;
 
 public:
   _LIBCPP_HIDE_FROM_ABI constexpr explicit __atomic_semaphore_base(ptrdiff_t __count) : __a_(__count) {}
diff --git a/libcxx/src/barrier.cpp b/libcxx/src/barrier.cpp
index 69601bfeec0546..b97c7bd73b74cd 100644
--- a/libcxx/src/barrier.cpp
+++ b/libcxx/src/barrier.cpp
@@ -17,7 +17,7 @@ class __barrier_algorithm_base {
 public:
   struct alignas(64) /* naturally-align the heap state */ __state_t {
     struct {
-      __atomic_base<__barrier_phase_t> __phase{0};
+      atomic<__barrier_phase_t> __phase{0};
     } __tickets[64];
   };
 


