[libcxx-commits] [libcxx] [libc++] Extract a clean base support API for std::atomic (PR #118129)
Louis Dionne via libcxx-commits
libcxx-commits at lists.llvm.org
Fri Nov 29 11:50:38 PST 2024
https://github.com/ldionne created https://github.com/llvm/llvm-project/pull/118129
This doesn't change the operations that std::atomic is based on, but it reorganizes the C11 / GCC implementation split to make it clearer which parts form the base support layer and which do not.
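At a glance, the new layering looks like this (condensed from __atomic/support.h in the patch below; shown here only for orientation):

    // __atomic/support.h selects a backend that defines __cxx_atomic_base_impl
    // and the __cxx_atomic_* free functions, then layers the shared wrapper on top.
    #if _LIBCPP_HAS_GCC_ATOMIC_IMP
    #  include <__atomic/support/gcc.h>  // GCC-style __atomic_* builtins
    #elif _LIBCPP_HAS_C_ATOMIC_IMP
    #  include <__atomic/support/c11.h>  // C11 _Atomic(T) and __c11_atomic_* builtins
    #endif

    _LIBCPP_BEGIN_NAMESPACE_STD

    // Shared across backends: enforces trivial copyability and forwards construction.
    template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
    struct __cxx_atomic_impl : public _Base {
      static_assert(is_trivially_copyable<_Tp>::value,
                    "std::atomic<T> requires that 'T' be a trivially copyable type");

      _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
      _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
    };

    _LIBCPP_END_NAMESPACE_STD

A short, purely illustrative usage sketch of the resulting __cxx_atomic_* contract follows the patch at the end of this message.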
From ccfb5fe0fe8a5d1f7d274861c38546c829d1f404 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Mon, 11 Nov 2024 15:57:11 -0500
Subject: [PATCH] [libc++] Extract a clean base support API for std::atomic
This doesn't change the operations that std::atomic is based on,
but it reorganizes the C11 / GCC implementation split to make it
clearer which parts form the base support layer and which do not.
---
libcxx/include/CMakeLists.txt | 4 +-
libcxx/include/__atomic/support.h | 124 ++++++++
.../{cxx_atomic_impl.h => support/c11.h} | 269 +-----------------
libcxx/include/__atomic/support/gcc.h | 265 +++++++++++++++++
libcxx/include/module.modulemap | 7 +-
5 files changed, 409 insertions(+), 260 deletions(-)
create mode 100644 libcxx/include/__atomic/support.h
rename libcxx/include/__atomic/{cxx_atomic_impl.h => support/c11.h} (52%)
create mode 100644 libcxx/include/__atomic/support/gcc.h
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 2bb6d263340d30..c56ec4792aa3aa 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -212,11 +212,13 @@ set(files
__atomic/atomic_sync.h
__atomic/check_memory_order.h
__atomic/contention_t.h
- __atomic/cxx_atomic_impl.h
__atomic/fence.h
__atomic/is_always_lock_free.h
__atomic/kill_dependency.h
__atomic/memory_order.h
+ __atomic/support/c11.h
+ __atomic/support/gcc.h
+ __atomic/support.h
__atomic/to_gcc_order.h
__bit/bit_cast.h
__bit/bit_ceil.h
diff --git a/libcxx/include/__atomic/support.h b/libcxx/include/__atomic/support.h
new file mode 100644
index 00000000000000..4b555ab483ca0e
--- /dev/null
+++ b/libcxx/include/__atomic/support.h
@@ -0,0 +1,124 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_SUPPORT_H
+#define _LIBCPP___ATOMIC_SUPPORT_H
+
+#include <__config>
+#include <__type_traits/is_trivially_copyable.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+//
+// This file implements base support for atomics on the platform.
+//
+// The following operations and types must be implemented (where _Atmc
+// is __cxx_atomic_base_impl for readability):
+//
+// clang-format off
+//
+// template <class _Tp>
+// struct __cxx_atomic_base_impl;
+//
+// #define __cxx_atomic_is_lock_free(__size)
+//
+// void __cxx_atomic_thread_fence(memory_order __order) noexcept;
+// void __cxx_atomic_signal_fence(memory_order __order) noexcept;
+//
+// template <class _Tp>
+// void __cxx_atomic_init(_Atmc<_Tp> volatile* __a, _Tp __val) noexcept;
+// template <class _Tp>
+// void __cxx_atomic_init(_Atmc<_Tp>* __a, _Tp __val) noexcept;
+//
+// template <class _Tp>
+// void __cxx_atomic_store(_Atmc<_Tp> volatile* __a, _Tp __val, memory_order __order) noexcept;
+// template <class _Tp>
+// void __cxx_atomic_store(_Atmc<_Tp>* __a, _Tp __val, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_load(_Atmc<_Tp> const volatile* __a, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_load(_Atmc<_Tp> const* __a, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// void __cxx_atomic_load_inplace(_Atmc<_Tp> const volatile* __a, _Tp* __dst, memory_order __order) noexcept;
+// template <class _Tp>
+// void __cxx_atomic_load_inplace(_Atmc<_Tp> const* __a, _Tp* __dst, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_exchange(_Atmc<_Tp> volatile* __a, _Tp __value, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_exchange(_Atmc<_Tp>* __a, _Tp __value, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// bool __cxx_atomic_compare_exchange_strong(_Atmc<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) noexcept;
+// template <class _Tp>
+// bool __cxx_atomic_compare_exchange_strong(_Atmc<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) noexcept;
+//
+// template <class _Tp>
+// bool __cxx_atomic_compare_exchange_weak(_Atmc<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) noexcept;
+// template <class _Tp>
+// bool __cxx_atomic_compare_exchange_weak(_Atmc<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_add(_Atmc<_Tp> volatile* __a, _Tp __delta, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_add(_Atmc<_Tp>* __a, _Tp __delta, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp* __cxx_atomic_fetch_add(_Atmc<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp* __cxx_atomic_fetch_add(_Atmc<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_sub(_Atmc<_Tp> volatile* __a, _Tp __delta, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_sub(_Atmc<_Tp>* __a, _Tp __delta, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp* __cxx_atomic_fetch_sub(_Atmc<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp* __cxx_atomic_fetch_sub(_Atmc<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_and(_Atmc<_Tp> volatile* __a, _Tp __pattern, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_and(_Atmc<_Tp>* __a, _Tp __pattern, memory_order __order) noexcept;
+//
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_or(_Atmc<_Tp> volatile* __a, _Tp __pattern, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_or(_Atmc<_Tp>* __a, _Tp __pattern, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_xor(_Atmc<_Tp> volatile* __a, _Tp __pattern, memory_order __order) noexcept;
+// template <class _Tp>
+// _Tp __cxx_atomic_fetch_xor(_Atmc<_Tp>* __a, _Tp __pattern, memory_order __order) noexcept;
+//
+// clang-format on
+//
+
+#if _LIBCPP_HAS_GCC_ATOMIC_IMP
+# include <__atomic/support/gcc.h>
+#elif _LIBCPP_HAS_C_ATOMIC_IMP
+# include <__atomic/support/c11.h>
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
+struct __cxx_atomic_impl : public _Base {
+ static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
+
+ _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
+};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC_SUPPORT_H
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/support/c11.h
similarity index 52%
rename from libcxx/include/__atomic/cxx_atomic_impl.h
rename to libcxx/include/__atomic/support/c11.h
index acffb0fa8f5d9f..177a075be40739 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/support/c11.h
@@ -6,276 +6,39 @@
//
//===----------------------------------------------------------------------===//
-#ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
-#define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
+#ifndef _LIBCPP___ATOMIC_SUPPORT_C11_H
+#define _LIBCPP___ATOMIC_SUPPORT_C11_H
#include <__atomic/memory_order.h>
-#include <__atomic/to_gcc_order.h>
#include <__config>
#include <__cstddef/ptrdiff_t.h>
#include <__memory/addressof.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_assignable.h>
-#include <__type_traits/is_trivially_copyable.h>
#include <__type_traits/remove_const.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-#if _LIBCPP_HAS_GCC_ATOMIC_IMP
-
-// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
-// the default operator= in an object is not volatile, a byte-by-byte copy
-// is required.
-template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
- __a_value = __val;
-}
-template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
- volatile char* __to = reinterpret_cast<volatile char*>(std::addressof(__a_value));
- volatile char* __end = __to + sizeof(_Tp);
- volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
- while (__to != __end)
- *__to++ = *__from++;
-}
-
-template <typename _Tp>
-struct __cxx_atomic_base_impl {
- _LIBCPP_HIDE_FROM_ABI
-# ifndef _LIBCPP_CXX03_LANG
- __cxx_atomic_base_impl() _NOEXCEPT = default;
-# else
- __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {
- }
-# endif // _LIBCPP_CXX03_LANG
- _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
- _Tp __a_value;
-};
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
- __cxx_atomic_assign_volatile(__a->__a_value, __val);
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
- __a->__a_value = __val;
-}
-
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) {
- __atomic_thread_fence(__to_gcc_order(__order));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) {
- __atomic_signal_fence(__to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
- __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
- __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
- _Tp __ret;
- __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
- __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
- __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
- _Tp __ret;
- __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
- _Tp __ret;
- __atomic_exchange(
- std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
- _Tp __ret;
- __atomic_exchange(
- std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- volatile __cxx_atomic_base_impl<_Tp>* __a,
- _Tp* __expected,
- _Tp __value,
- memory_order __success,
- memory_order __failure) {
- return __atomic_compare_exchange(
- std::addressof(__a->__a_value),
- __expected,
- std::addressof(__value),
- false,
- __to_gcc_order(__success),
- __to_gcc_failure_order(__failure));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
- return __atomic_compare_exchange(
- std::addressof(__a->__a_value),
- __expected,
- std::addressof(__value),
- false,
- __to_gcc_order(__success),
- __to_gcc_failure_order(__failure));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
- volatile __cxx_atomic_base_impl<_Tp>* __a,
- _Tp* __expected,
- _Tp __value,
- memory_order __success,
- memory_order __failure) {
- return __atomic_compare_exchange(
- std::addressof(__a->__a_value),
- __expected,
- std::addressof(__value),
- true,
- __to_gcc_order(__success),
- __to_gcc_failure_order(__failure));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
- __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
- return __atomic_compare_exchange(
- std::addressof(__a->__a_value),
- __expected,
- std::addressof(__value),
- true,
- __to_gcc_order(__success),
- __to_gcc_failure_order(__failure));
-}
-
-template <typename _Tp>
-struct __skip_amt {
- enum { value = 1 };
-};
-
-template <typename _Tp>
-struct __skip_amt<_Tp*> {
- enum { value = sizeof(_Tp) };
-};
-
-// FIXME: Haven't figured out what the spec says about using arrays with
-// atomic_fetch_add. Force a failure rather than creating bad behavior.
-template <typename _Tp>
-struct __skip_amt<_Tp[]> {};
-template <typename _Tp, int n>
-struct __skip_amt<_Tp[n]> {};
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
- return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
- return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
- return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
- return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
- return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
-}
-
-# define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
+//
+// This file implements support for C11-style atomics
+//
-#elif _LIBCPP_HAS_C_ATOMIC_IMP
+_LIBCPP_BEGIN_NAMESPACE_STD
template <typename _Tp>
struct __cxx_atomic_base_impl {
_LIBCPP_HIDE_FROM_ABI
-# ifndef _LIBCPP_CXX03_LANG
+#ifndef _LIBCPP_CXX03_LANG
__cxx_atomic_base_impl() _NOEXCEPT = default;
-# else
+#else
__cxx_atomic_base_impl() _NOEXCEPT : __a_value() {
}
-# endif // _LIBCPP_CXX03_LANG
+#endif // _LIBCPP_CXX03_LANG
_LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp __value) _NOEXCEPT : __a_value(__value) {}
_LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
};
-# define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
+#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
__c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
@@ -496,16 +259,6 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_o
std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
-#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
-
-template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
-struct __cxx_atomic_impl : public _Base {
- static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
-
- _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
-};
-
_LIBCPP_END_NAMESPACE_STD
-#endif // _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
+#endif // _LIBCPP___ATOMIC_SUPPORT_C11_H
diff --git a/libcxx/include/__atomic/support/gcc.h b/libcxx/include/__atomic/support/gcc.h
new file mode 100644
index 00000000000000..73c1b1c8070a41
--- /dev/null
+++ b/libcxx/include/__atomic/support/gcc.h
@@ -0,0 +1,265 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_SUPPORT_GCC_H
+#define _LIBCPP___ATOMIC_SUPPORT_GCC_H
+
+#include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
+#include <__config>
+#include <__memory/addressof.h>
+#include <__type_traits/enable_if.h>
+#include <__type_traits/is_assignable.h>
+#include <__type_traits/remove_const.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+//
+// This file implements support for GCC-style atomics
+//
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
+// the default operator= in an object is not volatile, a byte-by-byte copy
+// is required.
+template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
+ __a_value = __val;
+}
+template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
+ volatile char* __to = reinterpret_cast<volatile char*>(std::addressof(__a_value));
+ volatile char* __end = __to + sizeof(_Tp);
+ volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
+ while (__to != __end)
+ *__to++ = *__from++;
+}
+
+template <typename _Tp>
+struct __cxx_atomic_base_impl {
+ _LIBCPP_HIDE_FROM_ABI
+#ifndef _LIBCPP_CXX03_LANG
+ __cxx_atomic_base_impl() _NOEXCEPT = default;
+#else
+ __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {
+ }
+#endif // _LIBCPP_CXX03_LANG
+ _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
+ _Tp __a_value;
+};
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+ __cxx_atomic_assign_volatile(__a->__a_value, __val);
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+ __a->__a_value = __val;
+}
+
+_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) {
+ __atomic_thread_fence(__to_gcc_order(__order));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) {
+ __atomic_signal_fence(__to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+ __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
+ __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+ _Tp __ret;
+ __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+ __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI void
+__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
+ __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+ _Tp __ret;
+ __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
+ _Tp __ret;
+ __atomic_exchange(
+ std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
+ _Tp __ret;
+ __atomic_exchange(
+ std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+ volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp* __expected,
+ _Tp __value,
+ memory_order __success,
+ memory_order __failure) {
+ return __atomic_compare_exchange(
+ std::addressof(__a->__a_value),
+ __expected,
+ std::addressof(__value),
+ false,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
+ __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+ return __atomic_compare_exchange(
+ std::addressof(__a->__a_value),
+ __expected,
+ std::addressof(__value),
+ false,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+ volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp* __expected,
+ _Tp __value,
+ memory_order __success,
+ memory_order __failure) {
+ return __atomic_compare_exchange(
+ std::addressof(__a->__a_value),
+ __expected,
+ std::addressof(__value),
+ true,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
+ __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+ return __atomic_compare_exchange(
+ std::addressof(__a->__a_value),
+ __expected,
+ std::addressof(__value),
+ true,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+struct __skip_amt {
+ enum { value = 1 };
+};
+
+template <typename _Tp>
+struct __skip_amt<_Tp*> {
+ enum { value = sizeof(_Tp) };
+};
+
+// FIXME: Haven't figured out what the spec says about using arrays with
+// atomic_fetch_add. Force a failure rather than creating bad behavior.
+template <typename _Tp>
+struct __skip_amt<_Tp[]> {};
+template <typename _Tp, int n>
+struct __skip_amt<_Tp[n]> {};
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+ return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+ return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+ return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
+ return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_HIDE_FROM_ABI _Tp
+__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
+}
+
+#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC_SUPPORT_GCC_H
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index ed2b7fb1921642..1ef214465e8379 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -856,13 +856,18 @@ module std [system] {
}
module check_memory_order { header "__atomic/check_memory_order.h" }
module contention_t { header "__atomic/contention_t.h" }
- module cxx_atomic_impl { header "__atomic/cxx_atomic_impl.h" }
module fence { header "__atomic/fence.h" }
module is_always_lock_free { header "__atomic/is_always_lock_free.h" }
module kill_dependency { header "__atomic/kill_dependency.h" }
module memory_order { header "__atomic/memory_order.h" }
module to_gcc_order { header "__atomic/to_gcc_order.h" }
+ module support {
+ header "__atomic/support.h"
+ textual header "__atomic/support/c11.h"
+ textual header "__atomic/support/gcc.h"
+ }
+
header "atomic"
export *
}
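For illustration only (a hypothetical helper, not part of the patch): code layered on top of this support API is expected to use the documented __cxx_atomic_* surface roughly as follows, regardless of whether the GCC or C11 backend was selected:

    _LIBCPP_BEGIN_NAMESPACE_STD

    // Hypothetical helper showing the contract from __atomic/support.h in use.
    // __cxx_atomic_impl<int> derives from the backend's __cxx_atomic_base_impl<int>,
    // so it can be passed directly to the backend-provided free functions.
    inline int __example_bump(__cxx_atomic_impl<int>* __a) {
      __cxx_atomic_init(__a, 0);                                    // non-atomic initialization
      __cxx_atomic_store(__a, 1, memory_order_relaxed);             // atomic store
      (void)__cxx_atomic_fetch_add(__a, 41, memory_order_acq_rel);  // read-modify-write, returns the old value (1)
      return __cxx_atomic_load(__a, memory_order_acquire);          // atomic load, returns 42
    }

    _LIBCPP_END_NAMESPACE_STD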