[libcxx-commits] [libcxx] [libc++] Refactor atomic_wait using lambdas (PR #115746)
Louis Dionne via libcxx-commits
libcxx-commits at lists.llvm.org
Mon Nov 11 10:03:15 PST 2024
https://github.com/ldionne updated https://github.com/llvm/llvm-project/pull/115746
>From 293a17319fb783c0b135398d89041b4f29bfee03 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 5 Nov 2024 14:37:01 -0500
Subject: [PATCH] [libc++] Refactor atomic_wait using lambdas
Now that we've dropped support for older C++ dialects in the
synchronization library, we can use lambdas to clarify some of
the code used to implement atomic_wait.
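
For context, here is a rough sketch of the pattern being applied: a hand-written functor whose only job is to capture state for a poll callback is replaced by a capturing lambda written directly at the call site. The names below are hypothetical and simplified; they are not the actual libc++ internals.

    #include <atomic>

    // Hand-written functor: the pre-lambda style this patch removes
    // (hypothetical simplified names).
    struct poll_until_changed {
      const std::atomic<int>& a_;
      int old_;
      bool operator()() const { return a_.load(std::memory_order_acquire) != old_; }
    };

    // Generic polling loop standing in for the backoff-based polling helper.
    template <class Poll>
    void poll_with_backoff(Poll poll) {
      while (!poll()) {
        // spin / back off
      }
    }

    void wait_for_change(const std::atomic<int>& a, int old_value) {
      // Before: poll_with_backoff(poll_until_changed{a, old_value});
      // After: a capturing lambda keeps the polling logic at the call site.
      poll_with_backoff([&] { return a.load(std::memory_order_acquire) != old_value; });
    }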
---
libcxx/include/__atomic/atomic_sync.h | 51 +++++++++++----------------
libcxx/include/latch | 5 +--
libcxx/include/semaphore | 5 +--
3 files changed, 27 insertions(+), 34 deletions(-)
diff --git a/libcxx/include/__atomic/atomic_sync.h b/libcxx/include/__atomic/atomic_sync.h
index 08f3497fef9f45..08ca5e00a9a6f8 100644
--- a/libcxx/include/__atomic/atomic_sync.h
+++ b/libcxx/include/__atomic/atomic_sync.h
@@ -57,18 +57,6 @@ struct __atomic_waitable< _Tp,
decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_contention_address(
std::declval<const _Tp&>()))> > : true_type {};
-template <class _AtomicWaitable, class _Poll>
-struct __atomic_wait_poll_impl {
- const _AtomicWaitable& __a_;
- _Poll __poll_;
- memory_order __order_;
-
- _LIBCPP_HIDE_FROM_ABI bool operator()() const {
- auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a_, __order_);
- return __poll_(__current_val);
- }
-};
-
#if _LIBCPP_HAS_THREADS
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*) _NOEXCEPT;
@@ -144,11 +132,16 @@ struct __atomic_wait_backoff_impl {
// value. The predicate function must not return `false` spuriously.
template <class _AtomicWaitable, class _Poll>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
+__atomic_wait_unless(const _AtomicWaitable& __a, memory_order __order, _Poll&& __poll) {
static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
- __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_impl = {__a, __poll, __order};
__atomic_wait_backoff_impl<_AtomicWaitable, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order};
- std::__libcpp_thread_poll_with_backoff(__poll_impl, __backoff_fn);
+ std::__libcpp_thread_poll_with_backoff(
+ /* poll */
+ [&]() {
+ auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a, __order);
+ return __poll(__current_val);
+ },
+ /* backoff */ __backoff_fn);
}
template <class _AtomicWaitable>
@@ -166,9 +159,14 @@ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _
#else // _LIBCPP_HAS_THREADS
template <class _AtomicWaitable, class _Poll>
-_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) {
- __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order};
- std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy());
+_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, memory_order __order, _Poll&& __poll) {
+ std::__libcpp_thread_poll_with_backoff(
+ /* poll */
+ [&]() {
+ auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a, __order);
+ return __poll(__current_val);
+ },
+ /* backoff */ __spinning_backoff_policy());
}
template <class _AtomicWaitable>
@@ -184,20 +182,13 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp c
return std::memcmp(std::addressof(__lhs), std::addressof(__rhs), sizeof(_Tp)) == 0;
}
-template <class _Tp>
-struct __atomic_compare_unequal_to {
- _Tp __val_;
- _LIBCPP_HIDE_FROM_ABI bool operator()(const _Tp& __arg) const {
- return !std::__cxx_nonatomic_compare_equal(__arg, __val_);
- }
-};
-
-template <class _AtomicWaitable, class _Up>
+template <class _AtomicWaitable, class _Tp>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void
-__atomic_wait(_AtomicWaitable& __a, _Up __val, memory_order __order) {
+__atomic_wait(_AtomicWaitable& __a, _Tp __val, memory_order __order) {
static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
- __atomic_compare_unequal_to<_Up> __nonatomic_equal = {__val};
- std::__atomic_wait_unless(__a, __nonatomic_equal, __order);
+ std::__atomic_wait_unless(__a, __order, [&](_Tp const& __current) {
+ return !std::__cxx_nonatomic_compare_equal(__current, __val);
+ });
}
_LIBCPP_END_NAMESPACE_STD
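
Note that the signature change also moves the memory order ahead of the predicate, so callers can pass the lambda as the trailing argument (as the latch and semaphore hunks below show). A rough sketch of the call-site difference, again with hypothetical simplified names rather than the real libc++ internals:

    #include <atomic>
    #include <cstddef>

    // Hypothetical helper mirroring the new argument order: the predicate
    // comes last, so a multi-line lambda can trail the call.
    template <class Poll>
    void wait_unless(const std::atomic<std::ptrdiff_t>& a, std::memory_order order, Poll poll) {
      std::ptrdiff_t current = a.load(order);
      while (!poll(current))
        current = a.load(order);
    }

    void acquire_like(const std::atomic<std::ptrdiff_t>& counter) {
      // Old order: wait_unless(counter, /* lambda */, std::memory_order_relaxed);
      // New order reads naturally with the lambda as the trailing argument:
      wait_unless(counter, std::memory_order_relaxed, [](std::ptrdiff_t& v) { return v > 0; });
    }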
diff --git a/libcxx/include/latch b/libcxx/include/latch
index 90cca27c50c376..5de59a37e4deec 100644
--- a/libcxx/include/latch
+++ b/libcxx/include/latch
@@ -99,8 +99,9 @@ public:
return try_wait_impl(__value);
}
inline _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait() const {
- std::__atomic_wait_unless(
- __a_, [this](ptrdiff_t& __value) -> bool { return try_wait_impl(__value); }, memory_order_acquire);
+ std::__atomic_wait_unless(__a_, memory_order_acquire, [this](ptrdiff_t& __value) -> bool {
+ return try_wait_impl(__value);
+ });
}
inline _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void arrive_and_wait(ptrdiff_t __update = 1) {
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(__update >= 0, "latch::arrive_and_wait called with a negative value");
diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore
index 05c85bc810603e..c4b4028fad7c61 100644
--- a/libcxx/include/semaphore
+++ b/libcxx/include/semaphore
@@ -96,8 +96,9 @@ public:
}
}
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void acquire() {
- std::__atomic_wait_unless(
- __a_, [this](ptrdiff_t& __old) { return __try_acquire_impl(__old); }, memory_order_relaxed);
+ std::__atomic_wait_unless(__a_, memory_order_relaxed, [this](ptrdiff_t& __old) {
+ return __try_acquire_impl(__old);
+ });
}
template <class _Rep, class _Period>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool