[libcxx-commits] [libcxx] [libc++] Allows any types of size 4 and 8 to use native platform ulock_wait (PR #161086)
via libcxx-commits
libcxx-commits at lists.llvm.org
Sat Nov 15 12:34:03 PST 2025
================
@@ -92,119 +110,224 @@ static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const vo
* limit its use to architectures where long and int64_t are synonyms.
*/
-static void
-__libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
- _umtx_op(const_cast<__cxx_atomic_contention_t*>(__ptr), UMTX_OP_WAIT, __val, nullptr, nullptr);
+template <std::size_t _Size>
+static void __platform_wait_on_address(void const volatile* __ptr, void const* __val) {
+  static_assert(_Size == 8, "Can only wait on an 8-byte value");
+ char buffer[_Size];
+ std::memcpy(&buffer, const_cast<const void*>(__val), _Size);
+ _umtx_op(const_cast<void*>(__ptr), UMTX_OP_WAIT, *reinterpret_cast<__cxx_contention_t*>(&buffer), nullptr, nullptr);
}
-static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr, bool __notify_one) {
- _umtx_op(const_cast<__cxx_atomic_contention_t*>(__ptr), UMTX_OP_WAKE, __notify_one ? 1 : INT_MAX, nullptr, nullptr);
+template <std::size_t _Size>
+static void __platform_wake_by_address(void const volatile* __ptr, bool __notify_one) {
+  static_assert(_Size == 8, "Can only wake up on an 8-byte value");
+ _umtx_op(const_cast<void*>(__ptr), UMTX_OP_WAKE, __notify_one ? 1 : INT_MAX, nullptr, nullptr);
}
#else // <- Add other operating systems here
// Baseline is just a timed backoff
-static void
-__libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
+template <std::size_t _Size>
+static void __platform_wait_on_address(void const volatile* __ptr, void const* __val) {
__libcpp_thread_poll_with_backoff(
- [=]() -> bool { return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__ptr, memory_order_relaxed), __val); },
+ [=]() -> bool { return std::memcmp(const_cast<const void*>(__ptr), __val, _Size) != 0; },
__libcpp_timed_backoff_policy());
}
-static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile*, bool) {}
+template <std::size_t _Size>
+static void __platform_wake_by_address(void const volatile*, bool) {}
#endif // __linux__
-static constexpr size_t __libcpp_contention_table_size = (1 << 8); /* < there's no magic in this number */
-
-struct alignas(64) /* aim to avoid false sharing */ __libcpp_contention_table_entry {
- __cxx_atomic_contention_t __contention_state;
- __cxx_atomic_contention_t __platform_state;
- inline constexpr __libcpp_contention_table_entry() : __contention_state(0), __platform_state(0) {}
-};
-
-static __libcpp_contention_table_entry __libcpp_contention_table[__libcpp_contention_table_size];
-
-static hash<void const volatile*> __libcpp_contention_hasher;
-
-static __libcpp_contention_table_entry* __libcpp_contention_state(void const volatile* p) {
- return &__libcpp_contention_table[__libcpp_contention_hasher(p) & (__libcpp_contention_table_size - 1)];
-}
+// =============================
+// Local hidden helper functions
+// =============================
/* Given an atomic to track contention and an atomic to actually wait on, which may be
the same atomic, we try to detect contention to avoid spuriously calling the platform. */
-static void __libcpp_contention_notify(__cxx_atomic_contention_t volatile* __contention_state,
- __cxx_atomic_contention_t const volatile* __platform_state,
- bool __notify_one) {
- if (0 != __cxx_atomic_load(__contention_state, memory_order_seq_cst))
+template <std::size_t _Size>
+static void __contention_notify(
+ __cxx_atomic_contention_t volatile* __waiter_count, void const volatile* __address_to_notify, bool __notify_one) {
+ if (0 != __cxx_atomic_load(__waiter_count, memory_order_seq_cst))
// We only call 'wake' if we consumed a contention bit here.
- __libcpp_platform_wake_by_address(__platform_state, __notify_one);
-}
-static __cxx_contention_t
-__libcpp_contention_monitor_for_wait(__cxx_atomic_contention_t volatile* /*__contention_state*/,
- __cxx_atomic_contention_t const volatile* __platform_state) {
- // We will monitor this value.
- return __cxx_atomic_load(__platform_state, memory_order_acquire);
-}
-static void __libcpp_contention_wait(__cxx_atomic_contention_t volatile* __contention_state,
- __cxx_atomic_contention_t const volatile* __platform_state,
- __cxx_contention_t __old_value) {
- __cxx_atomic_fetch_add(__contention_state, __cxx_contention_t(1), memory_order_relaxed);
+ __platform_wake_by_address<_Size>(__address_to_notify, __notify_one);
+}
+
+template <std::size_t _Size>
+static void __contention_wait(__cxx_atomic_contention_t volatile* __waiter_count,
+ void const volatile* __address_to_wait,
+ void const* __old_value) {
+ __cxx_atomic_fetch_add(__waiter_count, __cxx_contention_t(1), memory_order_relaxed);
// https://llvm.org/PR109290
// There are no platform guarantees of a memory barrier in the platform wait implementation
__cxx_atomic_thread_fence(memory_order_seq_cst);
// We sleep as long as the monitored value hasn't changed.
- __libcpp_platform_wait_on_address(__platform_state, __old_value);
- __cxx_atomic_fetch_sub(__contention_state, __cxx_contention_t(1), memory_order_release);
+ __platform_wait_on_address<_Size>(__address_to_wait, __old_value);
+ __cxx_atomic_fetch_sub(__waiter_count, __cxx_contention_t(1), memory_order_release);
+}
+
+#if defined(__APPLE__) && defined(__aarch64__)
+constexpr size_t __cache_line_size = 128;
+#else
+constexpr size_t __cache_line_size = 64;
+#endif
----------------
huixie90 wrote:
And Clang documents that these values are unstable:
https://clang.llvm.org/docs/LanguageExtensions.html#gcc-destructive-size-and-gcc-constructive-size
I don't feel very comfortable using them, even though we don't have ABI problems here (since it is purely in the dylib).
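
For context, the alternative being discussed is to take the value from the compiler instead of hard-coding it per platform. Below is a minimal sketch of that approach, assuming the __GCC_DESTRUCTIVE_SIZE predefined macro described in the Clang documentation linked above is available; it is not part of this PR, and as noted its value is not guaranteed to stay stable across compiler releases. The fallback constants mirror the ones in the diff.

#include <cstddef>

#if defined(__GCC_DESTRUCTIVE_SIZE)
// Compiler-provided estimate of the false-sharing granularity.
constexpr std::size_t __cache_line_size = __GCC_DESTRUCTIVE_SIZE;
#elif defined(__APPLE__) && defined(__aarch64__)
constexpr std::size_t __cache_line_size = 128; // same fallback as in the diff
#else
constexpr std::size_t __cache_line_size = 64;
#endif

Either way, the constant is only used inside the dylib to pad the contention table entries against false sharing, which is why the comment above notes there is no ABI concern here.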
https://github.com/llvm/llvm-project/pull/161086