[libcxx-commits] [libcxx] [libc++][test] add benchmarks for `std::atomic::wait` (PR #70571)
via libcxx-commits
libcxx-commits at lists.llvm.org
Sat Dec 2 09:25:27 PST 2023
https://github.com/huixie90 updated https://github.com/llvm/llvm-project/pull/70571
>From 61ecdd6302b123d7f5ee3aa1f0211994de92314f Mon Sep 17 00:00:00 2001
From: Hui <hui.xie0621 at gmail.com>
Date: Sat, 28 Oct 2023 21:55:37 +0100
Subject: [PATCH 1/4] [libc++][test] add benchmarks for `std::atomic::wait`
---
libcxx/benchmarks/CMakeLists.txt | 1 +
libcxx/benchmarks/atomic_wait.bench.cpp | 163 ++++++++++++++++++++++++
2 files changed, 164 insertions(+)
create mode 100644 libcxx/benchmarks/atomic_wait.bench.cpp
diff --git a/libcxx/benchmarks/CMakeLists.txt b/libcxx/benchmarks/CMakeLists.txt
index 4307f6b57831f..79808fd0d1a24 100644
--- a/libcxx/benchmarks/CMakeLists.txt
+++ b/libcxx/benchmarks/CMakeLists.txt
@@ -196,6 +196,7 @@ set(BENCHMARK_TESTS
   algorithms/sort.bench.cpp
   algorithms/sort_heap.bench.cpp
   algorithms/stable_sort.bench.cpp
+  atomic_wait.bench.cpp
   libcxxabi/dynamic_cast.bench.cpp
   libcxxabi/dynamic_cast_old_stress.bench.cpp
   allocation.bench.cpp
diff --git a/libcxx/benchmarks/atomic_wait.bench.cpp b/libcxx/benchmarks/atomic_wait.bench.cpp
new file mode 100644
index 0000000000000..4ec030ab78068
--- /dev/null
+++ b/libcxx/benchmarks/atomic_wait.bench.cpp
@@ -0,0 +1,163 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <numeric>
+#include <thread>
+
+#include "benchmark/benchmark.h"
+#include "make_test_thread.h"
+
+#include <iostream>
+
+using namespace std::chrono_literals;
+
+void BM_atomic_wait_one_thread_one_atomic_wait(benchmark::State& state) {
+  std::atomic<std::uint64_t> a;
+  auto thread_func = [&](std::stop_token st) {
+    while (!st.stop_requested()) {
+      a.fetch_add(1, std::memory_order_relaxed);
+      a.notify_all();
+    }
+  };
+
+  std::uint64_t total_loop_test_param = state.range(0);
+
+  auto thread = support::make_test_jthread(thread_func);
+
+  for (auto _ : state) {
+    for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
+      auto old = a.load(std::memory_order_relaxed);
+      a.wait(old);
+    }
+  }
+}
+BENCHMARK(BM_atomic_wait_one_thread_one_atomic_wait)->RangeMultiplier(2)->Range(1 << 10, 1 << 24);
+
+void BM_atomic_wait_multi_thread_one_atomic_wait(benchmark::State& state) {
+  std::atomic<std::uint64_t> a;
+  auto notify_func = [&](std::stop_token st) {
+    while (!st.stop_requested()) {
+      a.fetch_add(1, std::memory_order_relaxed);
+      a.notify_all();
+    }
+  };
+
+  std::uint64_t total_loop_test_param = state.range(0);
+  constexpr auto num_waiting_threads = 15;
+  std::vector<std::jthread> wait_threads;
+  wait_threads.reserve(num_waiting_threads);
+
+  auto notify_thread = support::make_test_jthread(notify_func);
+
+  std::atomic<std::uint64_t> start_flag = 0;
+  std::atomic<std::uint64_t> done_count = 0;
+  auto wait_func = [&a, &start_flag, &done_count, total_loop_test_param](std::stop_token st) {
+    auto old_start = 0;
+    while (!st.stop_requested()) {
+      start_flag.wait(old_start);
+      old_start = start_flag.load();
+      for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
+        auto old = a.load(std::memory_order_relaxed);
+        a.wait(old);
+      }
+      done_count.fetch_add(1);
+    }
+  };
+
+  for (size_t i = 0; i < num_waiting_threads; ++i) {
+    wait_threads.emplace_back(support::make_test_jthread(wait_func));
+  }
+
+  for (auto _ : state) {
+    done_count = 0;
+    start_flag.fetch_add(1);
+    start_flag.notify_all();
+    while (done_count < num_waiting_threads) {
+      std::this_thread::yield();
+    }
+  }
+  for (auto& t : wait_threads) {
+    t.request_stop();
+  }
+  start_flag.fetch_add(1);
+  start_flag.notify_all();
+  for (auto& t : wait_threads) {
+    t.join();
+  }
+}
+BENCHMARK(BM_atomic_wait_multi_thread_one_atomic_wait)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
+
+// hardware_destructive_interference_size is not implemented in clang yet.
+// Use alignas(128) to cover both x86 and Apple ARM cache-line sizes and
+// prevent false sharing from the test itself.
+struct alignas(128) Atomic {
+  std::atomic<std::uint64_t> at{0};
+};
+
+void BM_atomic_wait_multi_thread_wait_different_atomics(benchmark::State& state) {
+  const std::uint64_t total_loop_test_param = state.range(0);
+  constexpr std::uint64_t num_atomics = 7;
+  std::vector<Atomic> atomics(num_atomics);
+
+  auto notify_func = [&](std::stop_token st, size_t idx) {
+    while (!st.stop_requested()) {
+      atomics[idx].at.fetch_add(1, std::memory_order_relaxed);
+      atomics[idx].at.notify_all();
+    }
+  };
+
+  std::atomic<std::uint64_t> start_flag = 0;
+  std::atomic<std::uint64_t> done_count = 0;
+
+  auto wait_func = [&, total_loop_test_param](std::stop_token st, size_t idx) {
+    auto old_start = 0;
+    while (!st.stop_requested()) {
+      start_flag.wait(old_start);
+      old_start = start_flag.load();
+      for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
+        auto old = atomics[idx].at.load(std::memory_order_relaxed);
+        atomics[idx].at.wait(old);
+      }
+      done_count.fetch_add(1);
+    }
+  };
+
+  std::vector<std::jthread> notify_threads;
+  notify_threads.reserve(num_atomics);
+
+  std::vector<std::jthread> wait_threads;
+  wait_threads.reserve(num_atomics);
+
+  for (size_t i = 0; i < num_atomics; ++i) {
+    notify_threads.emplace_back(support::make_test_jthread(notify_func, i));
+  }
+
+  for (size_t i = 0; i < num_atomics; ++i) {
+    wait_threads.emplace_back(support::make_test_jthread(wait_func, i));
+  }
+
+  for (auto _ : state) {
+    done_count = 0;
+    start_flag.fetch_add(1);
+    start_flag.notify_all();
+    while (done_count < num_atomics) {
+      std::this_thread::yield();
+    }
+  }
+  for (auto& t : wait_threads) {
+    t.request_stop();
+  }
+  start_flag.fetch_add(1);
+  start_flag.notify_all();
+  for (auto& t : wait_threads) {
+    t.join();
+  }
+}
+BENCHMARK(BM_atomic_wait_multi_thread_wait_different_atomics)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
+
+BENCHMARK_MAIN();
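
A note on the structure shared by all three benchmarks above: the worker threads are created once, outside the timed loop, and every benchmark iteration is released with a start_flag fetch_add/notify_all and collected with a done_count spin. The following stripped-down sketch is not part of the patch; plain std::jthread stands in for support::make_test_jthread and the timed work is elided. It shows only the handshake:

#include <atomic>
#include <cstdint>
#include <thread>
#include <vector>

int main() {
  constexpr std::uint64_t num_threads = 4;
  std::atomic<std::uint64_t> start_flag = 0;
  std::atomic<std::uint64_t> done_count = 0;

  std::vector<std::jthread> threads;
  threads.reserve(num_threads);
  for (std::uint64_t i = 0; i < num_threads; ++i) {
    threads.emplace_back([&](std::stop_token st) {
      std::uint64_t old_start = 0;
      while (!st.stop_requested()) {
        start_flag.wait(old_start); // park until the next round is released
        old_start = start_flag.load();
        // ... the work being measured would run here ...
        done_count.fetch_add(1); // report this round as done
      }
    });
  }

  for (int round = 0; round < 3; ++round) { // stands in for `for (auto _ : state)`
    done_count = 0;
    start_flag.fetch_add(1); // release all workers for one round
    start_flag.notify_all();
    while (done_count < num_threads) // wait until every worker finished the round
      std::this_thread::yield();
  }

  for (auto& t : threads)
    t.request_stop();
  start_flag.fetch_add(1); // wake parked workers so they can observe the stop
  start_flag.notify_all();
} // std::jthread joins on destruction

The final fetch_add/notify_all after request_stop is load-bearing: without it, workers parked in start_flag.wait would never wake to observe the stop request.
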
>From 79e55f997a48970b921b17936196f7411dc6e4dd Mon Sep 17 00:00:00 2001
From: Hui <hui.xie0621 at gmail.com>
Date: Wed, 22 Nov 2023 17:04:37 +0000
Subject: [PATCH 2/4] benchmark mutex vs atomic
---
libcxx/benchmarks/CMakeLists.txt | 1 +
.../atomic_wait_vs_mutex_lock.bench.cpp | 96 +++++++++++++++++++
2 files changed, 97 insertions(+)
create mode 100644 libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
diff --git a/libcxx/benchmarks/CMakeLists.txt b/libcxx/benchmarks/CMakeLists.txt
index 79808fd0d1a24..7bb02e4cefce0 100644
--- a/libcxx/benchmarks/CMakeLists.txt
+++ b/libcxx/benchmarks/CMakeLists.txt
@@ -197,6 +197,7 @@ set(BENCHMARK_TESTS
   algorithms/sort_heap.bench.cpp
   algorithms/stable_sort.bench.cpp
   atomic_wait.bench.cpp
+  atomic_wait_vs_mutex_lock.bench.cpp
   libcxxabi/dynamic_cast.bench.cpp
   libcxxabi/dynamic_cast_old_stress.bench.cpp
   allocation.bench.cpp
diff --git a/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
new file mode 100644
index 0000000000000..14fb6940fa417
--- /dev/null
+++ b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
@@ -0,0 +1,96 @@
+//===----------------------------------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <mutex>
+#include <numeric>
+#include <thread>
+
+#include "benchmark/benchmark.h"
+#include "make_test_thread.h"
+
+#include <iostream>
+
+using namespace std::chrono_literals;
+
+struct AtomicLock {
+  std::atomic<bool>& locked_;
+
+  AtomicLock(const AtomicLock&) = delete;
+  AtomicLock& operator=(const AtomicLock&) = delete;
+
+  AtomicLock(std::atomic<bool>& l) : locked_(l) { lock(); }
+  ~AtomicLock() { unlock(); }
+
+  void lock() {
+    while (true) {
+      locked_.wait(true, std::memory_order_relaxed);
+      bool expected = false;
+      if (locked_.compare_exchange_weak(expected, true, std::memory_order_acquire, std::memory_order_relaxed))
+        break;
+    }
+  }
+
+  void unlock() {
+    locked_.store(false, std::memory_order_release);
+    locked_.notify_all();
+  }
+};
+
+using LockState = std::atomic<bool>;
+using Lock = AtomicLock;
+
+// using LockState = std::mutex;
+// using Lock = std::unique_lock<std::mutex>;
+
+void BM_multi_thread_lock_unlock(benchmark::State& state) {
+  std::uint64_t total_loop_test_param = state.range(0);
+  constexpr auto num_threads = 15;
+  std::vector<std::jthread> threads;
+  threads.reserve(num_threads);
+
+  std::atomic<std::uint64_t> start_flag = 0;
+  std::atomic<std::uint64_t> done_count = 0;
+
+  LockState lock_state{};
+
+  auto func = [&start_flag, &done_count, &lock_state, total_loop_test_param](std::stop_token st) {
+    auto old_start = 0;
+    while (!st.stop_requested()) {
+      start_flag.wait(old_start);
+      old_start = start_flag.load();
+      for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
+        Lock l{lock_state};
+      }
+      done_count.fetch_add(1);
+    }
+  };
+
+  for (size_t i = 0; i < num_threads; ++i) {
+    threads.emplace_back(support::make_test_jthread(func));
+  }
+
+  for (auto _ : state) {
+    done_count = 0;
+    start_flag.fetch_add(1);
+    start_flag.notify_all();
+    while (done_count < num_threads) {
+      std::this_thread::yield();
+    }
+  }
+  for (auto& t : threads) {
+    t.request_stop();
+  }
+  start_flag.fetch_add(1);
+  start_flag.notify_all();
+  for (auto& t : threads) {
+    t.join();
+  }
+}
+BENCHMARK(BM_multi_thread_lock_unlock)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
+
+BENCHMARK_MAIN();
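
AtomicLock above is a test-and-test-and-set spinlock built on C++20 atomic wait/notify: a waiter blocks in locked_.wait(true) until the lock at least looks free, then races for ownership with compare_exchange_weak. The relaxed order on the wait and on CAS failure is sufficient because only the successful acquire exchange needs to establish ordering with the previous owner's release store. Here is a self-contained sketch of the same pattern, not part of the patch, using free functions instead of the RAII wrapper and a hypothetical shared counter:

#include <atomic>
#include <thread>
#include <vector>

std::atomic<bool> locked{false};
long counter = 0; // protected by `locked`

void lock() {
  while (true) {
    // Block until the lock at least looks free; relaxed is fine because the
    // successful CAS below provides the acquire ordering.
    locked.wait(true, std::memory_order_relaxed);
    bool expected = false;
    if (locked.compare_exchange_weak(expected, true, std::memory_order_acquire, std::memory_order_relaxed))
      return; // we own the lock
  }
}

void unlock() {
  locked.store(false, std::memory_order_release);
  locked.notify_all(); // wake threads blocked in locked.wait(true)
}

int main() {
  std::vector<std::jthread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back([] {
      for (int j = 0; j < 100000; ++j) {
        lock();
        ++counter; // critical section
        unlock();
      }
    });
  threads.clear(); // std::jthread joins on destruction
  // counter == 400000 once all threads have joined
}
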
>From 0e4b0e29f51fc6dd8ab51322e8f92bd1230140d1 Mon Sep 17 00:00:00 2001
From: Hui <hui.xie0621 at gmail.com>
Date: Sat, 2 Dec 2023 17:11:47 +0000
Subject: [PATCH 3/4] review comments
---
libcxx/benchmarks/atomic_wait.bench.cpp | 19 +++++--------------
.../atomic_wait_vs_mutex_lock.bench.cpp | 2 --
2 files changed, 5 insertions(+), 16 deletions(-)
diff --git a/libcxx/benchmarks/atomic_wait.bench.cpp b/libcxx/benchmarks/atomic_wait.bench.cpp
index 4ec030ab78068..4a06a45739377 100644
--- a/libcxx/benchmarks/atomic_wait.bench.cpp
+++ b/libcxx/benchmarks/atomic_wait.bench.cpp
@@ -12,8 +12,6 @@
#include "benchmark/benchmark.h"
#include "make_test_thread.h"
-#include <iostream>
-
using namespace std::chrono_literals;
void BM_atomic_wait_one_thread_one_atomic_wait(benchmark::State& state) {
@@ -92,22 +90,15 @@ void BM_atomic_wait_multi_thread_one_atomic_wait(benchmark::State& state) {
 }
 BENCHMARK(BM_atomic_wait_multi_thread_one_atomic_wait)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
 
-// hardware_destructive_interference_size is not implemented in clang yet.
-// Use alignas(128) to cover both x86 and Apple ARM cache-line sizes and
-// prevent false sharing from the test itself.
-struct alignas(128) Atomic {
-  std::atomic<std::uint64_t> at{0};
-};
-
 void BM_atomic_wait_multi_thread_wait_different_atomics(benchmark::State& state) {
   const std::uint64_t total_loop_test_param = state.range(0);
   constexpr std::uint64_t num_atomics = 7;
-  std::vector<Atomic> atomics(num_atomics);
+  std::vector<std::atomic<std::uint64_t>> atomics(num_atomics);
 
   auto notify_func = [&](std::stop_token st, size_t idx) {
     while (!st.stop_requested()) {
-      atomics[idx].at.fetch_add(1, std::memory_order_relaxed);
-      atomics[idx].at.notify_all();
+      atomics[idx].fetch_add(1, std::memory_order_relaxed);
+      atomics[idx].notify_all();
     }
   };
@@ -120,8 +111,8 @@ void BM_atomic_wait_multi_thread_wait_different_atomics(benchmark::State& state)
       start_flag.wait(old_start);
       old_start = start_flag.load();
       for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
-        auto old = atomics[idx].at.load(std::memory_order_relaxed);
-        atomics[idx].at.wait(old);
+        auto old = atomics[idx].load(std::memory_order_relaxed);
+        atomics[idx].wait(old);
       }
       done_count.fetch_add(1);
     }
diff --git a/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
index 14fb6940fa417..8042539650a15 100644
--- a/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
+++ b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
@@ -13,8 +13,6 @@
#include "benchmark/benchmark.h"
#include "make_test_thread.h"
-#include <iostream>
-
using namespace std::chrono_literals;
struct AtomicLock {
>From 98f8f5a36300de8f49ba82334eee782f059f9cee Mon Sep 17 00:00:00 2001
From: Hui <hui.xie0621 at gmail.com>
Date: Sat, 2 Dec 2023 17:25:06 +0000
Subject: [PATCH 4/4] review comments
---
.../atomic_wait_vs_mutex_lock.bench.cpp | 23 +++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
index 8042539650a15..c60fcd579488c 100644
--- a/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
+++ b/libcxx/benchmarks/atomic_wait_vs_mutex_lock.bench.cpp
@@ -5,6 +5,10 @@
 //
 //===----------------------------------------------------------------------===//
 
+// To run this test, build the libcxx and cxx-benchmarks targets, then:
+//   cd third-party/benchmark/tools
+//   ./compare.py filters ../../../build/libcxx/benchmarks/atomic_wait_vs_mutex_lock.libcxx.out BM_atomic_wait BM_mutex
+
 #include <atomic>
 #include <mutex>
 #include <numeric>
@@ -39,13 +43,14 @@ struct AtomicLock {
   }
 };
 
-using LockState = std::atomic<bool>;
-using Lock = AtomicLock;
+// using LockState = std::atomic<bool>;
+// using Lock = AtomicLock;
 
 // using LockState = std::mutex;
 // using Lock = std::unique_lock<std::mutex>;
 
-void BM_multi_thread_lock_unlock(benchmark::State& state) {
+template <class LockState, class Lock>
+void test_multi_thread_lock_unlock(benchmark::State& state) {
   std::uint64_t total_loop_test_param = state.range(0);
   constexpr auto num_threads = 15;
   std::vector<std::jthread> threads;
@@ -61,9 +66,12 @@ void BM_multi_thread_lock_unlock(benchmark::State& state) {
     while (!st.stop_requested()) {
       start_flag.wait(old_start);
       old_start = start_flag.load();
+
+      // The main work under test: locking and unlocking in the loop.
       for (std::uint64_t i = 0; i < total_loop_test_param; ++i) {
         Lock l{lock_state};
       }
+
       done_count.fetch_add(1);
     }
   };
@@ -89,6 +97,13 @@ void BM_multi_thread_lock_unlock(benchmark::State& state) {
     t.join();
   }
 }
-BENCHMARK(BM_multi_thread_lock_unlock)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
+
+void BM_atomic_wait(benchmark::State& state) { test_multi_thread_lock_unlock<std::atomic<bool>, AtomicLock>(state); }
+BENCHMARK(BM_atomic_wait)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
+
+void BM_mutex(benchmark::State& state) {
+  test_multi_thread_lock_unlock<std::mutex, std::unique_lock<std::mutex>>(state);
+}
+BENCHMARK(BM_mutex)->RangeMultiplier(2)->Range(1 << 10, 1 << 20);
 
 BENCHMARK_MAIN();
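
The patch registers the template through the thin BM_atomic_wait and BM_mutex wrappers so the compare.py filters above have short, stable names to match. As a sketch of an alternative, assuming the in-tree benchmark library matches upstream Google Benchmark (whose BENCHMARK_TEMPLATE macro accepts the template arguments directly), the wrappers could be dropped:

// Sketch only: registers test_multi_thread_lock_unlock<...> directly,
// without the BM_atomic_wait / BM_mutex wrapper functions.
BENCHMARK_TEMPLATE(test_multi_thread_lock_unlock, std::atomic<bool>, AtomicLock)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20);
BENCHMARK_TEMPLATE(test_multi_thread_lock_unlock, std::mutex, std::unique_lock<std::mutex>)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 20);

The trade-off is that the registered benchmark names then embed the template arguments, which makes the BM_atomic_wait and BM_mutex filters in the compare.py invocation above less convenient.
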