[libcxx-commits] [libcxx] [libc++] Refactor tests for std::condition_variable (PR #91530)

via libcxx-commits libcxx-commits at lists.llvm.org
Wed May 8 13:32:13 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-libcxx

Author: Louis Dionne (ldionne)

<details>
<summary>Changes</summary>

These tests have always been flaky, which led us to using ALLOW_RETRIES on them. However, while investigating #<!-- -->89083 (using GitHub-provided macOS builders), these tests surfaced as being basically unworkably flaky in that environment.

This patch solves that problem by refactoring the tests to make them succeed deterministically.

---

Patch is 64.33 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/91530.diff


10 Files Affected:

- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp (+80-66) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp (+130-71) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp (+88-41) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp (+88-88) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp (+131-84) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp (+92-69) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp (+136-77) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp (+96-38) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp (+104-79) 
- (modified) libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp (+159-87) 


``````````diff
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
index 42150207c3c4d..d70e027a702c4 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
@@ -5,9 +5,8 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
 
 // <condition_variable>
 
@@ -19,77 +18,92 @@
 //              const chrono::duration<Rep, Period>& rel_time);
 
 #include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
 #include <mutex>
 #include <thread>
-#include <chrono>
-#include <cassert>
 
 #include "make_test_thread.h"
 #include "test_macros.h"
 
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-bool expect_timeout = false;
-
-void f()
-{
-    typedef std::chrono::system_clock Clock;
-    typedef std::chrono::milliseconds milliseconds;
-    std::unique_lock<std::mutex> lk(mut);
-    assert(test2 == 0);
-    test1 = 1;
-    cv.notify_one();
-    Clock::time_point t0 = Clock::now();
-    Clock::time_point wait_end = t0 + milliseconds(250);
-    Clock::duration d;
-    do {
-        d = wait_end - Clock::now();
-        if (d <= milliseconds(0)) break;
-    } while (test2 == 0 && cv.wait_for(lk, d) == std::cv_status::no_timeout);
-    Clock::time_point t1 = Clock::now();
-    if (!expect_timeout)
-    {
-        assert(t1 - t0 < milliseconds(250));
-        assert(test2 != 0);
-    }
-    else
-    {
-        assert(t1 - t0 - milliseconds(250) < milliseconds(50));
-        assert(test2 == 0);
-    }
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+  std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+  f();
+  std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+  return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
 }
 
-int main(int, char**)
-{
-    {
-        std::unique_lock<std::mutex> lk(mut);
-        std::thread t = support::make_test_thread(f);
-        assert(test1 == 0);
-        while (test1 == 0)
-            cv.wait(lk);
-        assert(test1 != 0);
-        test2 = 1;
-        lk.unlock();
-        cv.notify_one();
-        t.join();
-    }
-    test1 = 0;
-    test2 = 0;
-    expect_timeout = true;
-    {
-        std::unique_lock<std::mutex> lk(mut);
-        std::thread t = support::make_test_thread(f);
-        assert(test1 == 0);
-        while (test1 == 0)
-            cv.wait(lk);
-        assert(test1 != 0);
-        lk.unlock();
-        t.join();
-    }
+int main(int, char**) {
+  // Test unblocking via a call to notify_one() in another thread.
+  //
+  // To test this, we set a very long timeout in wait_for() and we wait
+  // again in case we get awoken spuriously. Note that it can actually
+  // happen that we get awoken spuriously and fail to recognize it
+  // (making this test useless), but the likelihood should be small.
+  {
+    std::atomic<bool> ready           = false;
+    std::atomic<bool> likely_spurious = true;
+    auto timeout                      = std::chrono::seconds(3600);
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      auto elapsed = measure([&] {
+        ready = true;
+        do {
+          std::cv_status result = cv.wait_for(lock, timeout);
+          assert(result == std::cv_status::no_timeout);
+        } while (likely_spurious);
+      });
+
+      // This can technically fail if we have many spurious awakenings, but in practice the
+      // tolerance is so high that it shouldn't be a problem.
+      assert(elapsed < timeout);
+    });
+
+    std::thread t2 = support::make_test_thread([&] {
+      while (!ready) {
+        // spin
+      }
+
+      // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+      // so we can notify it while it is waiting.
+      std::unique_lock<std::mutex> lock(mutex);
+      cv.notify_one();
+      likely_spurious = false;
+      lock.unlock();
+    });
+
+    t2.join();
+    t1.join();
+  }
+
+  // Test unblocking via a timeout.
+  //
+  // To test this, we create a thread that waits on a condition variable
+  // with a certain timeout, and we never awaken it. To guard against
+  // spurious wakeups, we wait again whenever we are awoken for a reason
+  // other than a timeout.
+  {
+    auto timeout = std::chrono::milliseconds(250);
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      std::cv_status result;
+      do {
+        auto elapsed = measure([&] { result = cv.wait_for(lock, timeout); });
+        if (result == std::cv_status::timeout)
+          assert(elapsed >= timeout);
+      } while (result != std::cv_status::timeout);
+    });
+
+    t1.join();
+  }
 
   return 0;
 }
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
index 872bcb6d8a57d..dae31fb458862 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
@@ -5,9 +5,8 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
 
 // <condition_variable>
 
@@ -20,82 +19,142 @@
 //              Predicate pred);
 
 #include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
 #include <mutex>
 #include <thread>
-#include <chrono>
-#include <cassert>
 
 #include "make_test_thread.h"
 #include "test_macros.h"
 
-class Pred
-{
-    int& i_;
-public:
-    explicit Pred(int& i) : i_(i) {}
-
-    bool operator()() {return i_ != 0;}
-};
-
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
-    typedef std::chrono::system_clock Clock;
-    typedef std::chrono::milliseconds milliseconds;
-    std::unique_lock<std::mutex> lk(mut);
-    assert(test2 == 0);
-    test1 = 1;
-    cv.notify_one();
-    Clock::time_point t0 = Clock::now();
-    bool r = cv.wait_for(lk, milliseconds(250), Pred(test2));
-    ((void)r); // Prevent unused warning
-    Clock::time_point t1 = Clock::now();
-    if (runs == 0)
-    {
-        assert(t1 - t0 < milliseconds(250));
-        assert(test2 != 0);
-    }
-    else
-    {
-        assert(t1 - t0 - milliseconds(250) < milliseconds(50));
-        assert(test2 == 0);
-    }
-    ++runs;
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+  std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+  f();
+  std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+  return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
 }
 
-int main(int, char**)
-{
-    {
-        std::unique_lock<std::mutex>lk(mut);
-        std::thread t = support::make_test_thread(f);
-        assert(test1 == 0);
-        while (test1 == 0)
-            cv.wait(lk);
-        assert(test1 != 0);
-        test2 = 1;
-        lk.unlock();
-        cv.notify_one();
-        t.join();
-    }
-    test1 = 0;
-    test2 = 0;
-    {
-        std::unique_lock<std::mutex>lk(mut);
-        std::thread t = support::make_test_thread(f);
-        assert(test1 == 0);
-        while (test1 == 0)
-            cv.wait(lk);
-        assert(test1 != 0);
-        lk.unlock();
-        t.join();
-    }
+int main(int, char**) {
+  // Test unblocking via a call to notify_one() in another thread.
+  //
+  // To test this, we set a very long timeout in wait_for() and we try to minimize
+  // the likelihood that we got awoken by a spurious wakeup by updating the
+  // likely_spurious flag only immediately before we perform the notification.
+  {
+    std::atomic<bool> ready           = false;
+    std::atomic<bool> likely_spurious = true;
+    auto timeout                      = std::chrono::seconds(3600);
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      auto elapsed = measure([&] {
+        ready       = true;
+        bool result = cv.wait_for(lock, timeout, [&] { return !likely_spurious; });
+        assert(result); // return value should be true since we didn't time out
+      });
+      assert(elapsed < timeout);
+    });
+
+    std::thread t2 = support::make_test_thread([&] {
+      while (!ready) {
+        // spin
+      }
+
+      // Acquire the same mutex as t1. This ensures that the condition variable has started
+      // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+      // simply use it as a signal that the condition variable has started waiting.
+      std::unique_lock<std::mutex> lock(mutex);
+      lock.unlock();
+
+      likely_spurious = false;
+      cv.notify_one();
+    });
+
+    t2.join();
+    t1.join();
+  }
+
+  // Test unblocking via a timeout.
+  //
+  // To test this, we create a thread that waits on a condition variable with a certain
+  // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+  // which means that we can't get out of the wait via a spurious wakeup.
+  {
+    auto timeout             = std::chrono::milliseconds(250);
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      auto elapsed = measure([&] {
+        bool result = cv.wait_for(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+        assert(!result); // return value should be false since the predicate returns false after the timeout
+      });
+      assert(elapsed >= timeout);
+    });
+
+    t1.join();
+  }
+
+  // Test unblocking via a spurious wakeup.
+  //
+  // To test this, we set a fairly long timeout in wait_for() and we basically never
+  // wake up the condition variable. This way, we are hoping to get out of the wait
+  // via a spurious wakeup.
+  //
+  // However, since spurious wakeups are not required to even happen, this test is
+  // only trying to trigger that code path, but not actually asserting that it is
+  // taken. In particular, we do need to eventually ensure we get out of the wait
+  // by standard means, so we actually wake up the thread at the end.
+  {
+    std::atomic<bool> ready  = false;
+    std::atomic<bool> awoken = false;
+    auto timeout             = std::chrono::seconds(3600);
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      auto elapsed = measure([&] {
+        ready       = true;
+        bool result = cv.wait_for(lock, timeout, [&] { return true; });
+        awoken      = true;
+        assert(result); // return value should be true since we didn't time out
+      });
+      assert(elapsed < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+    });
+
+    std::thread t2 = support::make_test_thread([&] {
+      while (!ready) {
+        // spin
+      }
+
+      // Acquire the same mutex as t1. This ensures that the condition variable has started
+      // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+      // simply use it as a signal that the condition variable has started waiting.
+      std::unique_lock<std::mutex> lock(mutex);
+      lock.unlock();
+
+      // Give some time for t1 to be awoken spuriously so that code path is used.
+      std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would want to assert that the thread has been awoken after this time,
+      // however nothing guarantees us that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+      assert(awoken || !awoken);
+
+      // Whatever happened, actually awaken the condition variable to ensure the test
+      // doesn't keep running until the timeout.
+      cv.notify_one();
+    });
+
+    t2.join();
+    t1.join();
+  }
 
   return 0;
 }
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
index 15feba55616b0..b99d14a9cc355 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
@@ -5,9 +5,8 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
 
 // <condition_variable>
 
@@ -17,51 +16,99 @@
 //   void wait(unique_lock<mutex>& lock, Predicate pred);
 
 #include <condition_variable>
+#include <atomic>
+#include <cassert>
 #include <mutex>
 #include <thread>
-#include <functional>
-#include <cassert>
 
 #include "make_test_thread.h"
 #include "test_macros.h"
 
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-class Pred
-{
-    int& i_;
-public:
-    explicit Pred(int& i) : i_(i) {}
-
-    bool operator()() {return i_ != 0;}
-};
-
-void f()
-{
-    std::unique_lock<std::mutex> lk(mut);
-    assert(test2 == 0);
-    test1 = 1;
-    cv.notify_one();
-    cv.wait(lk, Pred(test2));
-    assert(test2 != 0);
-}
+int main(int, char**) {
+  // Test unblocking via a call to notify_one() in another thread.
+  //
+  // To test this, we try to minimize the likelihood that we got awoken by a
+  // spurious wakeup by updating the likely_spurious flag only immediately
+  // before we perform the notification.
+  {
+    std::atomic<bool> ready           = false;
+    std::atomic<bool> likely_spurious = true;
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      ready = true;
+      cv.wait(lock, [&] { return !likely_spurious; });
+    });
+
+    std::thread t2 = support::make_test_thread([&] {
+      while (!ready) {
+        // spin
+      }
+
+      // Acquire the same mutex as t1. This ensures that the condition variable has started
+      // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+      // simply use it as a signal that the condition variable has started waiting.
+      std::unique_lock<std::mutex> lock(mutex);
+      lock.unlock();
+
+      likely_spurious = false;
+      cv.notify_one();
+    });
+
+    t2.join();
+    t1.join();
+  }
+
+  // Test unblocking via a spurious wakeup.
+  //
+  // To test this, we basically never wake up the condition variable. This way, we
+  // are hoping to get out of the wait via a spurious wakeup.
+  //
+  // However, since spurious wakeups are not required to even happen, this test is
+  // only trying to trigger that code path, but not actually asserting that it is
+  // taken. In particular, we do need to eventually ensure we get out of the wait
+  // by standard means, so we actually wake up the thread at the end.
+  {
+    std::atomic<bool> ready  = false;
+    std::atomic<bool> awoken = false;
+    std::condition_variable cv;
+    std::mutex mutex;
+
+    std::thread t1 = support::make_test_thread([&] {
+      std::unique_lock<std::mutex> lock(mutex);
+      ready = true;
+      cv.wait(lock, [&] { return true; });
+      awoken = true;
+    });
+
+    std::thread t2 = support::make_test_thread([&] {
+      while (!ready) {
+        // spin
+      }
+
+      // Acquire the same mutex as t1. This ensures that the condition variable has started
+      // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+      // simply use it as a signal that the condition variable has started waiting.
+      std::unique_lock<std::mutex> lock(mutex);
+      lock.unlock();
+
+      // Give some time for t1 to be awoken spuriously so that code path is used.
+      std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would want to assert that the thread has been awoken after this time,
+      // however nothing guarantees us that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+      assert(awoken || !awoken);
+
+      // Whatever happened, actually awaken the condition variable to ensure the test finishes.
+      cv.notify_one();
+    });
 
-int main(int, char**)
-{
-    std::unique_lock<std::mutex>lk(mut);
-    std::thread t = support::make_test_thread(f);
-    assert(test1 == 0);
-    while (test1 == 0)
-        cv.wait(lk);
-    assert(test1 != 0);
-    test2 = 1;
-    lk.unlock();
-    cv.notify_one();
-    t.join();
+    t2.join();
+    t1.join();
+  }
 
   return 0;
 }
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
index 03205e68dca67..cab043941c25d 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
@@ -5,9 +5,8 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
 
 // <condition_variable>
 
@@ -19,100 +18,101 @@
 //              const chrono::time_point<Clock, Duration>& abs_time);
 
 #include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
 #include <mutex>
 #include <thread>
-#include <chrono>
-#include <cassert>
 
 #include "make_test_thread.h"
 #include "test_macros.h"
 
-struct TestClock
-{
-    typedef std::chrono::milliseconds duration;
-    typedef duration::rep             rep;
-    typedef duration::period          period;
-    typedef std::chrono::time_point<TestClock> time_point;
-    static const bool is_steady =  true;
-
-    static time_point now()
-    {
-        using namespace std::chrono;
-        return time_point(duration_cast<duration>(
-                steady_clock::now().time_since_epoch()
-                                                 ));
-    }
+struct TestClock {
+  typedef std::chrono::milliseconds duration;
+  typedef duration::rep rep;
+  typedef duration::period period;
+  typedef std::chrono::time_point<TestClock> time_point;
+  static const bool is_steady = true;
+
+  static time_point now() {
+    using namespace std::chrono;
+    return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+  }
 };
 
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-template <typename Clock>
-void f()
-{
-    std::unique_lock<std::mutex> lk(mut);
-    assert(test2 == 0);
-    test1 = 1;
-    cv.notify_one();
-    typename Clock::time_point t0 = Clock::now();
-    typename Clock::time_point t = t0 + std::chrono::milliseconds(250);
-    while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
-        ;
-    typename Clock::time_point t1 = Clock::now();
-    if (runs == 0)
-    {
-        assert(t1 - t0 < std::chrono::milliseconds(250));
-        assert(test2 != 0);
-    }
-    else
-    {
-        assert(t1 - t0 - std::chrono::milliseconds(250) < std::chrono::milliseconds(50));
-        assert(test2 == 0);
-    }
-    ++runs;
-}
+template <class Clock>
+void test() {
...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/91530


More information about the libcxx-commits mailing list