[libcxx-commits] [libcxx] r372034 - Revert "Implement std::condition_variable via pthread_cond_clockwait() where available"
Nico Weber via libcxx-commits
libcxx-commits at lists.llvm.org
Mon Sep 16 15:11:47 PDT 2019
When reverting a commit, please explain the reason for the revert in the
commit message.
On Mon, Sep 16, 2019 at 5:18 PM Dan Albert via libcxx-commits <
libcxx-commits at lists.llvm.org> wrote:
> Author: danalbert
> Date: Mon Sep 16 14:20:32 2019
> New Revision: 372034
>
> URL: http://llvm.org/viewvc/llvm-project?rev=372034&view=rev
> Log:
> Revert "Implement std::condition_variable via pthread_cond_clockwait()
> where available"
>
> This reverts commit 5e37d7f9ff257ec62d733d3d94b11f03e0fe51ca.
>
> Modified:
> libcxx/trunk/include/__config
> libcxx/trunk/include/__mutex_base
>
> libcxx/trunk/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
>
> Modified: libcxx/trunk/include/__config
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/__config?rev=372034&r1=372033&r2=372034&view=diff
>
> ==============================================================================
> --- libcxx/trunk/include/__config (original)
> +++ libcxx/trunk/include/__config Mon Sep 16 14:20:32 2019
> @@ -1087,12 +1087,6 @@ _LIBCPP_FUNC_VIS extern "C" void __sanit
> # endif // _LIBCPP_HAS_THREAD_API
> #endif // _LIBCPP_HAS_NO_THREADS
>
> -#if defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
> -# if (defined(__ANDROID__) && __ANDROID_API__ >= 30) ||
> _LIBCPP_GLIBC_PREREQ(2, 30)
> -# define _LIBCPP_HAS_COND_CLOCKWAIT
> -# endif
> -#endif
> -
> #if defined(_LIBCPP_HAS_NO_THREADS) &&
> defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
> #error _LIBCPP_HAS_THREAD_API_PTHREAD may only be defined when \
> _LIBCPP_HAS_NO_THREADS is not defined.
>
> Modified: libcxx/trunk/include/__mutex_base
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/__mutex_base?rev=372034&r1=372033&r2=372034&view=diff
>
> ==============================================================================
> --- libcxx/trunk/include/__mutex_base (original)
> +++ libcxx/trunk/include/__mutex_base Mon Sep 16 14:20:32 2019
> @@ -15,7 +15,6 @@
> #include <system_error>
> #include <__threading_support>
>
> -#include <time.h>
>
> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
> #pragma GCC system_header
> @@ -338,75 +337,23 @@ public:
> private:
> void __do_timed_wait(unique_lock<mutex>& __lk,
> chrono::time_point<chrono::system_clock, chrono::nanoseconds>)
> _NOEXCEPT;
> -#if defined(_LIBCPP_HAS_COND_CLOCKWAIT)
> - void __do_timed_wait(unique_lock<mutex>& __lk,
> - chrono::time_point<chrono::steady_clock, chrono::nanoseconds>)
> _NOEXCEPT;
> -#endif
> - template <class _Clock>
> - void __do_timed_wait(unique_lock<mutex>& __lk,
> - chrono::time_point<_Clock, chrono::nanoseconds>) _NOEXCEPT;
> };
> #endif // !_LIBCPP_HAS_NO_THREADS
>
> -template <class _Rep, class _Period>
> +template <class _To, class _Rep, class _Period>
> inline _LIBCPP_INLINE_VISIBILITY
> typename enable_if
> <
> - is_floating_point<_Rep>::value,
> - chrono::nanoseconds
> + chrono::__is_duration<_To>::value,
> + _To
> >::type
> -__safe_nanosecond_cast(chrono::duration<_Rep, _Period> __d)
> +__ceil(chrono::duration<_Rep, _Period> __d)
> {
> using namespace chrono;
> - using __ratio = ratio_divide<_Period, nano>;
> - using __ns_rep = nanoseconds::rep;
> - _Rep __result_float = __d.count() * __ratio::num / __ratio::den;
> -
> - _Rep __result_max = numeric_limits<__ns_rep>::max();
> - if (__result_float >= __result_max) {
> - return nanoseconds::max();
> - }
> -
> - _Rep __result_min = numeric_limits<__ns_rep>::min();
> - if (__result_float <= __result_min) {
> - return nanoseconds::min();
> - }
> -
> - return nanoseconds(static_cast<__ns_rep>(__result_float));
> -}
> -
> -template <class _Rep, class _Period>
> -inline _LIBCPP_INLINE_VISIBILITY
> -typename enable_if
> -<
> - !is_floating_point<_Rep>::value,
> - chrono::nanoseconds
> ->::type
> -__safe_nanosecond_cast(chrono::duration<_Rep, _Period> __d)
> -{
> - using namespace chrono;
> - if (__d.count() == 0) {
> - return nanoseconds(0);
> - }
> -
> - using __ratio = ratio_divide<_Period, nano>;
> - using __ns_rep = nanoseconds::rep;
> - __ns_rep __result_max = std::numeric_limits<__ns_rep>::max();
> - if (__d.count() > 0 && __d.count() > __result_max / __ratio::num) {
> - return nanoseconds::max();
> - }
> -
> - __ns_rep __result_min = std::numeric_limits<__ns_rep>::min();
> - if (__d.count() < 0 && __d.count() < __result_min / __ratio::num) {
> - return nanoseconds::min();
> - }
> -
> - __ns_rep __result = __d.count() * __ratio::num / __ratio::den;
> - if (__result == 0) {
> - return nanoseconds(1);
> - }
> -
> - return nanoseconds(__result);
> + _To __r = duration_cast<_To>(__d);
> + if (__r < __d)
> + ++__r;
> + return __r;
> }
>
> #ifndef _LIBCPP_HAS_NO_THREADS
> @@ -424,15 +371,7 @@ condition_variable::wait_until(unique_lo
> const chrono::time_point<_Clock,
> _Duration>& __t)
> {
> using namespace chrono;
> - using __clock_tp_ns = time_point<_Clock, nanoseconds>;
> -
> - typename _Clock::time_point __now = _Clock::now();
> - if (__t <= __now)
> - return cv_status::timeout;
> -
> - __clock_tp_ns __t_ns =
> __clock_tp_ns(__safe_nanosecond_cast(__t.time_since_epoch()));
> -
> - __do_timed_wait(__lk, __t_ns);
> + wait_for(__lk, __t - _Clock::now());
> return _Clock::now() < __t ? cv_status::no_timeout :
> cv_status::timeout;
> }
>
> @@ -458,25 +397,15 @@ condition_variable::wait_for(unique_lock
> using namespace chrono;
> if (__d <= __d.zero())
> return cv_status::timeout;
> - using __ns_rep = nanoseconds::rep;
> + typedef time_point<system_clock, duration<long double, nano> >
> __sys_tpf;
> + typedef time_point<system_clock, nanoseconds> __sys_tpi;
> + __sys_tpf _Max = __sys_tpi::max();
> steady_clock::time_point __c_now = steady_clock::now();
> -
> -#if defined(_LIBCPP_HAS_COND_CLOCKWAIT)
> - using __clock_tp_ns = time_point<steady_clock, nanoseconds>;
> - __ns_rep __now_count_ns =
> __safe_nanosecond_cast(__c_now.time_since_epoch()).count();
> -#else
> - using __clock_tp_ns = time_point<system_clock, nanoseconds>;
> - __ns_rep __now_count_ns =
> __safe_nanosecond_cast(system_clock::now().time_since_epoch()).count();
> -#endif
> -
> - __ns_rep __d_ns_count = __safe_nanosecond_cast(__d).count();
> -
> - if (__now_count_ns > numeric_limits<__ns_rep>::max() - __d_ns_count) {
> - __do_timed_wait(__lk, __clock_tp_ns::max());
> - } else {
> - __do_timed_wait(__lk, __clock_tp_ns(nanoseconds(__now_count_ns +
> __d_ns_count)));
> - }
> -
> + system_clock::time_point __s_now = system_clock::now();
> + if (_Max - __d > __s_now)
> + __do_timed_wait(__lk, __s_now + __ceil<nanoseconds>(__d));
> + else
> + __do_timed_wait(__lk, __sys_tpi::max());
> return steady_clock::now() - __c_now < __d ? cv_status::no_timeout :
> cv_status::timeout;
> }
> @@ -492,46 +421,6 @@ condition_variable::wait_for(unique_lock
> _VSTD::move(__pred));
> }
>
> -#if defined(_LIBCPP_HAS_COND_CLOCKWAIT)
> -inline
> -void
> -condition_variable::__do_timed_wait(unique_lock<mutex>& __lk,
> - chrono::time_point<chrono::steady_clock, chrono::nanoseconds> __tp)
> _NOEXCEPT
> -{
> - using namespace chrono;
> - if (!__lk.owns_lock())
> - __throw_system_error(EPERM,
> - "condition_variable::timed wait: mutex not
> locked");
> - nanoseconds __d = __tp.time_since_epoch();
> - timespec __ts;
> - seconds __s = duration_cast<seconds>(__d);
> - using __ts_sec = decltype(__ts.tv_sec);
> - const __ts_sec __ts_sec_max = numeric_limits<__ts_sec>::max();
> - if (__s.count() < __ts_sec_max)
> - {
> - __ts.tv_sec = static_cast<__ts_sec>(__s.count());
> - __ts.tv_nsec = (__d - __s).count();
> - }
> - else
> - {
> - __ts.tv_sec = __ts_sec_max;
> - __ts.tv_nsec = giga::num - 1;
> - }
> - int __ec = pthread_cond_clockwait(&__cv_,
> __lk.mutex()->native_handle(), CLOCK_MONOTONIC, &__ts);
> - if (__ec != 0 && __ec != ETIMEDOUT)
> - __throw_system_error(__ec, "condition_variable timed_wait
> failed");
> -}
> -#endif // _LIBCPP_HAS_COND_CLOCKWAIT
> -
> -template <class _Clock>
> -inline
> -void
> -condition_variable::__do_timed_wait(unique_lock<mutex>& __lk,
> - chrono::time_point<_Clock, chrono::nanoseconds> __tp) _NOEXCEPT
> -{
> - wait_for(__lk, __tp - _Clock::now());
> -}
> -
> #endif // !_LIBCPP_HAS_NO_THREADS
>
> _LIBCPP_END_NAMESPACE_STD
>
> Modified:
> libcxx/trunk/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp?rev=372034&r1=372033&r2=372034&view=diff
>
> ==============================================================================
> ---
> libcxx/trunk/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
> (original)
> +++
> libcxx/trunk/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
> Mon Sep 16 14:20:32 2019
> @@ -25,12 +25,12 @@
>
> #include "test_macros.h"
>
> -struct TestClock
> +struct Clock
> {
> typedef std::chrono::milliseconds duration;
> typedef duration::rep rep;
> typedef duration::period period;
> - typedef std::chrono::time_point<TestClock> time_point;
> + typedef std::chrono::time_point<Clock> time_point;
> static const bool is_steady = true;
>
> static time_point now()
> @@ -50,40 +50,35 @@ int test2 = 0;
>
> int runs = 0;
>
> -template <typename Clock>
> void f()
> {
> std::unique_lock<std::mutex> lk(mut);
> assert(test2 == 0);
> test1 = 1;
> cv.notify_one();
> - typename Clock::time_point t0 = Clock::now();
> - typename Clock::time_point t = t0 + std::chrono::milliseconds(250);
> + Clock::time_point t0 = Clock::now();
> + Clock::time_point t = t0 + Clock::duration(250);
> while (test2 == 0 && cv.wait_until(lk, t) ==
> std::cv_status::no_timeout)
> ;
> - typename Clock::time_point t1 = Clock::now();
> + Clock::time_point t1 = Clock::now();
> if (runs == 0)
> {
> - assert(t1 - t0 < std::chrono::milliseconds(250));
> + assert(t1 - t0 < Clock::duration(250));
> assert(test2 != 0);
> }
> else
> {
> - assert(t1 - t0 - std::chrono::milliseconds(250) <
> std::chrono::milliseconds(50));
> + assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
> assert(test2 == 0);
> }
> ++runs;
> }
>
> -template <typename Clock>
> -void run_test()
> +int main(int, char**)
> {
> - runs = 0;
> - test1 = 0;
> - test2 = 0;
> {
> std::unique_lock<std::mutex>lk(mut);
> - std::thread t(f<Clock>);
> + std::thread t(f);
> assert(test1 == 0);
> while (test1 == 0)
> cv.wait(lk);
> @@ -97,7 +92,7 @@ void run_test()
> test2 = 0;
> {
> std::unique_lock<std::mutex>lk(mut);
> - std::thread t(f<Clock>);
> + std::thread t(f);
> assert(test1 == 0);
> while (test1 == 0)
> cv.wait(lk);
> @@ -105,12 +100,6 @@ void run_test()
> lk.unlock();
> t.join();
> }
> -}
>
> -int main(int, char**)
> -{
> - run_test<TestClock>();
> - run_test<std::chrono::steady_clock>();
> - run_test<std::chrono::system_clock>();
> - return 0;
> + return 0;
> }
>
>
> _______________________________________________
> libcxx-commits mailing list
> libcxx-commits at lists.llvm.org
> https://lists.llvm.org/cgi-bin/mailman/listinfo/libcxx-commits
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.llvm.org/pipermail/libcxx-commits/attachments/20190916/22534b3d/attachment-0001.html>
More information about the libcxx-commits
mailing list