[libcxx-commits] [libcxx] [llvm] [libc++] Move some macOS CI jobs to Github actions (PR #89083)
Louis Dionne via libcxx-commits
libcxx-commits at lists.llvm.org
Wed May 8 13:33:31 PDT 2024
https://github.com/ldionne updated https://github.com/llvm/llvm-project/pull/89083
>From 24334330628816a359e2256f56c80a1ed33271c3 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 17 Apr 2024 10:29:14 -0400
Subject: [PATCH 1/7] [libc++] Move some macOS CI jobs to Github actions
This is an attempt to decouple macOS CI testing from BuildKite, which
would make the maintenance of macOS CI easier and more accessible to
all contributors. Right now, the macOS CI is running entirely on machines
owned by the LLVM Foundation with only a small set of contributors having
direct access to them.
The story for performing back-deployment testing still needs to be
figured out, so for now we are retaining some jobs under BuildKite.
---
.github/workflows/libcxx-build-and-test.yaml | 35 +++++++++
libcxx/utils/ci/buildkite-pipeline.yml | 75 +++++---------------
2 files changed, 53 insertions(+), 57 deletions(-)
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml
index 44a3d79c72c0a..bbf09a1786cba 100644
--- a/.github/workflows/libcxx-build-and-test.yaml
+++ b/.github/workflows/libcxx-build-and-test.yaml
@@ -82,6 +82,7 @@ jobs:
**/CMakeError.log
**/CMakeOutput.log
**/crash_diagnostics/*
+
stage2:
if: github.repository_owner == 'llvm'
runs-on: libcxx-runners-8-set
@@ -126,6 +127,7 @@ jobs:
**/CMakeError.log
**/CMakeOutput.log
**/crash_diagnostics/*
+
stage3:
if: github.repository_owner == 'llvm'
needs: [ stage1, stage2 ]
@@ -190,6 +192,39 @@ jobs:
**/CMakeError.log
**/CMakeOutput.log
**/crash_diagnostics/*
+
+ macos:
+ runs-on: macos-14
+ needs: [ stage1 ]
+ strategy:
+ fail-fast: false
+ matrix:
+ config: [
+ generic-cxx03,
+ generic-cxx23,
+ generic-modules,
+ apple-system
+ ]
+ steps:
+ - uses: actions/checkout at v4
+ - uses: maxim-lobanov/setup-xcode at v1
+ with:
+ xcode-version: 'latest-stable'
+ - uses: seanmiddleditch/gha-setup-ninja at master
+ - name: Build and test
+ run: |
+ bash libcxx/utils/ci/run-buildbot ${{ matrix.config }}
+ - uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
+ if: always() # Upload artifacts even if the build or test suite fails
+ with:
+ name: macos-${{ matrix.config }}-results
+ path: |
+ **/test-results.xml
+ **/*.abilist
+ **/CMakeError.log
+ **/CMakeOutput.log
+ **/crash_diagnostics/*
+
windows:
runs-on: windows-2022
needs: [ stage1 ]
diff --git a/libcxx/utils/ci/buildkite-pipeline.yml b/libcxx/utils/ci/buildkite-pipeline.yml
index 4bacdec8f8d6b..0e9a02ad081b1 100644
--- a/libcxx/utils/ci/buildkite-pipeline.yml
+++ b/libcxx/utils/ci/buildkite-pipeline.yml
@@ -56,47 +56,8 @@ environment_definitions:
steps:
-- group: ':mac: Apple'
+- group: ':mac: Apple Backdeployment'
steps:
- - label: MacOS x86_64
- command: libcxx/utils/ci/run-buildbot generic-cxx23
- agents:
- queue: libcxx-builders
- os: macos
- arch: x86_64
- <<: *common
-
- - label: MacOS arm64
- command: libcxx/utils/ci/run-buildbot generic-cxx23
- agents:
- queue: libcxx-builders
- os: macos
- arch: arm64
- <<: *common
-
- - label: MacOS with Modules
- command: libcxx/utils/ci/run-buildbot generic-modules
- agents:
- queue: libcxx-builders
- os: macos
- <<: *common
-
- - label: MacOS with C++03
- command: libcxx/utils/ci/run-buildbot generic-cxx03
- agents:
- queue: libcxx-builders
- os: macos
- <<: *common
-
- # Build with the configuration we use to generate libc++.dylib on Apple platforms
- - label: Apple system
- command: libcxx/utils/ci/run-buildbot apple-system
- agents:
- queue: libcxx-builders
- os: macos
- arch: arm64 # This can technically run on any architecture, but we have more resources on arm64 so we pin this job to arm64
- <<: *common
-
- label: Apple back-deployment macosx10.13
command: libcxx/utils/ci/run-buildbot apple-system-backdeployment-10.13
agents:
@@ -121,6 +82,23 @@ steps:
arch: x86_64 # TODO: Remove this once we are able to run back-deployment on arm64 again, since this isn't x86_64 specific
<<: *common
+ # TODO: Re-enable this once we've figured out how to run back-deployment testing on arm64 on recent OSes
+ # - label: "Apple back-deployment macosx11.0 arm64"
+ # command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-11.0"
+ # artifact_paths:
+ # - "**/test-results.xml"
+ # - "**/*.abilist"
+ # agents:
+ # queue: "libcxx-builders"
+ # os: "macos"
+ # arch: "arm64"
+ # retry:
+ # automatic:
+ # - exit_status: -1 # Agent was lost
+ # limit: 2
+ # timeout_in_minutes: 120
+
+
- group: ARM
steps:
- label: AArch64
@@ -230,20 +208,3 @@ steps:
queue: libcxx-builders
os: android
<<: *common
-
-
- # TODO: Re-enable this once we've figured out how to run back-deployment testing on arm64 on recent OSes
- # - label: "Apple back-deployment macosx11.0 arm64"
- # command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-11.0"
- # artifact_paths:
- # - "**/test-results.xml"
- # - "**/*.abilist"
- # agents:
- # queue: "libcxx-builders"
- # os: "macos"
- # arch: "arm64"
- # retry:
- # automatic:
- # - exit_status: -1 # Agent was lost
- # limit: 2
- # timeout_in_minutes: 120
>From ca9e500b6d8335af95af3a66ddd19b931aa143d5 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 7 May 2024 10:10:37 -0400
Subject: [PATCH 2/7] Reduce configuration to avoid spamming the bots while we
are testing
---
.github/workflows/libcxx-build-and-test.yaml | 306 +++++++++----------
1 file changed, 150 insertions(+), 156 deletions(-)
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml
index bbf09a1786cba..e9453fa521acd 100644
--- a/.github/workflows/libcxx-build-and-test.yaml
+++ b/.github/workflows/libcxx-build-and-test.yaml
@@ -55,16 +55,10 @@ jobs:
fail-fast: false
matrix:
config: [
- 'generic-cxx03',
'generic-cxx26',
- 'generic-modules'
]
cc: [ 'clang-19' ]
cxx: [ 'clang++-19' ]
- include:
- - config: 'generic-gcc'
- cc: 'gcc-13'
- cxx: 'g++-13'
steps:
- uses: actions/checkout at v4
- name: ${{ matrix.config }}.${{ matrix.cxx }}
@@ -83,115 +77,115 @@ jobs:
**/CMakeOutput.log
**/crash_diagnostics/*
- stage2:
- if: github.repository_owner == 'llvm'
- runs-on: libcxx-runners-8-set
- needs: [ stage1 ]
- continue-on-error: false
- strategy:
- fail-fast: false
- matrix:
- config: [
- 'generic-cxx11',
- 'generic-cxx14',
- 'generic-cxx17',
- 'generic-cxx20',
- 'generic-cxx23'
- ]
- cc: [ 'clang-19' ]
- cxx: [ 'clang++-19' ]
- include:
- - config: 'generic-gcc-cxx11'
- cc: 'gcc-13'
- cxx: 'g++-13'
- - config: 'generic-cxx23'
- cc: 'clang-17'
- cxx: 'clang++-17'
- - config: 'generic-cxx26'
- cc: 'clang-18'
- cxx: 'clang++-18'
- steps:
- - uses: actions/checkout at v4
- - name: ${{ matrix.config }}
- run: libcxx/utils/ci/run-buildbot ${{ matrix.config }}
- env:
- CC: ${{ matrix.cc }}
- CXX: ${{ matrix.cxx }}
- - uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
- if: always() # Upload artifacts even if the build or test suite fails
- with:
- name: ${{ matrix.config }}-${{ matrix.cxx }}-results
- path: |
- **/test-results.xml
- **/*.abilist
- **/CMakeError.log
- **/CMakeOutput.log
- **/crash_diagnostics/*
+ # stage2:
+ # if: github.repository_owner == 'llvm'
+ # runs-on: libcxx-runners-8-set
+ # needs: [ stage1 ]
+ # continue-on-error: false
+ # strategy:
+ # fail-fast: false
+ # matrix:
+ # config: [
+ # 'generic-cxx11',
+ # 'generic-cxx14',
+ # 'generic-cxx17',
+ # 'generic-cxx20',
+ # 'generic-cxx23'
+ # ]
+ # cc: [ 'clang-19' ]
+ # cxx: [ 'clang++-19' ]
+ # include:
+ # - config: 'generic-gcc-cxx11'
+ # cc: 'gcc-13'
+ # cxx: 'g++-13'
+ # - config: 'generic-cxx23'
+ # cc: 'clang-17'
+ # cxx: 'clang++-17'
+ # - config: 'generic-cxx26'
+ # cc: 'clang-18'
+ # cxx: 'clang++-18'
+ # steps:
+ # - uses: actions/checkout at v4
+ # - name: ${{ matrix.config }}
+ # run: libcxx/utils/ci/run-buildbot ${{ matrix.config }}
+ # env:
+ # CC: ${{ matrix.cc }}
+ # CXX: ${{ matrix.cxx }}
+ # - uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
+ # if: always() # Upload artifacts even if the build or test suite fails
+ # with:
+ # name: ${{ matrix.config }}-${{ matrix.cxx }}-results
+ # path: |
+ # **/test-results.xml
+ # **/*.abilist
+ # **/CMakeError.log
+ # **/CMakeOutput.log
+ # **/crash_diagnostics/*
- stage3:
- if: github.repository_owner == 'llvm'
- needs: [ stage1, stage2 ]
- continue-on-error: false
- strategy:
- fail-fast: false
- max-parallel: 8
- matrix:
- config: [
- 'generic-abi-unstable',
- 'generic-hardening-mode-debug',
- 'generic-hardening-mode-extensive',
- 'generic-hardening-mode-fast',
- 'generic-hardening-mode-fast-with-abi-breaks',
- 'generic-merged',
- 'generic-modules-lsv',
- 'generic-no-exceptions',
- 'generic-no-experimental',
- 'generic-no-filesystem',
- 'generic-no-localization',
- 'generic-no-random_device',
- 'generic-no-threads',
- 'generic-no-tzdb',
- 'generic-no-unicode',
- 'generic-no-wide-characters',
- 'generic-no-rtti',
- 'generic-optimized-speed',
- 'generic-static',
- # TODO Find a better place for the benchmark and bootstrapping builds to live. They're either very expensive
- # or don't provide much value since the benchmark run results are too noise on the bots.
- 'benchmarks',
- 'bootstrapping-build'
- ]
- machine: [ 'libcxx-runners-8-set' ]
- include:
- - config: 'generic-cxx26'
- machine: libcxx-runners-8-set
- - config: 'generic-asan'
- machine: libcxx-runners-8-set
- - config: 'generic-tsan'
- machine: libcxx-runners-8-set
- - config: 'generic-ubsan'
- machine: libcxx-runners-8-set
- # Use a larger machine for MSAN to avoid timeout and memory allocation issues.
- - config: 'generic-msan'
- machine: libcxx-runners-8-set
- runs-on: ${{ matrix.machine }}
- steps:
- - uses: actions/checkout at v4
- - name: ${{ matrix.config }}
- run: libcxx/utils/ci/run-buildbot ${{ matrix.config }}
- env:
- CC: clang-19
- CXX: clang++-19
- - uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
- if: always()
- with:
- name: ${{ matrix.config }}-results
- path: |
- **/test-results.xml
- **/*.abilist
- **/CMakeError.log
- **/CMakeOutput.log
- **/crash_diagnostics/*
+ # stage3:
+ # if: github.repository_owner == 'llvm'
+ # needs: [ stage1, stage2 ]
+ # continue-on-error: false
+ # strategy:
+ # fail-fast: false
+ # max-parallel: 8
+ # matrix:
+ # config: [
+ # 'generic-abi-unstable',
+ # 'generic-hardening-mode-debug',
+ # 'generic-hardening-mode-extensive',
+ # 'generic-hardening-mode-fast',
+ # 'generic-hardening-mode-fast-with-abi-breaks',
+ # 'generic-merged',
+ # 'generic-modules-lsv',
+ # 'generic-no-exceptions',
+ # 'generic-no-experimental',
+ # 'generic-no-filesystem',
+ # 'generic-no-localization',
+ # 'generic-no-random_device',
+ # 'generic-no-threads',
+ # 'generic-no-tzdb',
+ # 'generic-no-unicode',
+ # 'generic-no-wide-characters',
+ # 'generic-no-rtti',
+ # 'generic-optimized-speed',
+ # 'generic-static',
+ # # TODO Find a better place for the benchmark and bootstrapping builds to live. They're either very expensive
+ # # or don't provide much value since the benchmark run results are too noise on the bots.
+ # 'benchmarks',
+ # 'bootstrapping-build'
+ # ]
+ # machine: [ 'libcxx-runners-8-set' ]
+ # include:
+ # - config: 'generic-cxx26'
+ # machine: libcxx-runners-8-set
+ # - config: 'generic-asan'
+ # machine: libcxx-runners-8-set
+ # - config: 'generic-tsan'
+ # machine: libcxx-runners-8-set
+ # - config: 'generic-ubsan'
+ # machine: libcxx-runners-8-set
+ # # Use a larger machine for MSAN to avoid timeout and memory allocation issues.
+ # - config: 'generic-msan'
+ # machine: libcxx-runners-8-set
+ # runs-on: ${{ matrix.machine }}
+ # steps:
+ # - uses: actions/checkout at v4
+ # - name: ${{ matrix.config }}
+ # run: libcxx/utils/ci/run-buildbot ${{ matrix.config }}
+ # env:
+ # CC: clang-19
+ # CXX: clang++-19
+ # - uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
+ # if: always()
+ # with:
+ # name: ${{ matrix.config }}-results
+ # path: |
+ # **/test-results.xml
+ # **/*.abilist
+ # **/CMakeError.log
+ # **/CMakeOutput.log
+ # **/crash_diagnostics/*
macos:
runs-on: macos-14
@@ -225,45 +219,45 @@ jobs:
**/CMakeOutput.log
**/crash_diagnostics/*
- windows:
- runs-on: windows-2022
- needs: [ stage1 ]
- strategy:
- fail-fast: false
- matrix:
- include:
- - { config: clang-cl-dll, mingw: false }
- - { config: clang-cl-static, mingw: false }
- - { config: clang-cl-no-vcruntime, mingw: false }
- - { config: clang-cl-debug, mingw: false }
- - { config: clang-cl-static-crt, mingw: false }
- - { config: mingw-dll, mingw: true }
- - { config: mingw-static, mingw: true }
- - { config: mingw-dll-i686, mingw: true }
- steps:
- - uses: actions/checkout at v4
- - name: Install dependencies
- run: |
- choco install -y ninja wget
- pip install psutil
- - name: Install a current LLVM
- if: ${{ matrix.mingw != true }}
- run: |
- choco install -y llvm --version=17.0.6
- - name: Install llvm-mingw
- if: ${{ matrix.mingw == true }}
- run: |
- curl -LO https://github.com/mstorsjo/llvm-mingw/releases/download/20231128/llvm-mingw-20231128-ucrt-x86_64.zip
- powershell Expand-Archive llvm-mingw*.zip -DestinationPath .
- del llvm-mingw*.zip
- mv llvm-mingw* c:\llvm-mingw
- echo "c:\llvm-mingw\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
- - name: Add Git Bash to the path
- run: |
- echo "c:\Program Files\Git\usr\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
- - name: Set up the MSVC dev environment
- if: ${{ matrix.mingw != true }}
- uses: ilammy/msvc-dev-cmd at v1
- - name: Build and test
- run: |
- bash libcxx/utils/ci/run-buildbot ${{ matrix.config }}
+ # windows:
+ # runs-on: windows-2022
+ # needs: [ stage1 ]
+ # strategy:
+ # fail-fast: false
+ # matrix:
+ # include:
+ # - { config: clang-cl-dll, mingw: false }
+ # - { config: clang-cl-static, mingw: false }
+ # - { config: clang-cl-no-vcruntime, mingw: false }
+ # - { config: clang-cl-debug, mingw: false }
+ # - { config: clang-cl-static-crt, mingw: false }
+ # - { config: mingw-dll, mingw: true }
+ # - { config: mingw-static, mingw: true }
+ # - { config: mingw-dll-i686, mingw: true }
+ # steps:
+ # - uses: actions/checkout at v4
+ # - name: Install dependencies
+ # run: |
+ # choco install -y ninja wget
+ # pip install psutil
+ # - name: Install a current LLVM
+ # if: ${{ matrix.mingw != true }}
+ # run: |
+ # choco install -y llvm --version=17.0.6
+ # - name: Install llvm-mingw
+ # if: ${{ matrix.mingw == true }}
+ # run: |
+ # curl -LO https://github.com/mstorsjo/llvm-mingw/releases/download/20231128/llvm-mingw-20231128-ucrt-x86_64.zip
+ # powershell Expand-Archive llvm-mingw*.zip -DestinationPath .
+ # del llvm-mingw*.zip
+ # mv llvm-mingw* c:\llvm-mingw
+ # echo "c:\llvm-mingw\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
+ # - name: Add Git Bash to the path
+ # run: |
+ # echo "c:\Program Files\Git\usr\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
+ # - name: Set up the MSVC dev environment
+ # if: ${{ matrix.mingw != true }}
+ # uses: ilammy/msvc-dev-cmd at v1
+ # - name: Build and test
+ # run: |
+ # bash libcxx/utils/ci/run-buildbot ${{ matrix.config }}
>From cc8fd43f289b5345a8934bf512f90bcef2a25824 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 7 May 2024 14:06:42 -0400
Subject: [PATCH 3/7] Install psutil
---
.github/workflows/libcxx-build-and-test.yaml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml
index e9453fa521acd..b0a258476732c 100644
--- a/.github/workflows/libcxx-build-and-test.yaml
+++ b/.github/workflows/libcxx-build-and-test.yaml
@@ -207,6 +207,7 @@ jobs:
- uses: seanmiddleditch/gha-setup-ninja at master
- name: Build and test
run: |
+ python3 -m pip install psutil
bash libcxx/utils/ci/run-buildbot ${{ matrix.config }}
- uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
if: always() # Upload artifacts even if the build or test suite fails
>From beac5df0b92f7b28bc1859c09029b11192757c8a Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 7 May 2024 15:22:03 -0400
Subject: [PATCH 4/7] Reduce strain on BuildKite
---
libcxx/utils/ci/buildkite-pipeline.yml | 284 ++++++++++++-------------
1 file changed, 142 insertions(+), 142 deletions(-)
diff --git a/libcxx/utils/ci/buildkite-pipeline.yml b/libcxx/utils/ci/buildkite-pipeline.yml
index 0e9a02ad081b1..3393e40310866 100644
--- a/libcxx/utils/ci/buildkite-pipeline.yml
+++ b/libcxx/utils/ci/buildkite-pipeline.yml
@@ -66,145 +66,145 @@ steps:
arch: x86_64 # We need to use x86_64 for back-deployment CI on this target since macOS didn't support arm64 back then
<<: *common
- - label: Apple back-deployment macosx10.15
- command: libcxx/utils/ci/run-buildbot apple-system-backdeployment-10.15
- agents:
- queue: libcxx-builders
- os: macos
- arch: x86_64 # We need to use x86_64 for back-deployment CI on this target since macOS didn't support arm64 back then
- <<: *common
-
- - label: Apple back-deployment with hardening enabled
- command: libcxx/utils/ci/run-buildbot apple-system-backdeployment-hardened-11.0
- agents:
- queue: libcxx-builders
- os: macos
- arch: x86_64 # TODO: Remove this once we are able to run back-deployment on arm64 again, since this isn't x86_64 specific
- <<: *common
-
- # TODO: Re-enable this once we've figured out how to run back-deployment testing on arm64 on recent OSes
- # - label: "Apple back-deployment macosx11.0 arm64"
- # command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-11.0"
- # artifact_paths:
- # - "**/test-results.xml"
- # - "**/*.abilist"
- # agents:
- # queue: "libcxx-builders"
- # os: "macos"
- # arch: "arm64"
- # retry:
- # automatic:
- # - exit_status: -1 # Agent was lost
- # limit: 2
- # timeout_in_minutes: 120
-
-
-- group: ARM
- steps:
- - label: AArch64
- command: libcxx/utils/ci/run-buildbot aarch64
- agents:
- queue: libcxx-builders-linaro-arm
- arch: aarch64
- <<: *common
-
- - label: AArch64 -fno-exceptions
- command: libcxx/utils/ci/run-buildbot aarch64-no-exceptions
- agents:
- queue: libcxx-builders-linaro-arm
- arch: aarch64
- <<: *common
-
- - label: Armv8
- command: libcxx/utils/ci/run-buildbot armv8
- agents:
- queue: libcxx-builders-linaro-arm
- arch: armv8l
- <<: *common
-
- - label: Armv8 -fno-exceptions
- command: libcxx/utils/ci/run-buildbot armv8-no-exceptions
- agents:
- queue: libcxx-builders-linaro-arm
- arch: armv8l
- <<: *common
-
- - label: Armv7
- command: libcxx/utils/ci/run-buildbot armv7
- agents:
- queue: libcxx-builders-linaro-arm
- arch: armv8l
- <<: *common
-
- - label: Armv7 -fno-exceptions
- command: libcxx/utils/ci/run-buildbot armv7-no-exceptions
- agents:
- queue: libcxx-builders-linaro-arm
- arch: armv8l
- <<: *common
-
- - label: Armv7-M picolibc
- command: libcxx/utils/ci/run-buildbot armv7m-picolibc
- agents:
- queue: libcxx-builders-linaro-arm
- arch: aarch64
- <<: *common
-
- - label: Armv7-M picolibc -fno-exceptions
- command: libcxx/utils/ci/run-buildbot armv7m-picolibc-no-exceptions
- agents:
- queue: libcxx-builders-linaro-arm
- arch: aarch64
- <<: *common
-
-- group: AIX
- steps:
- - label: AIX (32-bit)
- command: libcxx/utils/ci/run-buildbot aix
- env:
- CC: clang
- CXX: clang++
- OBJECT_MODE: '32'
- agents:
- queue: libcxx-builders
- os: aix
- <<: *common
-
- - label: AIX (64-bit)
- command: libcxx/utils/ci/run-buildbot aix
- env:
- CC: clang
- CXX: clang++
- OBJECT_MODE: '64'
- agents:
- queue: libcxx-builders
- os: aix
- <<: *common
-
-- group: ':freebsd: FreeBSD'
- steps:
- - label: FreeBSD 13 amd64
- command: libcxx/utils/ci/run-buildbot generic-cxx26
- env:
- CC: clang17
- CXX: clang++17
- agents:
- queue: libcxx-builders
- os: freebsd
- <<: *common
-
-- group: ':android: Android'
- steps:
- - label: Android 5.0, x86 NDK
- command: libcxx/utils/ci/run-buildbot android-ndk-21-def-x86
- agents:
- queue: libcxx-builders
- os: android
- <<: *common
-
- - label: Android 13, x86_64 NDK
- command: libcxx/utils/ci/run-buildbot android-ndk-33-goog-x86_64
- agents:
- queue: libcxx-builders
- os: android
- <<: *common
+# - label: Apple back-deployment macosx10.15
+# command: libcxx/utils/ci/run-buildbot apple-system-backdeployment-10.15
+# agents:
+# queue: libcxx-builders
+# os: macos
+# arch: x86_64 # We need to use x86_64 for back-deployment CI on this target since macOS didn't support arm64 back then
+# <<: *common
+
+# - label: Apple back-deployment with hardening enabled
+# command: libcxx/utils/ci/run-buildbot apple-system-backdeployment-hardened-11.0
+# agents:
+# queue: libcxx-builders
+# os: macos
+# arch: x86_64 # TODO: Remove this once we are able to run back-deployment on arm64 again, since this isn't x86_64 specific
+# <<: *common
+
+# # TODO: Re-enable this once we've figured out how to run back-deployment testing on arm64 on recent OSes
+# # - label: "Apple back-deployment macosx11.0 arm64"
+# # command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-11.0"
+# # artifact_paths:
+# # - "**/test-results.xml"
+# # - "**/*.abilist"
+# # agents:
+# # queue: "libcxx-builders"
+# # os: "macos"
+# # arch: "arm64"
+# # retry:
+# # automatic:
+# # - exit_status: -1 # Agent was lost
+# # limit: 2
+# # timeout_in_minutes: 120
+
+
+# - group: ARM
+# steps:
+# - label: AArch64
+# command: libcxx/utils/ci/run-buildbot aarch64
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: aarch64
+# <<: *common
+
+# - label: AArch64 -fno-exceptions
+# command: libcxx/utils/ci/run-buildbot aarch64-no-exceptions
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: aarch64
+# <<: *common
+
+# - label: Armv8
+# command: libcxx/utils/ci/run-buildbot armv8
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: armv8l
+# <<: *common
+
+# - label: Armv8 -fno-exceptions
+# command: libcxx/utils/ci/run-buildbot armv8-no-exceptions
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: armv8l
+# <<: *common
+
+# - label: Armv7
+# command: libcxx/utils/ci/run-buildbot armv7
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: armv8l
+# <<: *common
+
+# - label: Armv7 -fno-exceptions
+# command: libcxx/utils/ci/run-buildbot armv7-no-exceptions
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: armv8l
+# <<: *common
+
+# - label: Armv7-M picolibc
+# command: libcxx/utils/ci/run-buildbot armv7m-picolibc
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: aarch64
+# <<: *common
+
+# - label: Armv7-M picolibc -fno-exceptions
+# command: libcxx/utils/ci/run-buildbot armv7m-picolibc-no-exceptions
+# agents:
+# queue: libcxx-builders-linaro-arm
+# arch: aarch64
+# <<: *common
+
+# - group: AIX
+# steps:
+# - label: AIX (32-bit)
+# command: libcxx/utils/ci/run-buildbot aix
+# env:
+# CC: clang
+# CXX: clang++
+# OBJECT_MODE: '32'
+# agents:
+# queue: libcxx-builders
+# os: aix
+# <<: *common
+
+# - label: AIX (64-bit)
+# command: libcxx/utils/ci/run-buildbot aix
+# env:
+# CC: clang
+# CXX: clang++
+# OBJECT_MODE: '64'
+# agents:
+# queue: libcxx-builders
+# os: aix
+# <<: *common
+
+# - group: ':freebsd: FreeBSD'
+# steps:
+# - label: FreeBSD 13 amd64
+# command: libcxx/utils/ci/run-buildbot generic-cxx26
+# env:
+# CC: clang17
+# CXX: clang++17
+# agents:
+# queue: libcxx-builders
+# os: freebsd
+# <<: *common
+
+# - group: ':android: Android'
+# steps:
+# - label: Android 5.0, x86 NDK
+# command: libcxx/utils/ci/run-buildbot android-ndk-21-def-x86
+# agents:
+# queue: libcxx-builders
+# os: android
+# <<: *common
+
+# - label: Android 13, x86_64 NDK
+# command: libcxx/utils/ci/run-buildbot android-ndk-33-goog-x86_64
+# agents:
+# queue: libcxx-builders
+# os: android
+# <<: *common
>From 30e480e1f726b9e81dc97d6fdeb45dec285b38cb Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 7 May 2024 15:25:07 -0400
Subject: [PATCH 5/7] Try installing psutil in venv
---
.github/workflows/libcxx-build-and-test.yaml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml
index b0a258476732c..9450088cd09bf 100644
--- a/.github/workflows/libcxx-build-and-test.yaml
+++ b/.github/workflows/libcxx-build-and-test.yaml
@@ -207,6 +207,8 @@ jobs:
- uses: seanmiddleditch/gha-setup-ninja at master
- name: Build and test
run: |
+ python3 -m venv .venv
+ source .venv/bin/activate
python3 -m pip install psutil
bash libcxx/utils/ci/run-buildbot ${{ matrix.config }}
- uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
>From 5331ac0fb818fd0585cb658ef13fa1472b2a0e90 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 8 May 2024 11:03:00 -0400
Subject: [PATCH 6/7] Try dropping DYLD_LIBRARY_PATH
---
libcxx/test/configs/apple-libc++-shared.cfg.in | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/libcxx/test/configs/apple-libc++-shared.cfg.in b/libcxx/test/configs/apple-libc++-shared.cfg.in
index 2d0aee3ae905e..1c730c8ff4c86 100644
--- a/libcxx/test/configs/apple-libc++-shared.cfg.in
+++ b/libcxx/test/configs/apple-libc++-shared.cfg.in
@@ -1,11 +1,11 @@
# Testing configuration for Apple's system libc++.
#
# This configuration differs from a normal LLVM shared library configuration in
-# that we must use DYLD_LIBRARY_PATH to run the tests against the just-built library,
-# since Apple's libc++ has an absolute install_name.
+# that we have a single include directory for the libc++ headers since Apple
+# system builds don't use a per-target include directory layout.
#
-# We also don't use a per-target include directory layout, so we have only one
-# include directory for the libc++ headers.
+# We also identify the variant of the standard library as 'apple-libc++', which
+# enables additional vendor-specific tests.
lit_config.load_config(config, '@CMAKE_CURRENT_BINARY_DIR@/cmake-bridge.cfg')
@@ -16,10 +16,10 @@ config.substitutions.append(('%{compile_flags}',
'-nostdinc++ -I %{include-dir} -I %{libcxx-dir}/test/support'
))
config.substitutions.append(('%{link_flags}',
- '-nostdlib++ -L %{lib-dir} -lc++'
+ '-nostdlib++ -L %{lib-dir} -Wl,-rpath,%{lib-dir} -lc++'
))
config.substitutions.append(('%{exec}',
- '%{executor} --execdir %T --env DYLD_LIBRARY_PATH=%{lib-dir} -- '
+ '%{executor} --execdir %T -- '
))
config.stdlib = 'apple-libc++'
>From 9d1f60340ca367355fe927da0a0b5692afacf9a5 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 8 May 2024 16:29:14 -0400
Subject: [PATCH 7/7] [libc++] Try to apply the condition variable test
improvements to see if they are less flaky now
---
.../wait_for.pass.cpp | 146 ++++++-----
.../wait_for_pred.pass.cpp | 201 +++++++++-----
.../wait_pred.pass.cpp | 129 ++++++---
.../wait_until.pass.cpp | 176 ++++++-------
.../wait_until_pred.pass.cpp | 215 +++++++++------
.../wait_for.pass.cpp | 161 +++++++-----
.../wait_for_pred.pass.cpp | 213 +++++++++------
.../wait_pred.pass.cpp | 134 +++++++---
.../wait_until.pass.cpp | 183 +++++++------
.../wait_until_pred.pass.cpp | 246 +++++++++++-------
10 files changed, 1104 insertions(+), 700 deletions(-)
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
index 42150207c3c4d..d70e027a702c4 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,77 +18,92 @@
// const chrono::duration<Rep, Period>& rel_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-bool expect_timeout = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point wait_end = t0 + milliseconds(250);
- Clock::duration d;
- do {
- d = wait_end - Clock::now();
- if (d <= milliseconds(0)) break;
- } while (test2 == 0 && cv.wait_for(lk, d) == std::cv_status::no_timeout);
- Clock::time_point t1 = Clock::now();
- if (!expect_timeout)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- expect_timeout = true;
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_for(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+ });
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ std::cv_status result;
+ do {
+ auto elapsed = measure([&] { result = cv.wait_for(lock, timeout); });
+ if (result == std::cv_status::timeout)
+ assert(elapsed >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
index 872bcb6d8a57d..dae31fb458862 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,82 +19,142 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
-};
-
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- bool r = cv.wait_for(lk, milliseconds(250), Pred(test2));
- ((void)r); // Prevent unused warning
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we try to minimize
+  // the likelihood that we were awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ bool result = cv.wait_for(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ });
+ assert(elapsed >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_for() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> awoken = false;
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would want to assert that the thread has been awoken after this time,
+      // however nothing guarantees that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
index 15feba55616b0..b99d14a9cc355 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -17,51 +16,99 @@
// void wait(unique_lock<mutex>& lock, Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
#include <mutex>
#include <thread>
-#include <functional>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
-};
-
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- cv.wait(lk, Pred(test2));
- assert(test2 != 0);
-}
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+  // To test this, we try to minimize the likelihood that we were awoken by a
+ // spurious wakeup by updating the likely_spurious flag only immediately
+ // before we perform the notification.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return !likely_spurious; });
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we basically never wake up the condition variable. This way, we
+ // are hoping to get out of the wait via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> awoken = false;
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return true; });
+ awoken = true;
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would want to assert that the thread has been awoken after this time,
+      // however nothing guarantees that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test finishes.
+ cv.notify_one();
+ });
-int main(int, char**)
-{
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
+ t2.join();
+ t1.join();
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
index 03205e68dca67..cab043941c25d 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,100 +18,101 @@
// const chrono::time_point<Clock, Duration>& abs_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct TestClock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<TestClock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-template <typename Clock>
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- typename Clock::time_point t0 = Clock::now();
- typename Clock::time_point t = t0 + std::chrono::milliseconds(250);
- while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
- ;
- typename Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < std::chrono::milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - std::chrono::milliseconds(250) < std::chrono::milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
-}
+template <class Clock>
+void test() {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_until(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ std::cv_status result;
+ do {
+ result = cv.wait_until(lock, timeout);
+ if (result == std::cv_status::timeout)
+ assert(Clock::now() >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
-template <typename Clock>
-void run_test()
-{
- runs = 0;
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f<Clock>);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f<Clock>);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+ t1.join();
+ }
}
-int main(int, char**)
-{
- run_test<TestClock>();
- run_test<std::chrono::steady_clock>();
- run_test<std::chrono::system_clock>();
- return 0;
+int main(int, char**) {
+ test<TestClock>();
+ test<std::chrono::steady_clock>();
+ test<std::chrono::system_clock>();
+ return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
index fb8bd6e380693..c5441f98ccb15 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,99 +19,147 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
+template <class Clock>
+void test() {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we try to minimize
+  // the likelihood that we were awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
- bool operator()() {return i_ != 0;}
-};
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout);
+ });
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- bool r = cv.wait_until(lk, t, Pred(test2));
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- assert(r);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- assert(!r);
- }
- ++runs;
-}
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ bool result = cv.wait_until(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ assert(Clock::now() >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_until() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> awoken = false;
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would want to assert that the thread has been awoken after this time,
+      // however nothing guarantees that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+}
+int main(int, char**) {
+ test<TestClock>();
+ test<std::chrono::steady_clock>();
+ test<std::chrono::system_clock>();
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
index 95acef90470ec..7110917d1a8b3 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -18,81 +17,105 @@
// wait_for(Lock& lock, const chrono::duration<Rep, Period>& rel_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-bool expect_timeout = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point wait_end = t0 + milliseconds(250);
- Clock::duration d;
- do {
- d = wait_end - Clock::now();
- if (d <= milliseconds(0)) break;
- } while (test2 == 0 && cv.wait_for(lk, d) == std::cv_status::no_timeout);
- Clock::time_point t1 = Clock::now();
- if (!expect_timeout)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
+
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- expect_timeout = true;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready = false;
+ std::atomic<bool> likely_spurious = true;
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_for(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+ });
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ Lock lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ std::cv_status result;
+ do {
+ auto elapsed = measure([&] { result = cv.wait_for(lock, timeout); });
+ if (result == std::cv_status::timeout)
+ assert(elapsed >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
+}
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>>();
+ test<std::unique_lock<std::timed_mutex>>();
+ test<MyLock<std::mutex>>();
+ test<MyLock<std::timed_mutex>>();
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
index 0b560022bc67c..17fcdbdc7603c 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,89 +18,149 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-bool expect_result = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- bool result = cv.wait_for(lk, milliseconds(250), Pred(test2));
- assert(result == expect_result);
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- expect_result = true;
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- expect_result = false;
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
-
- return 0;
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we try to minimize
+ // the likelihood that we got awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> likely_spurious(true);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ bool result = cv.wait_for(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ });
+ assert(elapsed >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_for() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> awoken(false);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
}
+
+int main(int, char**) { test<std::unique_lock<std::mutex>>(); test<std::unique_lock<std::timed_mutex>>(); test<MyLock<std::mutex>>(); test<MyLock<std::timed_mutex>>(); return 0; }
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
index a5e28137bef8c..bb48ab9b34c49 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -17,55 +16,114 @@
// void wait(Lock& lock, Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
#include <mutex>
#include <thread>
-#include <functional>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable_any cv;
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
-L0 m0;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we try to minimize the likelihood that we got awoken by a
+ // spurious wakeup by updating the likely_spurious flag only immediately
+ // before we perform the notification.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> likely_spurious(true);
+ std::condition_variable_any cv;
+ Mutex mutex;
-int test1 = 0;
-int test2 = 0;
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return !likely_spurious; });
+ });
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
- bool operator()() {return i_ != 0;}
-};
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we basically never wake up the condition variable. This way, we
+ // are hoping to get out of the wait via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> awoken(false);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return true; });
+ awoken = true;
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test finishes.
+ cv.notify_one();
+ });
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- cv.wait(lk, Pred(test2));
- assert(test2 != 0);
+ t2.join();
+ t1.join();
+ }
}
-int main(int, char**)
-{
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>>();
+ test<std::unique_lock<std::timed_mutex>>();
+ test<MyLock<std::mutex>>();
+ test<MyLock<std::timed_mutex>>();
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
index 0f2334393d833..86471b11278db 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -18,93 +17,119 @@
// wait_until(Lock& lock, const chrono::time_point<Clock, Duration>& abs_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
- ;
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
+
+template <class Lock, class Clock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_until(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ Lock lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ std::cv_status result;
+ do {
+ result = cv.wait_until(lock, timeout);
+ if (result == std::cv_status::timeout)
+ assert(Clock::now() >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>, TestClock>();
+ test<std::unique_lock<std::mutex>, std::chrono::steady_clock>();
+ test<std::unique_lock<std::mutex>, std::chrono::system_clock>();
+
+ test<std::unique_lock<std::timed_mutex>, TestClock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::steady_clock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::system_clock>();
+
+ test<MyLock<std::mutex>, TestClock>();
+ test<MyLock<std::mutex>, std::chrono::steady_clock>();
+ test<MyLock<std::mutex>, std::chrono::system_clock>();
+ test<MyLock<std::timed_mutex>, TestClock>();
+ test<MyLock<std::timed_mutex>, std::chrono::steady_clock>();
+ test<MyLock<std::timed_mutex>, std::chrono::system_clock>();
return 0;
}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
index aa60ae4715df6..a27539326a3f0 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,103 +19,176 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- bool r = cv.wait_until(lk, t, Pred(test2));
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- assert(r);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- assert(!r);
- }
- ++runs;
+template <class Lock, class Clock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we try to minimize
+ // the likelihood that we got awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ likely_spurious = false;
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ bool result = cv.wait_until(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ assert(Clock::now() >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_until() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+    std::atomic<bool> ready(false);
+    std::atomic<bool> awoken(false);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex). We don't actually need to hold the lock, we
+ // simply use it as a signal that the condition variable has started waiting.
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ assert(awoken || !awoken);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ // Run on multiple threads to speed up the test, and because it ought to work anyways.
+ std::thread tests[] = {
+ support::make_test_thread([] {
+ test<std::unique_lock<std::mutex>, TestClock>();
+ test<std::unique_lock<std::mutex>, std::chrono::steady_clock>();
+ test<std::unique_lock<std::mutex>, std::chrono::system_clock>();
+ }),
+ support::make_test_thread([] {
+ test<std::unique_lock<std::timed_mutex>, TestClock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::steady_clock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::system_clock>();
+ }),
+ support::make_test_thread([] {
+ test<MyLock<std::mutex>, TestClock>();
+ test<MyLock<std::mutex>, std::chrono::steady_clock>();
+ test<MyLock<std::mutex>, std::chrono::system_clock>();
+ }),
+ support::make_test_thread([] {
+ test<MyLock<std::timed_mutex>, TestClock>();
+ test<MyLock<std::timed_mutex>, std::chrono::steady_clock>();
+ test<MyLock<std::timed_mutex>, std::chrono::system_clock>();
+ })};
+
+ for (std::thread& t : tests)
+ t.join();
return 0;
}
More information about the libcxx-commits
mailing list