[libcxx] r336635 - Update google-benchmark to trunk
Chandler Carruth via cfe-commits
cfe-commits at lists.llvm.org
Tue Jul 10 02:50:17 PDT 2018
On Mon, Jul 9, 2018 at 9:06 PM Eric Fiselier via cfe-commits <
cfe-commits at lists.llvm.org> wrote:
> Author: ericwf
> Date: Mon Jul 9 21:02:00 2018
> New Revision: 336635
>
> URL: http://llvm.org/viewvc/llvm-project?rev=336635&view=rev
> Log:
> Update google-benchmark to trunk
>
> Added:
> libcxx/trunk/utils/google-benchmark/cmake/split_list.cmake
> libcxx/trunk/utils/google-benchmark/docs/AssemblyTests.md
> libcxx/trunk/utils/google-benchmark/releasing.md
> libcxx/trunk/utils/google-benchmark/src/benchmark_main.cc
> - copied, changed from r336618, libcxx/trunk/utils/google-benchmark/src/counter.h
> libcxx/trunk/utils/google-benchmark/src/benchmark_register.h
> libcxx/trunk/utils/google-benchmark/src/thread_manager.h
> libcxx/trunk/utils/google-benchmark/src/thread_timer.h
> libcxx/trunk/utils/google-benchmark/test/BUILD
>
Did you intend to commit this file?
Just seems odd to have a BUILD file for the test directory but not the rest.
> libcxx/trunk/utils/google-benchmark/test/benchmark_gtest.cc
>
> libcxx/trunk/utils/google-benchmark/test/clobber_memory_assembly_test.cc
> libcxx/trunk/utils/google-benchmark/test/donotoptimize_assembly_test.cc
> libcxx/trunk/utils/google-benchmark/test/link_main_test.cc
> libcxx/trunk/utils/google-benchmark/test/state_assembly_test.cc
> libcxx/trunk/utils/google-benchmark/test/statistics_gtest.cc
> libcxx/trunk/utils/google-benchmark/test/string_util_gtest.cc
> libcxx/trunk/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
> libcxx/trunk/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
> libcxx/trunk/utils/google-benchmark/tools/strip_asm.py (with props)
> Removed:
> libcxx/trunk/utils/google-benchmark/test/statistics_test.cc
> Modified:
> libcxx/trunk/utils/google-benchmark/AUTHORS
> libcxx/trunk/utils/google-benchmark/CMakeLists.txt
> libcxx/trunk/utils/google-benchmark/CONTRIBUTORS
> libcxx/trunk/utils/google-benchmark/README.md
> libcxx/trunk/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
> libcxx/trunk/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
> libcxx/trunk/utils/google-benchmark/cmake/GetGitVersion.cmake
> libcxx/trunk/utils/google-benchmark/cmake/HandleGTest.cmake
> libcxx/trunk/utils/google-benchmark/include/benchmark/benchmark.h
> libcxx/trunk/utils/google-benchmark/src/CMakeLists.txt
> libcxx/trunk/utils/google-benchmark/src/benchmark.cc
> libcxx/trunk/utils/google-benchmark/src/benchmark_api_internal.h
> libcxx/trunk/utils/google-benchmark/src/benchmark_register.cc
> libcxx/trunk/utils/google-benchmark/src/check.h
> libcxx/trunk/utils/google-benchmark/src/commandlineflags.cc
> libcxx/trunk/utils/google-benchmark/src/complexity.cc
> libcxx/trunk/utils/google-benchmark/src/counter.cc
> libcxx/trunk/utils/google-benchmark/src/counter.h
> libcxx/trunk/utils/google-benchmark/src/csv_reporter.cc
> libcxx/trunk/utils/google-benchmark/src/cycleclock.h
> libcxx/trunk/utils/google-benchmark/src/internal_macros.h
> libcxx/trunk/utils/google-benchmark/src/json_reporter.cc
> libcxx/trunk/utils/google-benchmark/src/log.h
> libcxx/trunk/utils/google-benchmark/src/re.h
> libcxx/trunk/utils/google-benchmark/src/reporter.cc
> libcxx/trunk/utils/google-benchmark/src/statistics.cc
> libcxx/trunk/utils/google-benchmark/src/string_util.cc
> libcxx/trunk/utils/google-benchmark/src/string_util.h
> libcxx/trunk/utils/google-benchmark/src/sysinfo.cc
> libcxx/trunk/utils/google-benchmark/src/timers.cc
> libcxx/trunk/utils/google-benchmark/test/CMakeLists.txt
> libcxx/trunk/utils/google-benchmark/test/basic_test.cc
> libcxx/trunk/utils/google-benchmark/test/benchmark_test.cc
> libcxx/trunk/utils/google-benchmark/test/complexity_test.cc
> libcxx/trunk/utils/google-benchmark/test/donotoptimize_test.cc
> libcxx/trunk/utils/google-benchmark/test/map_test.cc
> libcxx/trunk/utils/google-benchmark/test/multiple_ranges_test.cc
> libcxx/trunk/utils/google-benchmark/test/output_test.h
> libcxx/trunk/utils/google-benchmark/test/output_test_helper.cc
> libcxx/trunk/utils/google-benchmark/test/register_benchmark_test.cc
> libcxx/trunk/utils/google-benchmark/test/reporter_output_test.cc
> libcxx/trunk/utils/google-benchmark/test/skip_with_error_test.cc
> libcxx/trunk/utils/google-benchmark/test/templated_fixture_test.cc
> libcxx/trunk/utils/google-benchmark/test/user_counters_tabular_test.cc
> libcxx/trunk/utils/google-benchmark/test/user_counters_test.cc
> libcxx/trunk/utils/google-benchmark/tools/compare.py
> libcxx/trunk/utils/google-benchmark/tools/gbench/report.py
>
> Modified: libcxx/trunk/utils/google-benchmark/AUTHORS
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/AUTHORS?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/AUTHORS (original)
> +++ libcxx/trunk/utils/google-benchmark/AUTHORS Mon Jul 9 21:02:00 2018
> @@ -13,11 +13,13 @@ Arne Beer <arne at twobeer.de>
> Carto
> Christopher Seymour <chris.j.seymour at hotmail.com>
> David Coeurjolly <david.coeurjolly at liris.cnrs.fr>
> +Deniz Evrenci <denizevrenci at gmail.com>
> Dirac Research
> Dominik Czarnota <dominik.b.czarnota at gmail.com>
> Eric Fiselier <eric at efcs.ca>
> Eugene Zhuk <eugene.zhuk at gmail.com>
> Evgeny Safronov <division494 at gmail.com>
> +Federico Ficarelli <federico.ficarelli at gmail.com>
> Felix Homann <linuxaudio at showlabor.de>
> Google Inc.
> International Business Machines Corporation
> @@ -31,6 +33,7 @@ Kishan Kumar <kumar.kishan at outlook.com>
> Lei Xu <eddyxu at gmail.com>
> Matt Clarkson <mattyclarkson at gmail.com>
> Maxim Vafin <maxvafin at gmail.com>
> +MongoDB Inc.
> Nick Hutchinson <nshutchinson at gmail.com>
> Oleksandr Sochka <sasha.sochka at gmail.com>
> Paul Redmond <paul.redmond at gmail.com>
> @@ -38,6 +41,7 @@ Radoslav Yovchev <radoslav.tm at gmail.com>
> Roman Lebedev <lebedev.ri at gmail.com>
> Shuo Chen <chenshuo at chenshuo.com>
> Steinar H. Gunderson <sgunderson at bigfoot.com>
> +Stripe, Inc.
> Yixuan Qiu <yixuanq at gmail.com>
> Yusuke Suzuki <utatane.tea at gmail.com>
> Zbigniew Skowron <zbychs at gmail.com>
>
> Modified: libcxx/trunk/utils/google-benchmark/CMakeLists.txt
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/CMakeLists.txt?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/CMakeLists.txt (original)
> +++ libcxx/trunk/utils/google-benchmark/CMakeLists.txt Mon Jul 9 21:02:00
> 2018
> @@ -27,10 +27,48 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "
> # in cases where it is not possible to build or find a valid version of
> gtest.
> option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which
> depend on gtest" ON)
>
> +set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
> +function(should_enable_assembly_tests)
> + if(CMAKE_BUILD_TYPE)
> + string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
> + if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
> + # FIXME: The --coverage flag needs to be removed when building
> assembly
> + # tests for this to work.
> + return()
> + endif()
> + endif()
> + if (MSVC)
> + return()
> + elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
> + return()
> + elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
> + # FIXME: Make these work on 32 bit builds
> + return()
> + elseif(BENCHMARK_BUILD_32_BITS)
> + # FIXME: Make these work on 32 bit builds
> + return()
> + endif()
> + find_program(LLVM_FILECHECK_EXE FileCheck)
> + if (LLVM_FILECHECK_EXE)
> + set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm
> filecheck" FORCE)
> + message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
> + else()
> + message(STATUS "Failed to find LLVM FileCheck")
> + return()
> + endif()
> + set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
> +endfunction()
> +should_enable_assembly_tests()
> +
> +# This option disables the building and running of the assembly
> verification tests
> +option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the
> assembly tests"
> + ${ENABLE_ASSEMBLY_TESTS_DEFAULT})
> +
> # Make sure we can import out CMake functions
> list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
> list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
>
> +
> # Read the git tags to determine the project version
> include(GetGitVersion)
> get_git_version(GIT_VERSION)
> @@ -92,7 +130,6 @@ else()
>
> # Turn compiler warnings up to 11
> add_cxx_compiler_flag(-Wall)
> -
> add_cxx_compiler_flag(-Wextra)
> add_cxx_compiler_flag(-Wshadow)
> add_cxx_compiler_flag(-Werror RELEASE)
> @@ -101,8 +138,20 @@ else()
> add_cxx_compiler_flag(-pedantic)
> add_cxx_compiler_flag(-pedantic-errors)
> add_cxx_compiler_flag(-Wshorten-64-to-32)
> - add_cxx_compiler_flag(-Wfloat-equal)
> add_cxx_compiler_flag(-fstrict-aliasing)
> + # Disable warnings regarding deprecated parts of the library while
> building
> + # and testing those parts of the library.
> + add_cxx_compiler_flag(-Wno-deprecated-declarations)
> + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
> + # Intel silently ignores '-Wno-deprecated-declarations',
> + # warning no. 1786 must be explicitly disabled.
> + # See #631 for rationale.
> + add_cxx_compiler_flag(-wd1786)
> + endif()
> + # Disable deprecation warnings for release builds (when -Werror is
> enabled).
> + add_cxx_compiler_flag(-Wno-deprecated RELEASE)
> + add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
> + add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
> if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
> add_cxx_compiler_flag(-fno-exceptions)
> endif()
> @@ -178,7 +227,7 @@ if (BENCHMARK_USE_LIBCXX)
> # linker flags appear before all linker inputs and -lc++ must appear
> after.
> list(APPEND BENCHMARK_CXX_LIBRARIES c++)
> else()
> - message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for
> compiler")
> + message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported
> for compiler")
> endif()
> endif(BENCHMARK_USE_LIBCXX)
>
>
> Modified: libcxx/trunk/utils/google-benchmark/CONTRIBUTORS
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/CONTRIBUTORS?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/CONTRIBUTORS (original)
> +++ libcxx/trunk/utils/google-benchmark/CONTRIBUTORS Mon Jul 9 21:02:00
> 2018
> @@ -28,16 +28,19 @@ Billy Robert O'Neal III <billy.oneal at gma
> Chris Kennelly <ckennelly at google.com> <ckennelly at ckennelly.com>
> Christopher Seymour <chris.j.seymour at hotmail.com>
> David Coeurjolly <david.coeurjolly at liris.cnrs.fr>
> +Deniz Evrenci <denizevrenci at gmail.com>
> Dominic Hamon <dma at stripysock.com> <dominic at google.com>
> Dominik Czarnota <dominik.b.czarnota at gmail.com>
> Eric Fiselier <eric at efcs.ca>
> Eugene Zhuk <eugene.zhuk at gmail.com>
> Evgeny Safronov <division494 at gmail.com>
> +Federico Ficarelli <federico.ficarelli at gmail.com>
> Felix Homann <linuxaudio at showlabor.de>
> Ismael Jimenez Martinez <ismael.jimenez.martinez at gmail.com>
> Jern-Kuan Leong <jernkuan at gmail.com>
> JianXiong Zhou <zhoujianxiong2 at gmail.com>
> Joao Paulo Magalhaes <joaoppmagalhaes at gmail.com>
> +John Millikin <jmillikin at stripe.com>
> Jussi Knuuttila <jussi.knuuttila at gmail.com>
> Kai Wolf <kai.wolf at gmail.com>
> Kishan Kumar <kumar.kishan at outlook.com>
> @@ -53,6 +56,7 @@ Pierre Phaneuf <pphaneuf at google.com>
> Radoslav Yovchev <radoslav.tm at gmail.com>
> Raul Marin <rmrodriguez at cartodb.com>
> Ray Glover <ray.glover at uk.ibm.com>
> +Robert Guo <robert.guo at mongodb.com>
> Roman Lebedev <lebedev.ri at gmail.com>
> Shuo Chen <chenshuo at chenshuo.com>
> Tobias Ulvgård <tobias.ulvgard at dirac.se>
>
> Modified: libcxx/trunk/utils/google-benchmark/README.md
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/README.md?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/README.md (original)
> +++ libcxx/trunk/utils/google-benchmark/README.md Mon Jul 9 21:02:00 2018
> @@ -14,6 +14,8 @@ IRC channel: https://freenode.net #googl
>
> [Additional Tooling Documentation](docs/tools.md)
>
> +[Assembly Testing Documentation](docs/AssemblyTests.md)
> +
>
> ## Building
>
> @@ -21,7 +23,7 @@ The basic steps for configuring and buil
>
> ```bash
> $ git clone https://github.com/google/benchmark.git
> -# Benchmark requires GTest as a dependency. Add the source tree as a
> subdirectory.
> +# Benchmark requires Google Test as a dependency. Add the source tree as
> a subdirectory.
> $ git clone https://github.com/google/googletest.git benchmark/googletest
> $ mkdir build && cd build
> $ cmake -G <generator> [options] ../benchmark
> @@ -29,15 +31,13 @@ $ cmake -G <generator> [options] ../benc
> $ make
> ```
>
> -Note that Google Benchmark requires GTest to build and run the tests. This
> -dependency can be provided three ways:
> +Note that Google Benchmark requires Google Test to build and run the
> tests. This
> +dependency can be provided two ways:
>
> -* Checkout the GTest sources into `benchmark/googletest`.
> +* Checkout the Google Test sources into `benchmark/googletest` as above.
> * Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
> configuration, the library will automatically download and build any
> required
> dependencies.
> -* Otherwise, if nothing is done, CMake will use `find_package(GTest
> REQUIRED)`
> - to resolve the required GTest dependency.
>
> If you do not wish to build and run the tests, add
> `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
> to `CMAKE_ARGS`.
> @@ -59,6 +59,7 @@ Now, let's clone the repository and buil
> ```
> git clone https://github.com/google/benchmark.git
> cd benchmark
> +git clone https://github.com/google/googletest.git
> mkdir build
> cd build
> cmake .. -DCMAKE_BUILD_TYPE=RELEASE
> @@ -71,7 +72,7 @@ We need to install the library globally
> sudo make install
> ```
>
> -Now you have google/benchmark installed in your machine
> +Now you have google/benchmark installed in your machine
> Note: Don't forget to link to pthread library while building
>
> ## Stable and Experimental Library Versions
> @@ -86,6 +87,11 @@ to use, test, and provide feedback on th
> this branch. However, this branch provides no stability guarantees and
> reserves
> the right to change and break the API at any time.
>
> +## Prerequisite knowledge
> +
> +Before attempting to understand this framework one should ideally have
> some familiarity with the structure and format of the Google Test
> framework, upon which it is based. Documentation for Google Test, including
> a "Getting Started" (primer) guide, is available here:
> +
> https://github.com/google/googletest/blob/master/googletest/docs/primer.md
> +
>
> ## Example usage
> ### Basic usage
> @@ -112,7 +118,10 @@ BENCHMARK(BM_StringCopy);
> BENCHMARK_MAIN();
> ```
>
> -Don't forget to inform your linker to add benchmark library e.g. through
> `-lbenchmark` compilation flag.
> +Don't forget to inform your linker to add benchmark library e.g. through
> +`-lbenchmark` compilation flag. Alternatively, you may leave out the
> +`BENCHMARK_MAIN();` at the end of the source file and link against
> +`-lbenchmark_main` to get the same default behavior.
>
> The benchmark library will reporting the timing for the code within the
> `for(...)` loop.
>
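(Aside for anyone following along: a minimal sketch of the two linking modes described above, reusing the README's own BM_StringCopy example; the layout and comments here are just illustrative, not part of this commit.)

```c++
#include <benchmark/benchmark.h>
#include <string>

static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  for (auto _ : state) {
    std::string copy(x);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_StringCopy);

// Either keep BENCHMARK_MAIN(); here and link with -lbenchmark,
// or drop it and link with -lbenchmark_main -lbenchmark to get the
// same default main().
BENCHMARK_MAIN();
```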
> @@ -821,7 +830,7 @@ BM_SetInsert/1024/10
> The JSON format outputs human readable json split into two top level
> attributes.
> The `context` attribute contains information about the run in general,
> including
> information about the CPU and the date.
> -The `benchmarks` attribute contains a list of ever benchmark run. Example
> json
> +The `benchmarks` attribute contains a list of every benchmark run.
> Example json
> output looks like:
> ```json
> {
> @@ -893,8 +902,11 @@ If you are using gcc, you might need to
> If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
> `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
>
> ## Linking against the library
> -When using gcc, it is necessary to link against pthread to avoid runtime
> exceptions.
> -This is due to how gcc implements std::thread.
> +
> +When the library is built using GCC it is necessary to link with
> `-pthread`,
> +due to how GCC implements `std::thread`.
> +
> +For GCC 4.x failing to link to pthreads will lead to runtime exceptions,
> not linker errors.
> See [issue #67](https://github.com/google/benchmark/issues/67) for more
> details.
>
> ## Compiler Support
> @@ -928,8 +940,11 @@ sudo cpupower frequency-set --governor p
>
> # Known Issues
>
> -### Windows
> +### Windows with CMake
>
> * Users must manually link `shlwapi.lib`. Failure to do so may result
> in unresolved symbols.
>
> +### Solaris
> +
> +* Users must explicitly link with kstat library (-lkstat compilation
> flag).
>
> Modified:
> libcxx/trunk/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
> (original)
> +++ libcxx/trunk/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake Mon
> Jul 9 21:02:00 2018
> @@ -62,3 +62,13 @@ function(add_required_cxx_compiler_flag
> message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the
> compiler")
> endif()
> endfunction()
> +
> +function(check_cxx_warning_flag FLAG)
> + mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
> + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
> + # Add -Werror to ensure the compiler generates an error if the warning
> flag
> + # doesn't exist.
> + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
> + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
> + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
> +endfunction()
>
> Modified: libcxx/trunk/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/cmake/CXXFeatureCheck.cmake?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
> (original)
> +++ libcxx/trunk/utils/google-benchmark/cmake/CXXFeatureCheck.cmake Mon
> Jul 9 21:02:00 2018
> @@ -27,25 +27,27 @@ function(cxx_feature_check FILE)
> return()
> endif()
>
> - message("-- Performing Test ${FEATURE}")
> - if(CMAKE_CROSSCOMPILING)
> - try_compile(COMPILE_${FEATURE}
> - ${CMAKE_BINARY_DIR}
> ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
> - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
> - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
> - if(COMPILE_${FEATURE})
> - message(WARNING
> - "If you see build failures due to cross compilation, try
> setting HAVE_${VAR} to 0")
> - set(RUN_${FEATURE} 0)
> + if (NOT DEFINED COMPILE_${FEATURE})
> + message("-- Performing Test ${FEATURE}")
> + if(CMAKE_CROSSCOMPILING)
> + try_compile(COMPILE_${FEATURE}
> + ${CMAKE_BINARY_DIR}
> ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
> + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
> + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
> + if(COMPILE_${FEATURE})
> + message(WARNING
> + "If you see build failures due to cross compilation, try
> setting HAVE_${VAR} to 0")
> + set(RUN_${FEATURE} 0)
> + else()
> + set(RUN_${FEATURE} 1)
> + endif()
> else()
> - set(RUN_${FEATURE} 1)
> + message("-- Performing Test ${FEATURE}")
> + try_run(RUN_${FEATURE} COMPILE_${FEATURE}
> + ${CMAKE_BINARY_DIR}
> ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
> + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
> + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
> endif()
> - else()
> - message("-- Performing Test ${FEATURE}")
> - try_run(RUN_${FEATURE} COMPILE_${FEATURE}
> - ${CMAKE_BINARY_DIR}
> ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
> - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
> - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
> endif()
>
> if(RUN_${FEATURE} EQUAL 0)
>
> Modified: libcxx/trunk/utils/google-benchmark/cmake/GetGitVersion.cmake
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/cmake/GetGitVersion.cmake?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/cmake/GetGitVersion.cmake
> (original)
> +++ libcxx/trunk/utils/google-benchmark/cmake/GetGitVersion.cmake Mon Jul
> 9 21:02:00 2018
> @@ -21,6 +21,7 @@ set(__get_git_version INCLUDED)
> function(get_git_version var)
> if(GIT_EXECUTABLE)
> execute_process(COMMAND ${GIT_EXECUTABLE} describe --match
> "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
> + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
> RESULT_VARIABLE status
> OUTPUT_VARIABLE GIT_VERSION
> ERROR_QUIET)
> @@ -33,9 +34,11 @@ function(get_git_version var)
>
> # Work out if the repository is dirty
> execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
> + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
> OUTPUT_QUIET
> ERROR_QUIET)
> execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only
> HEAD --
> + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
> OUTPUT_VARIABLE GIT_DIFF_INDEX
> ERROR_QUIET)
> string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
>
> Modified: libcxx/trunk/utils/google-benchmark/cmake/HandleGTest.cmake
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/cmake/HandleGTest.cmake?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/cmake/HandleGTest.cmake (original)
> +++ libcxx/trunk/utils/google-benchmark/cmake/HandleGTest.cmake Mon Jul 9
> 21:02:00 2018
> @@ -1,7 +1,5 @@
>
> -macro(split_list listname)
> - string(REPLACE ";" " " ${listname} "${${listname}}")
> -endmacro()
> +include(split_list)
>
> macro(build_external_gtest)
> include(ExternalProject)
> @@ -23,9 +21,22 @@ macro(build_external_gtest)
> if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
> set(GTEST_BUILD_TYPE "DEBUG")
> endif()
> + # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where
> + # -Werror=unused-function fires during the build on OS X. This is a
> temporary
> + # workaround to keep our travis bots from failing. It should be removed
> + # once gtest is fixed.
> + if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
> + list(APPEND GTEST_FLAGS "-Wno-unused-function")
> + endif()
> split_list(GTEST_FLAGS)
> + set(EXCLUDE_FROM_ALL_OPT "")
> + set(EXCLUDE_FROM_ALL_VALUE "")
> + if (${CMAKE_VERSION} VERSION_GREATER "3.0.99")
> + set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL")
> + set(EXCLUDE_FROM_ALL_VALUE "ON")
> + endif()
> ExternalProject_Add(googletest
> - EXCLUDE_FROM_ALL ON
> + ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE}
> GIT_REPOSITORY https://github.com/google/googletest.git
> GIT_TAG master
> PREFIX "${CMAKE_BINARY_DIR}/googletest"
> @@ -35,45 +46,68 @@ macro(build_external_gtest)
> -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
> -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
> -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
> + -DCMAKE_INSTALL_LIBDIR:PATH=<INSTALL_DIR>/lib
> -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
> -Dgtest_force_shared_crt:BOOL=ON
> )
>
> ExternalProject_Get_Property(googletest install_dir)
> -
> - add_library(gtest UNKNOWN IMPORTED)
> - add_library(gtest_main UNKNOWN IMPORTED)
> + set(GTEST_INCLUDE_DIRS ${install_dir}/include)
> + file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS})
>
> set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
> set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
> -
> if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
> set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
> endif()
> - file(MAKE_DIRECTORY ${install_dir}/include)
> - set_target_properties(gtest PROPERTIES
> - IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest${LIB_SUFFIX}
> - INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
> - )
> - set_target_properties(gtest_main PROPERTIES
> - IMPORTED_LOCATION
> ${install_dir}/lib/${LIB_PREFIX}gtest_main${LIB_SUFFIX}
> - INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
> - )
> - add_dependencies(gtest googletest)
> - add_dependencies(gtest_main googletest)
> - set(GTEST_BOTH_LIBRARIES gtest gtest_main)
> - #set(GTEST_INCLUDE_DIRS ${install_dir}/include)
> +
> + # Use gmock_main instead of gtest_main because it initializes gtest as
> well.
> + # Note: The libraries are listed in reverse order of their dependancies.
> + foreach(LIB gtest gmock gmock_main)
> + add_library(${LIB} UNKNOWN IMPORTED)
> + set_target_properties(${LIB} PROPERTIES
> + IMPORTED_LOCATION
> ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX}
> + INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS}
> + INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}"
> + )
> + add_dependencies(${LIB} googletest)
> + list(APPEND GTEST_BOTH_LIBRARIES ${LIB})
> + endforeach()
> endmacro(build_external_gtest)
>
> if (BENCHMARK_ENABLE_GTEST_TESTS)
> if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
> + set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest")
> set(INSTALL_GTEST OFF CACHE INTERNAL "")
> set(INSTALL_GMOCK OFF CACHE INTERNAL "")
> add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
> - set(GTEST_BOTH_LIBRARIES gtest gtest_main)
> + set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
> + foreach(HEADER test mock)
> + # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES,
> so we
> + # have to add the paths ourselves.
> + set(HFILE g${HEADER}/g${HEADER}.h)
> + set(HPATH ${GTEST_ROOT}/google${HEADER}/include)
> + find_path(HEADER_PATH_${HEADER} ${HFILE}
> + NO_DEFAULT_PATHS
> + HINTS ${HPATH}
> + )
> + if (NOT HEADER_PATH_${HEADER})
> + message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}")
> + endif()
> + list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}})
> + endforeach()
> elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
> build_external_gtest()
> else()
> find_package(GTest REQUIRED)
> + find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h
> + HINTS ${GTEST_INCLUDE_DIRS})
> + if (NOT GMOCK_INCLUDE_DIRS)
> + message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint
> ${GTEST_INCLUDE_DIRS}")
> + endif()
> + set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS})
> + # FIXME: We don't currently require the gmock library to build the
> tests,
> + # and it's likely we won't find it, so we don't try. As long as we've
> + # found the gmock/gmock.h header and gtest_main that should be good
> enough.
> endif()
> endif()
>
> Added: libcxx/trunk/utils/google-benchmark/cmake/split_list.cmake
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/cmake/split_list.cmake?rev=336635&view=auto
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/cmake/split_list.cmake (added)
> +++ libcxx/trunk/utils/google-benchmark/cmake/split_list.cmake Mon Jul 9
> 21:02:00 2018
> @@ -0,0 +1,3 @@
> +macro(split_list listname)
> + string(REPLACE ";" " " ${listname} "${${listname}}")
> +endmacro()
>
> Added: libcxx/trunk/utils/google-benchmark/docs/AssemblyTests.md
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/docs/AssemblyTests.md?rev=336635&view=auto
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/docs/AssemblyTests.md (added)
> +++ libcxx/trunk/utils/google-benchmark/docs/AssemblyTests.md Mon Jul 9
> 21:02:00 2018
> @@ -0,0 +1,147 @@
> +# Assembly Tests
> +
> +The Benchmark library provides a number of functions whose primary
> +purpose is to affect assembly generation, including `DoNotOptimize`
> +and `ClobberMemory`. In addition there are other functions,
> +such as `KeepRunning`, for which generating good assembly is paramount.
> +
> +For these functions it's important to have tests that verify the
> +correctness and quality of the implementation. This requires testing
> +the code generated by the compiler.
> +
> +This document describes how the Benchmark library tests compiler output,
> +as well as how to properly write new tests.
> +
> +
> +## Anatomy of a Test
> +
> +Writing a test has two steps:
> +
> +* Write the code you want to generate assembly for.
> +* Add `// CHECK` lines to match against the verified assembly.
> +
> +Example:
> +```c++
> +
> +// CHECK-LABEL: test_add:
> +extern "C" int test_add() {
> + extern int ExternInt;
> + return ExternInt + 1;
> +
> + // CHECK: movl ExternInt(%rip), %eax
> + // CHECK: addl %eax
> + // CHECK: ret
> +}
> +
> +```
> +
> +#### LLVM Filecheck
> +
> +[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
> +is used to test the generated assembly against the `// CHECK` lines
> +specified in the tests source file. Please see the documentation
> +linked above for information on how to write `CHECK` directives.
> +
> +#### Tips and Tricks:
> +
> +* Tests should match the minimal amount of output required to establish
> +correctness. `CHECK` directives don't have to match on the exact next line
> +after the previous match, so tests should omit checks for unimportant
> +bits of assembly. ([`CHECK-NEXT`](
> https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive
> )
> +can be used to ensure a match occurs exactly after the previous match).
> +
> +* The tests are compiled with `-O3 -g0`. So we're only testing the
> +optimized output.
> +
> +* The assembly output is further cleaned up using `tools/strip_asm.py`.
> +This removes comments, assembler directives, and unused labels before
> +the test is run.
> +
> +* The generated and stripped assembly file for a test is output under
> +`<build-directory>/test/<test-name>.s`
> +
> +* Filecheck supports using [`CHECK` prefixes](
> https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes
> )
> +to specify lines that should only match in certain situations.
> +The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
> +are only expected to match Clang or GCC's output respectively. Normal
> +`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
> +`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
> +`CHECK` lines)
> +
> +* Use `extern "C"` to disable name mangling for specific functions. This
> +makes them easier to name in the `CHECK` lines.
> +
> +
> +## Problems Writing Portable Tests
> +
> +Writing tests which check the code generated by a compiler is
> +inherently non-portable. Different compilers and even different compiler
> +versions may generate entirely different code. The Benchmark tests
> +must tolerate this.
> +
> +LLVM Filecheck provides a number of mechanisms to help write
> +"more portable" tests; including [matching using regular expressions](
> https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax
> ),
> +allowing the creation of [named variables](
> https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
> +for later matching, and [checking non-sequential matches](
> https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive
> ).
> +
> +#### Capturing Variables
> +
> +For example, say GCC stores a variable in a register but Clang stores
> +it in memory. To write a test that tolerates both cases we "capture"
> +the destination of the store, and then use the captured expression
> +to write the remainder of the test.
> +
> +```c++
> +// CHECK-LABEL: test_div_no_op_into_shr:
> +extern "C" int test_div_no_op_into_shr(int value) {
> + int divisor = 2;
> + benchmark::DoNotOptimize(divisor); // hide the value from the
> optimizer
> + return value / divisor;
> +
> + // CHECK: movl $2, [[DEST:.*]]
> + // CHECK: idivl [[DEST]]
> + // CHECK: ret
> +}
> +```
> +
> +#### Using Regular Expressions to Match Differing Output
> +
> +Often tests require testing assembly lines which may subtly differ
> +between compilers or compiler versions. A common example of this
> +is matching stack frame addresses. In this case regular expressions
> +can be used to match the differing bits of output. For example:
> +
> +```c++
> +int ExternInt;
> +struct Point { int x, y, z; };
> +
> +// CHECK-LABEL: test_store_point:
> +extern "C" void test_store_point() {
> + Point p{ExternInt, ExternInt, ExternInt};
> + benchmark::DoNotOptimize(p);
> +
> + // CHECK: movl ExternInt(%rip), %eax
> + // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
> + // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
> + // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
> + // CHECK: ret
> +}
> +```
> +
> +## Current Requirements and Limitations
> +
> +The tests require Filecheck to be installed along the `PATH` of the
> +build machine. Otherwise the tests will be disabled.
> +
> +Additionally, as mentioned in the previous section, codegen tests are
> +inherently non-portable. Currently the tests are limited to:
> +
> +* x86_64 targets.
> +* Compiled with GCC or Clang
> +
> +Further work could be done, at least on a limited basis, to extend the
> +tests to other architectures and compilers (using `CHECK` prefixes).
> +
> +Furthermore, the tests fail for builds which specify additional flags
> +that modify code generation, including `--coverage` or `-fsanitize=`.
> +
>
> Modified: libcxx/trunk/utils/google-benchmark/include/benchmark/benchmark.h
> URL:
> http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/include/benchmark/benchmark.h?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/include/benchmark/benchmark.h
> (original)
> +++ libcxx/trunk/utils/google-benchmark/include/benchmark/benchmark.h Mon
> Jul 9 21:02:00 2018
> @@ -164,7 +164,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMil
> #ifndef BENCHMARK_BENCHMARK_H_
> #define BENCHMARK_BENCHMARK_H_
>
> -
> // The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and
> newer.
> #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >=
> 201103L)
> #define BENCHMARK_HAS_CXX11
> @@ -172,22 +171,23 @@ BENCHMARK(BM_test)->Unit(benchmark::kMil
>
> #include <stdint.h>
>
> +#include <algorithm>
> #include <cassert>
> #include <cstddef>
> #include <iosfwd>
> -#include <string>
> -#include <vector>
> #include <map>
> #include <set>
> +#include <string>
> +#include <vector>
>
> #if defined(BENCHMARK_HAS_CXX11)
> -#include <type_traits>
> #include <initializer_list>
> +#include <type_traits>
> #include <utility>
> #endif
>
> #if defined(_MSC_VER)
> -#include <intrin.h> // for _ReadWriteBarrier
> +#include <intrin.h> // for _ReadWriteBarrier
> #endif
>
> #ifndef BENCHMARK_HAS_CXX11
> @@ -226,13 +226,15 @@ BENCHMARK(BM_test)->Unit(benchmark::kMil
> #define BENCHMARK_INTERNAL_TOSTRING2(x) #x
> #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
>
> -#if defined(__GNUC__)
> +#if defined(__GNUC__) || defined(__clang__)
> #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
> #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
> #else
> #define BENCHMARK_BUILTIN_EXPECT(x, y) x
> #define BENCHMARK_DEPRECATED_MSG(msg)
> -#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "("
> BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg))
> +#define BENCHMARK_WARNING_MSG(msg) \
> + __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
> + __LINE__) ") : warning note: " msg))
> #endif
>
> #if defined(__GNUC__) && !defined(__clang__)
> @@ -289,13 +291,11 @@ BENCHMARK_UNUSED static int stream_init_
>
> } // namespace internal
>
> -
> #if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
> - defined(EMSCRIPTN)
> -# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
> + defined(__EMSCRIPTEN__)
> +#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
> #endif
>
> -
> // The DoNotOptimize(...) function can be used to prevent a value or
> // expression from being optimized away by the compiler. This function is
> // intended to add little to no overhead.
> @@ -303,14 +303,18 @@ BENCHMARK_UNUSED static int stream_init_
> #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
> template <class Tp>
> inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
> - // Clang doesn't like the 'X' constraint on `value` and certain GCC
> versions
> - // don't like the 'g' constraint. Attempt to placate them both.
> + asm volatile("" : : "r,m"(value) : "memory");
> +}
> +
> +template <class Tp>
> +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
> #if defined(__clang__)
> - asm volatile("" : : "g"(value) : "memory");
> + asm volatile("" : "+r,m"(value) : : "memory");
> #else
> - asm volatile("" : : "i,r,m"(value) : "memory");
> + asm volatile("" : "+m,r"(value) : : "memory");
> #endif
> }
> +
> // Force the compiler to flush pending writes to global memory. Acts as an
> // effective read/write barrier
> inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
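(Reviewer aside: my reading of how the new DoNotOptimize overloads are meant to be used, as a sketch; the benchmark and variable names below are mine, not from the patch.)

```c++
#include <benchmark/benchmark.h>

static void BM_Add(benchmark::State& state) {
  int base = 1;
  while (state.KeepRunning()) {
    int sum = base + 2;
    // Non-const overload: the value is treated as read and possibly written,
    // so the addition above cannot be folded away.
    benchmark::DoNotOptimize(sum);
    // Force any pending writes to memory to actually happen.
    benchmark::ClobberMemory();
  }
}
BENCHMARK(BM_Add);
```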
> @@ -323,9 +327,7 @@ inline BENCHMARK_ALWAYS_INLINE void DoNo
> _ReadWriteBarrier();
> }
>
> -inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
> - _ReadWriteBarrier();
> -}
> +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
> _ReadWriteBarrier(); }
> #else
> template <class Tp>
> inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
> @@ -334,39 +336,54 @@ inline BENCHMARK_ALWAYS_INLINE void DoNo
> // FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
> #endif
>
> -
> -
> // This class is used for user-defined counters.
> class Counter {
> -public:
> -
> + public:
> enum Flags {
> - kDefaults = 0,
> + kDefaults = 0,
> // Mark the counter as a rate. It will be presented divided
> // by the duration of the benchmark.
> - kIsRate = 1,
> + kIsRate = 1U << 0U,
> // Mark the counter as a thread-average quantity. It will be
> // presented divided by the number of threads.
> - kAvgThreads = 2,
> + kAvgThreads = 1U << 1U,
> // Mark the counter as a thread-average rate. See above.
> - kAvgThreadsRate = kIsRate|kAvgThreads
> + kAvgThreadsRate = kIsRate | kAvgThreads,
> + // Mark the counter as a constant value, valid/same for *every*
> iteration.
> + // When reporting, it will be *multiplied* by the iteration count.
> + kIsIterationInvariant = 1U << 2U,
> + // Mark the counter as a constant rate.
> + // When reporting, it will be *multiplied* by the iteration count
> + // and then divided by the duration of the benchmark.
> + kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
> + // Mark the counter as an iteration-average quantity.
> + // It will be presented divided by the number of iterations.
> + kAvgIterations = 1U << 3U,
> + // Mark the counter as an iteration-average rate. See above.
> + kAvgIterationsRate = kIsRate | kAvgIterations
> };
>
> double value;
> - Flags flags;
> + Flags flags;
>
> BENCHMARK_ALWAYS_INLINE
> Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}
>
> - BENCHMARK_ALWAYS_INLINE operator double const& () const { return value;
> }
> - BENCHMARK_ALWAYS_INLINE operator double & () { return value;
> }
> -
> + BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
> + BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
> };
>
> +// A helper for user code to create unforeseen combinations of Flags,
> without
> +// having to do this cast manually each time, or providing this operator.
> +Counter::Flags inline operator|(const Counter::Flags& LHS,
> + const Counter::Flags& RHS) {
> + return static_cast<Counter::Flags>(static_cast<int>(LHS) |
> + static_cast<int>(RHS));
> +}
> +
> // This is the container for the user-defined counters.
> typedef std::map<std::string, Counter> UserCounters;
>
> -
> // TimeUnit is passed to a benchmark in order to specify the order of
> magnitude
> // for the measured time.
> enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
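(A sketch of how the new counter flags and the Flags operator| appear intended to be used; the counter names and values are made up.)

```c++
#include <benchmark/benchmark.h>

static void BM_Chunk(benchmark::State& state) {
  for (auto _ : state) {
    // ... process one fixed-size 4096-byte chunk per iteration ...
  }
  // Constant per-iteration quantity: reported multiplied by the iteration count.
  state.counters["Bytes"] =
      benchmark::Counter(4096, benchmark::Counter::kIsIterationInvariant);
  // The new operator| combines flags without a manual cast.
  state.counters["BytesPerSecond"] = benchmark::Counter(
      4096, benchmark::Counter::kIsIterationInvariant | benchmark::Counter::kIsRate);
}
BENCHMARK(BM_Chunk);
```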
> @@ -379,7 +396,7 @@ enum BigO { oNone, o1, oN, oNSquared, oN
>
> // BigOFunc is passed to a benchmark in order to specify the asymptotic
> // computational complexity for the benchmark.
> -typedef double(BigOFunc)(int);
> +typedef double(BigOFunc)(int64_t);
>
> // StatisticsFunc is passed to a benchmark in order to compute some
> descriptive
> // statistics over all the measurements of some type
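(Since BigOFunc now takes int64_t, a user-supplied complexity lambda presumably changes accordingly; a rough sketch, with the benchmark body elided.)

```c++
#include <benchmark/benchmark.h>
#include <vector>

static void BM_Sort(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)));
  for (auto _ : state) {
    // ... sort or otherwise touch v ...
    benchmark::DoNotOptimize(v.data());
  }
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Sort)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    // The complexity callback parameter is now int64_t rather than int.
    ->Complexity([](int64_t n) { return static_cast<double>(n); });
```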
> @@ -390,7 +407,7 @@ struct Statistics {
> StatisticsFunc* compute_;
>
> Statistics(std::string name, StatisticsFunc* compute)
> - : name_(name), compute_(compute) {}
> + : name_(name), compute_(compute) {}
> };
>
> namespace internal {
> @@ -399,14 +416,12 @@ class ThreadManager;
>
> enum ReportMode
> #if defined(BENCHMARK_HAS_CXX11)
> - : unsigned
> + : unsigned
> #else
> #endif
> - {
> - RM_Unspecified, // The mode has not been manually specified
> +{ RM_Unspecified, // The mode has not been manually specified
> RM_Default, // The mode is user-specified as default.
> - RM_ReportAggregatesOnly
> -};
> + RM_ReportAggregatesOnly };
> } // namespace internal
>
> // State is passed to a running Benchmark and contains state for the
> @@ -429,16 +444,19 @@ class State {
> // Returns true if the benchmark should continue through another
> iteration.
> // NOTE: A benchmark may not return from the test until KeepRunning()
> has
> // returned false.
> - bool KeepRunning() {
> - if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
> - StartKeepRunning();
> - }
> - bool const res = (--total_iterations_ != 0);
> - if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
> - FinishKeepRunning();
> - }
> - return res;
> - }
> + bool KeepRunning();
> +
> + // Returns true iff the benchmark should run n more iterations.
> + // REQUIRES: 'n' > 0.
> + // NOTE: A benchmark must not return from the test until
> KeepRunningBatch()
> + // has returned false.
> + // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
> + //
> + // Intended usage:
> + // while (state.KeepRunningBatch(1000)) {
> + // // process 1000 elements
> + // }
> + bool KeepRunningBatch(size_t n);
>
> // REQUIRES: timer is running and 'SkipWithError(...)' has not been
> called
> // by the current thread.
> @@ -505,10 +523,10 @@ class State {
> //
> // REQUIRES: a benchmark has exited its benchmarking loop.
> BENCHMARK_ALWAYS_INLINE
> - void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
> + void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
>
> BENCHMARK_ALWAYS_INLINE
> - size_t bytes_processed() const { return bytes_processed_; }
> + int64_t bytes_processed() const { return bytes_processed_; }
>
> // If this routine is called with complexity_n > 0 and complexity
> report is
> // requested for the
> @@ -516,10 +534,10 @@ class State {
> // and complexity_n will
> // represent the length of N.
> BENCHMARK_ALWAYS_INLINE
> - void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
> + void SetComplexityN(int64_t complexity_n) { complexity_n_ =
> complexity_n; }
>
> BENCHMARK_ALWAYS_INLINE
> - int complexity_length_n() { return complexity_n_; }
> + int64_t complexity_length_n() { return complexity_n_; }
>
> // If this routine is called with items > 0, then an items/s
> // label is printed on the benchmark report line for the currently
> @@ -528,10 +546,10 @@ class State {
> //
> // REQUIRES: a benchmark has exited its benchmarking loop.
> BENCHMARK_ALWAYS_INLINE
> - void SetItemsProcessed(size_t items) { items_processed_ = items; }
> + void SetItemsProcessed(int64_t items) { items_processed_ = items; }
>
> BENCHMARK_ALWAYS_INLINE
> - size_t items_processed() const { return items_processed_; }
> + int64_t items_processed() const { return items_processed_; }
>
> // If this routine is called, the specified label is printed at the
> // end of the benchmark report line for the currently executing
> @@ -539,7 +557,7 @@ class State {
> // static void BM_Compress(benchmark::State& state) {
> // ...
> // double compress = input_size / output_size;
> - // state.SetLabel(StringPrintf("compress:%.1f%%",
> 100.0*compression));
> + // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
> // }
> // Produces output that looks like:
> // BM_Compress 50 50 14115038 compress:27.3%
> @@ -553,33 +571,51 @@ class State {
>
> // Range arguments for this run. CHECKs if the argument has been set.
> BENCHMARK_ALWAYS_INLINE
> - int range(std::size_t pos = 0) const {
> + int64_t range(std::size_t pos = 0) const {
> assert(range_.size() > pos);
> return range_[pos];
> }
>
> BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
> - int range_x() const { return range(0); }
> + int64_t range_x() const { return range(0); }
>
> BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
> - int range_y() const { return range(1); }
> + int64_t range_y() const { return range(1); }
>
> BENCHMARK_ALWAYS_INLINE
> - size_t iterations() const { return (max_iterations - total_iterations_)
> + 1; }
> + size_t iterations() const {
> + if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
> + return 0;
> + }
> + return max_iterations - total_iterations_ + batch_leftover_;
> + }
> +
> + private
> + : // items we expect on the first cache line (ie 64 bytes of the
> struct)
> + // When total_iterations_ is 0, KeepRunning() and friends will return
> false.
> + // May be larger than max_iterations.
> + size_t total_iterations_;
> +
> + // When using KeepRunningBatch(), batch_leftover_ holds the number of
> + // iterations beyond max_iters that were run. Used to track
> + // completed_iterations_ accurately.
> + size_t batch_leftover_;
> +
> + public:
> + const size_t max_iterations;
>
> private:
> bool started_;
> bool finished_;
> - size_t total_iterations_;
> -
> - std::vector<int> range_;
> + bool error_occurred_;
>
> - size_t bytes_processed_;
> - size_t items_processed_;
> + private: // items we don't need on the first cache line
> + std::vector<int64_t> range_;
>
> - int complexity_n_;
> + int64_t bytes_processed_;
> + int64_t items_processed_;
>
> - bool error_occurred_;
> + int64_t complexity_n_;
>
> public:
> // Container for user-defined counters.
> @@ -588,27 +624,66 @@ class State {
> const int thread_index;
> // Number of threads concurrently executing the benchmark.
> const int threads;
> - const size_t max_iterations;
>
> // TODO(EricWF) make me private
> - State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
> + State(size_t max_iters, const std::vector<int64_t>& ranges, int
> thread_i,
> int n_threads, internal::ThreadTimer* timer,
> internal::ThreadManager* manager);
>
> private:
> void StartKeepRunning();
> + // Implementation of KeepRunning() and KeepRunningBatch().
> + // is_batch must be true unless n is 1.
> + bool KeepRunningInternal(size_t n, bool is_batch);
> void FinishKeepRunning();
> internal::ThreadTimer* timer_;
> internal::ThreadManager* manager_;
> BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
> };
>
> +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
> + return KeepRunningInternal(1, /*is_batch=*/false);
> +}
> +
> +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(size_t n) {
> + return KeepRunningInternal(n, /*is_batch=*/true);
> +}
> +
> +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(size_t n,
> + bool
> is_batch) {
> + // total_iterations_ is set to 0 by the constructor, and always set to a
> + // nonzero value by StartKeepRunning().
> + assert(n > 0);
> + // n must be 1 unless is_batch is true.
> + assert(is_batch || n == 1);
> + if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
> + total_iterations_ -= n;
> + return true;
> + }
> + if (!started_) {
> + StartKeepRunning();
> + if (!error_occurred_ && total_iterations_ >= n) {
> + total_iterations_ -= n;
> + return true;
> + }
> + }
> + // For non-batch runs, total_iterations_ must be 0 by now.
> + if (is_batch && total_iterations_ != 0) {
> + batch_leftover_ = n - total_iterations_;
> + total_iterations_ = 0;
> + return true;
> + }
> + FinishKeepRunning();
> + return false;
> +}
> +
> struct State::StateIterator {
> struct BENCHMARK_UNUSED Value {};
> typedef std::forward_iterator_tag iterator_category;
> typedef Value value_type;
> typedef Value reference;
> typedef Value pointer;
> + typedef std::ptrdiff_t difference_type;
>
> private:
> friend class State;
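(The documented intent for KeepRunningBatch(), spelled out as a small example; the batch size of 1000 matches the usage comment above, everything else is illustrative.)

```c++
#include <benchmark/benchmark.h>
#include <vector>

static void BM_Batched(benchmark::State& state) {
  std::vector<int> data(1000, 1);
  // Claim 1000 iterations per pass; the loop may overshoot by up to one batch.
  while (state.KeepRunningBatch(data.size())) {
    for (int& v : data) {
      v += 1;
      benchmark::DoNotOptimize(v);
    }
  }
}
BENCHMARK(BM_Batched);
```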
> @@ -670,7 +745,7 @@ class Benchmark {
> // Run this benchmark once with "x" as the extra argument passed
> // to the function.
> // REQUIRES: The function passed to the constructor must accept an arg1.
> - Benchmark* Arg(int x);
> + Benchmark* Arg(int64_t x);
>
> // Run this benchmark with the given time unit for the generated output
> report
> Benchmark* Unit(TimeUnit unit);
> @@ -678,23 +753,23 @@ class Benchmark {
> // Run this benchmark once for a number of values picked from the
> // range [start..limit]. (start and limit are always picked.)
> // REQUIRES: The function passed to the constructor must accept an arg1.
> - Benchmark* Range(int start, int limit);
> + Benchmark* Range(int64_t start, int64_t limit);
>
> // Run this benchmark once for all values in the range [start..limit]
> with
> // specific step
> // REQUIRES: The function passed to the constructor must accept an arg1.
> - Benchmark* DenseRange(int start, int limit, int step = 1);
> + Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
>
> // Run this benchmark once with "args" as the extra arguments passed
> // to the function.
> // REQUIRES: The function passed to the constructor must accept arg1,
> arg2 ...
> - Benchmark* Args(const std::vector<int>& args);
> + Benchmark* Args(const std::vector<int64_t>& args);
>
> // Equivalent to Args({x, y})
> // NOTE: This is a legacy C++03 interface provided for compatibility
> only.
> // New code should use 'Args'.
> - Benchmark* ArgPair(int x, int y) {
> - std::vector<int> args;
> + Benchmark* ArgPair(int64_t x, int64_t y) {
> + std::vector<int64_t> args;
> args.push_back(x);
> args.push_back(y);
> return Args(args);
> @@ -703,7 +778,7 @@ class Benchmark {
> // Run this benchmark once for a number of values picked from the
> // ranges [start..limit]. (starts and limits are always picked.)
> // REQUIRES: The function passed to the constructor must accept arg1,
> arg2 ...
> - Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
> + Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >&
> ranges);
>
> // Equivalent to ArgNames({name})
> Benchmark* ArgName(const std::string& name);
> @@ -715,8 +790,8 @@ class Benchmark {
> // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
> // NOTE: This is a legacy C++03 interface provided for compatibility
> only.
> // New code should use 'Ranges'.
> - Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
> - std::vector<std::pair<int, int> > ranges;
> + Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t
> hi2) {
> + std::vector<std::pair<int64_t, int64_t> > ranges;
> ranges.push_back(std::make_pair(lo1, hi1));
> ranges.push_back(std::make_pair(lo2, hi2));
> return Ranges(ranges);
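(With the registration API now on int64_t, argument values past 2^31 should register directly; a quick sketch.)

```c++
#include <benchmark/benchmark.h>

static void BM_LargeArg(benchmark::State& state) {
  int64_t n = state.range(0);  // range() now returns int64_t
  for (auto _ : state) {
    benchmark::DoNotOptimize(n);
  }
}
// Arguments no longer need to fit in a 32-bit int.
BENCHMARK(BM_LargeArg)->Arg(1 << 20)->Arg(int64_t(1) << 32);
```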
> @@ -823,15 +898,13 @@ class Benchmark {
>
> int ArgsCnt() const;
>
> - static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);
> -
> private:
> friend class BenchmarkFamilies;
>
> std::string name_;
> ReportMode report_mode_;
> - std::vector<std::string> arg_names_; // Args for all benchmark runs
> - std::vector<std::vector<int> > args_; // Args for all benchmark runs
> + std::vector<std::string> arg_names_; // Args for all benchmark
> runs
> + std::vector<std::vector<int64_t> > args_; // Args for all benchmark
> runs
> TimeUnit time_unit_;
> int range_multiplier_;
> double min_time_;
> @@ -1055,7 +1128,7 @@ class Fixture : public internal::Benchma
> class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
> public: \
> BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
> - this->SetName(#BaseClass"<" #a ">/" #Method); \
> + this->SetName(#BaseClass "<" #a ">/" #Method); \
> } \
> \
> protected: \
> @@ -1066,7 +1139,7 @@ class Fixture : public internal::Benchma
> class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
> public: \
> BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
> - this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
> + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
> } \
> \
> protected: \
> @@ -1078,14 +1151,15 @@ class Fixture : public internal::Benchma
> class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__>
> { \
> public:
> \
> BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() {
> \
> - this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method);
> \
> + this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method);
> \
> }
> \
>
> \
> protected:
> \
> virtual void BenchmarkCase(::benchmark::State&);
> \
> };
> #else
> -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a)
> BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
> +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
> + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
> #endif
>
> #define BENCHMARK_DEFINE_F(BaseClass, Method) \
> @@ -1105,7 +1179,8 @@ class Fixture : public internal::Benchma
> BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
> void BaseClass##_##Method##_Benchmark::BenchmarkCase
> #else
> -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a)
> BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
> +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
> + BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
> #endif
>
> #define BENCHMARK_REGISTER_F(BaseClass, Method) \
> @@ -1132,24 +1207,24 @@ class Fixture : public internal::Benchma
> void BaseClass##_##Method##_Benchmark::BenchmarkCase
>
> #ifdef BENCHMARK_HAS_CXX11
> -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
> +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
> BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
> - BENCHMARK_REGISTER_F(BaseClass, Method); \
> + BENCHMARK_REGISTER_F(BaseClass, Method); \
> void BaseClass##_##Method##_Benchmark::BenchmarkCase
> #else
> -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a)
> BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
> +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
> + BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
> #endif
>
> // Helper macro to create a main routine in a test that runs the
> benchmarks
> -#define BENCHMARK_MAIN() \
> - int main(int argc, char** argv) { \
> - ::benchmark::Initialize(&argc, argv); \
> +#define BENCHMARK_MAIN() \
> + int main(int argc, char** argv) { \
> + ::benchmark::Initialize(&argc, argv); \
> if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
> - ::benchmark::RunSpecifiedBenchmarks(); \
> - } \
> + ::benchmark::RunSpecifiedBenchmarks(); \
> + } \
> int main(int, char**)
>
> -
> // ------------------------------------------------------
> // Benchmark Reporters
>
> @@ -1186,7 +1261,7 @@ class BenchmarkReporter {
> CPUInfo const& cpu_info;
> // The number of chars in the longest benchmark name.
> size_t name_field_width;
> -
> + static const char* executable_name;
> Context();
> };
>
> @@ -1239,7 +1314,7 @@ class BenchmarkReporter {
> // Keep track of arguments to compute asymptotic complexity
> BigO complexity;
> BigOFunc* complexity_lambda;
> - int complexity_n;
> + int64_t complexity_n;
>
> // what statistics to compute from the measurements
> const std::vector<Statistics>* statistics;
> @@ -1309,17 +1384,19 @@ class BenchmarkReporter {
> // Simple reporter that outputs benchmark data to the console. This is the
> // default reporter used by RunSpecifiedBenchmarks().
> class ConsoleReporter : public BenchmarkReporter {
> -public:
> + public:
> enum OutputOptions {
> OO_None = 0,
> OO_Color = 1,
> OO_Tabular = 2,
> - OO_ColorTabular = OO_Color|OO_Tabular,
> + OO_ColorTabular = OO_Color | OO_Tabular,
> OO_Defaults = OO_ColorTabular
> };
> explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
> - : output_options_(opts_), name_field_width_(0),
> - prev_counters_(), printed_header_(false) {}
> + : output_options_(opts_),
> + name_field_width_(0),
> + prev_counters_(),
> + printed_header_(false) {}
>
> virtual bool ReportContext(const Context& context);
> virtual void ReportRuns(const std::vector<Run>& reports);
> @@ -1347,7 +1424,8 @@ class JSONReporter : public BenchmarkRep
> bool first_report_;
> };
>
> -class CSVReporter : public BenchmarkReporter {
> +class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a
> future release")
> + CSVReporter : public BenchmarkReporter {
> public:
> CSVReporter() : printed_header_(false) {}
> virtual bool ReportContext(const Context& context);
> @@ -1357,7 +1435,7 @@ class CSVReporter : public BenchmarkRepo
> void PrintRunData(const Run& report);
>
> bool printed_header_;
> - std::set< std::string > user_counter_names_;
> + std::set<std::string> user_counter_names_;
> };
>
> inline const char* GetTimeUnitString(TimeUnit unit) {
> @@ -1384,6 +1462,6 @@ inline double GetTimeUnitMultiplier(Time
> }
> }
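
These two helpers just map a TimeUnit to its label and seconds-to-unit scale
(kMillisecond to "ms"/1e3, kMicrosecond to "us"/1e6, kNanosecond to "ns"/1e9).
They kick in when a benchmark opts into a non-default unit, e.g. (BM_SlowOp is
a made-up name):

    #include <benchmark/benchmark.h>

    static void BM_SlowOp(benchmark::State& state) {
      for (auto _ : state) {
        // ... something that takes on the order of milliseconds ...
      }
    }
    // Reporters will print this one in "ms", scaling measured seconds by 1e3.
    BENCHMARK(BM_SlowOp)->Unit(benchmark::kMillisecond);
    BENCHMARK_MAIN();
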
>
> -} // namespace benchmark
> +} // namespace benchmark
>
> #endif // BENCHMARK_BENCHMARK_H_
>
> Added: libcxx/trunk/utils/google-benchmark/releasing.md
> URL: http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/releasing.md?rev=336635&view=auto
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/releasing.md (added)
> +++ libcxx/trunk/utils/google-benchmark/releasing.md Mon Jul 9 21:02:00 2018
> @@ -0,0 +1,16 @@
> +# How to release
> +
> +* Make sure you're on master and synced to HEAD
> +* Ensure the project builds and tests run (sanity check only, obviously)
> +  * `parallel -j0 exec ::: test/*_test` can help ensure everything at least
> +    passes
> +* Prepare release notes
> +  * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
> +    commits between the last annotated tag and HEAD
> + * Pick the most interesting.
> +* Create a release through github's interface
> + * Note this will create a lightweight tag.
> + * Update this to an annotated tag:
> + * `git pull --tags`
> + * `git tag -a -f <tag> <tag>`
> + * `git push --force origin`
>
> Modified: libcxx/trunk/utils/google-benchmark/src/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/src/CMakeLists.txt?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/src/CMakeLists.txt (original)
> +++ libcxx/trunk/utils/google-benchmark/src/CMakeLists.txt Mon Jul 9 21:02:00 2018
> @@ -11,6 +11,10 @@ file(GLOB
> *.cc
> ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
> ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
> +file(GLOB BENCHMARK_MAIN "benchmark_main.cc")
> +foreach(item ${BENCHMARK_MAIN})
> + list(REMOVE_ITEM SOURCE_FILES "${item}")
> +endforeach()
>
> add_library(benchmark ${SOURCE_FILES})
> set_target_properties(benchmark PROPERTIES
> @@ -34,6 +38,23 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows
> target_link_libraries(benchmark Shlwapi)
> endif()
>
> +# We need extra libraries on Solaris
> +if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
> + target_link_libraries(benchmark kstat)
> +endif()
> +
> +# Benchmark main library
> +add_library(benchmark_main "benchmark_main.cc")
> +set_target_properties(benchmark_main PROPERTIES
> + OUTPUT_NAME "benchmark_main"
> + VERSION ${GENERIC_LIB_VERSION}
> + SOVERSION ${GENERIC_LIB_SOVERSION}
> +)
> +target_include_directories(benchmark PUBLIC
> + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
> + )
> +target_link_libraries(benchmark_main benchmark)
> +
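
Good to see benchmark_main picked up as well: linking it supplies main() (upstream,
benchmark_main.cc is essentially just BENCHMARK_MAIN()), so a benchmark TU can skip
the macro entirely. Illustrative only; the names and link line are made up:

    // Build roughly as:  c++ my_bench.cc -lbenchmark_main -lbenchmark -lpthread
    #include <benchmark/benchmark.h>

    static void BM_Noop(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Noop);
    // No BENCHMARK_MAIN() here; main() comes from the benchmark_main library.
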
> set(include_install_dir "include")
> set(lib_install_dir "lib/")
> set(bin_install_dir "bin/")
> @@ -51,7 +72,7 @@ set(namespace "${PROJECT_NAME}::")
>
> include(CMakePackageConfigHelpers)
> write_basic_package_version_file(
> -  "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
> +  "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion
> )
>
> configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
> @@ -60,7 +81,7 @@ configure_file("${PROJECT_SOURCE_DIR}/cm
> if (BENCHMARK_ENABLE_INSTALL)
>   # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
> install(
> - TARGETS benchmark
> + TARGETS benchmark benchmark_main
> EXPORT ${targets_export_name}
> ARCHIVE DESTINATION ${lib_install_dir}
> LIBRARY DESTINATION ${lib_install_dir}
>
> Modified: libcxx/trunk/utils/google-benchmark/src/benchmark.cc
> URL: http://llvm.org/viewvc/llvm-project/libcxx/trunk/utils/google-benchmark/src/benchmark.cc?rev=336635&r1=336634&r2=336635&view=diff
>
> ==============================================================================
> --- libcxx/trunk/utils/google-benchmark/src/benchmark.cc (original)
> +++ libcxx/trunk/utils/google-benchmark/src/benchmark.cc Mon Jul 9 21:02:00 2018
> @@ -17,7 +17,9 @@
> #include "internal_macros.h"
>
> #ifndef BENCHMARK_OS_WINDOWS
> +#ifndef BENCHMARK_OS_FUCHSIA
> #include <sys/resource.h>
> +#endif
> #include <sys/time.h>
> #include <unistd.h>
> #endif
> @@ -27,10 +29,10 @@
> #include <condition_variable>
> #include <cstdio>
> #include <cstdlib>
> -#include <cstring>
> #include <fstream>
> #include <iostream>
> #include <memory>
> +#include <string>
> #include <thread>
>
> #include "check.h"
> @@ -44,7 +46,8 @@
> #include "re.h"
> #include "statistics.h"
> #include "string_util.h"
> -#include "timers.h"
> +#include "thread_manager.h"
> +#include "thread_timer.h"
>
> DEFINE_bool(benchmark_list_tests, false,
> "Print a list of benchmarks. This option overrides all other "
> @@ -82,7 +85,7 @@ DEFINE_string(benchmark_out_format, "jso
> "The format to use for file output. Valid values are "
> "'console', 'json', or 'csv'.");
>
> -DEFINE_string(benchmark_out, "", "The file to write additonal output to");
> +DEFINE_string(benchmark_out, "", "The file to write additional output to");
>
> DEFINE_string(benchmark_color, "auto",
> "Whether to use colors in the output. Valid values: "
> @@ -108,119 +111,11 @@ namespace internal {
>
> void UseCharPointer(char const volatile*) {}
>
> -class ThreadManager {
> - public:
> - ThreadManager(int num_threads)
> - : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
> -
> - Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
> - return benchmark_mutex_;
> - }
> -
> - bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
> - return start_stop_barrier_.wait();
> - }
> -
> - void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
> - start_stop_barrier_.removeThread();
> - if (--alive_threads_ == 0) {
> - MutexLock lock(end_cond_mutex_);
> - end_condition_.notify_all();
> - }
> - }
> -
> - void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
> - MutexLock lock(end_cond_mutex_);
> - end_condition_.wait(lock.native_handle(),
> - [this]() { return alive_threads_ == 0; });
> - }
> -
> - public:
> - struct Result {
> - double real_time_used = 0;
> - double cpu_time_used = 0;
> - double manual_time_used = 0;
> - int64_t bytes_processed = 0;
> - int64_t items_processed = 0;
> - int complexity_n = 0;
> - std::string report_label_;
> - std::string error_message_;
> - bool has_error_ = false;
> - UserCounters counters;
> - };
> - GUARDED_BY(GetBenchmarkMutex()) Result results;
> -
> - private:
> - mutable Mutex benchmark_mutex_;
> - std::atomic<int> alive_threads_;
> - Barrier start_stop_barrier_;
> - Mutex end_cond_mutex_;
> - Condition end_condition_;
> -};
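
(This and ThreadTimer below look like straight moves into the new thread_manager.h
and thread_timer.h headers included above.) For anyone skimming the removed code,
the heart of it is a reusable start/stop barrier in which the last thread to arrive
is elected to do the per-phase bookkeeping. A standard-library-only sketch of that
idea, not the library's actual Mutex/Barrier wrappers:

    #include <condition_variable>
    #include <mutex>

    // Minimal generation-counting barrier; wait() returns true only for the
    // last thread to arrive in each phase (compare StartStopBarrier() above).
    class SimpleBarrier {
     public:
      explicit SimpleBarrier(int count)
          : threshold_(count), remaining_(count), generation_(0) {}

      bool wait() {
        std::unique_lock<std::mutex> lock(mu_);
        int gen = generation_;
        if (--remaining_ == 0) {
          ++generation_;
          remaining_ = threshold_;
          cv_.notify_all();
          return true;  // last arriver runs the per-phase bookkeeping
        }
        cv_.wait(lock, [&] { return gen != generation_; });
        return false;
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int threshold_;
      int remaining_;
      int generation_;
    };
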
> -
> -// Timer management class
> -class ThreadTimer {
> - public:
> - ThreadTimer() = default;
> -
> - // Called by each thread
> - void StartTimer() {
> - running_ = true;
> - start_real_time_ = ChronoClockNow();
> - start_cpu_time_ = ThreadCPUUsage();
> - }
> -
> - // Called by each thread
> - void StopTimer() {
> - CHECK(running_);
> - running_ = false;
> - real_time_used_ += ChronoClockNow() - start_real_time_;
> -    // Floating point error can result in the subtraction producing a negative
> -    // time. Guard against that.
> -    cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
> - }
> -
> - // Called by each thread
> - void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
> -
> - bool running() const { return running_; }
> -
> - // REQUIRES: timer is not running
> - double real_time_used() {
> - CHECK(!running_);
> - return real_time_used_;
> - }
> -
> - // REQUIRES: timer is not running
> - double cpu_time_used() {
> - CHECK(!running_);
> - return cpu_time_used_;
> - }
> -
> - // REQUIRES: timer is not running
> - double manual_time_used() {
> - CHECK(!running_);
> - return manual_time_used_;
> - }
> -
> - private:
> - bool running_ = false; // Is the timer running
> - double start_real_time_ = 0; // If running_
> - double start_cpu_time_ = 0; // If running_
> -
> - // Accumulated time so far (does not contain current slice if running_)
> - double real_time_used_ = 0;
> - double cpu_time_used_ = 0;
> -  // Manually set iteration time. User sets this with SetIterationTime(seconds).
> -  double manual_time_used_ = 0;
> -};
> -
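
ThreadTimer's manual_time_used_ is the counterpart of State::SetIterationTime();
for reference, the usual manual-timing benchmark (hypothetical names) looks like:

    #include <benchmark/benchmark.h>
    #include <chrono>

    static void BM_ManualTiming(benchmark::State& state) {
      for (auto _ : state) {
        auto start = std::chrono::high_resolution_clock::now();
        // ... the work being timed would go here ...
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> elapsed = end - start;
        state.SetIterationTime(elapsed.count());
      }
    }
    // UseManualTime() makes the reported time come from SetIterationTime().
    BENCHMARK(BM_ManualTiming)->UseManualTime();
    BENCHMARK_MAIN();
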
> namespace {
>
> BenchmarkReporter::Run CreateRunReport(
> const benchmark::internal::Benchmark::Instance& b,
> - const internal::ThreadManager::Result& results, size_t iters,
> - double seconds) {
> + const internal::ThreadManager::Result& results, double seconds) {
> // Create report about this benchmark run.
> BenchmarkReporter::Run report;
>
> @@ -228,8 +123,8 @@ BenchmarkReporter::Run CreateRunReport(
> report.error_occurred = results.has_error_;
> report.error_message = results.error_message_;
> report.report_label = results.report_label_;
> - // Report the total iterations across all threads.
> - report.iterations = static_cast<int64_t>(iters) * b.threads;
> +