From c3f7d13ad27ad8d9ca5760e9fd5c0577c4bf60dd Mon Sep 17 00:00:00 2001
From: Juan Cruz Viotti
Date: Tue, 28 Jan 2025 10:58:49 -0400
Subject: [PATCH] Stop consuming GoogleTest and GoogleBenchmark from Noa (#1488)

Signed-off-by: Juan Cruz Viotti
---
 CMakeLists.txt | 7 +-
 DEPENDENCIES | 2 +
 cmake/FindGoogleBenchmark.cmake | 5 +
 cmake/FindGoogleTest.cmake | 6 +
 cmake/FindNoa.cmake | 13 -
 config.cmake.in | 1 -
 vendor/googlebenchmark.mask | 22 +
 vendor/googlebenchmark/CMakeLists.txt | 351 +
 vendor/googlebenchmark/LICENSE | 202 +
 .../cmake/AddCXXCompilerFlag.cmake | 78 +
 .../cmake/CXXFeatureCheck.cmake | 82 +
 vendor/googlebenchmark/cmake/Config.cmake.in | 11 +
 .../googlebenchmark/cmake/GetGitVersion.cmake | 36 +
 vendor/googlebenchmark/cmake/GoogleTest.cmake | 58 +
 .../googlebenchmark/cmake/GoogleTest.cmake.in | 59 +
 .../cmake/Modules/FindLLVMAr.cmake | 16 +
 .../cmake/Modules/FindLLVMNm.cmake | 16 +
 .../cmake/Modules/FindLLVMRanLib.cmake | 15 +
 .../cmake/Modules/FindPFM.cmake | 28 +
 vendor/googlebenchmark/cmake/benchmark.pc.in | 12 +
 .../cmake/benchmark_main.pc.in | 7 +
 .../googlebenchmark/cmake/gnu_posix_regex.cpp | 12 +
 .../cmake/llvm-toolchain.cmake | 8 +
 vendor/googlebenchmark/cmake/posix_regex.cpp | 14 +
 .../cmake/pthread_affinity.cpp | 16 +
 vendor/googlebenchmark/cmake/split_list.cmake | 3 +
 vendor/googlebenchmark/cmake/std_regex.cpp | 10 +
 vendor/googlebenchmark/cmake/steady_clock.cpp | 7 +
 .../cmake/thread_safety_attributes.cpp | 4 +
 .../include/benchmark/benchmark.h | 2060 +++++
 .../include/benchmark/export.h | 47 +
 vendor/googlebenchmark/src/CMakeLists.txt | 180 +
 vendor/googlebenchmark/src/arraysize.h | 33 +
 vendor/googlebenchmark/src/benchmark.cc | 812 ++
 .../src/benchmark_api_internal.cc | 118 +
 .../src/benchmark_api_internal.h | 87 +
 vendor/googlebenchmark/src/benchmark_main.cc | 18 +
 vendor/googlebenchmark/src/benchmark_name.cc | 59 +
 .../googlebenchmark/src/benchmark_register.cc | 521 ++
 .../googlebenchmark/src/benchmark_register.h | 109 +
 .../googlebenchmark/src/benchmark_runner.cc | 525 ++
 vendor/googlebenchmark/src/benchmark_runner.h | 136 +
 vendor/googlebenchmark/src/check.cc | 11 +
 vendor/googlebenchmark/src/check.h | 106 +
 vendor/googlebenchmark/src/colorprint.cc | 200 +
 vendor/googlebenchmark/src/colorprint.h | 33 +
 .../googlebenchmark/src/commandlineflags.cc | 298 +
 vendor/googlebenchmark/src/commandlineflags.h | 133 +
 vendor/googlebenchmark/src/complexity.cc | 255 +
 vendor/googlebenchmark/src/complexity.h | 55 +
 .../googlebenchmark/src/console_reporter.cc | 210 +
 vendor/googlebenchmark/src/counter.cc | 80 +
 vendor/googlebenchmark/src/counter.h | 32 +
 vendor/googlebenchmark/src/csv_reporter.cc | 169 +
 vendor/googlebenchmark/src/cycleclock.h | 243 +
 vendor/googlebenchmark/src/internal_macros.h | 111 +
 vendor/googlebenchmark/src/json_reporter.cc | 327 +
 vendor/googlebenchmark/src/log.h | 88 +
 vendor/googlebenchmark/src/mutex.h | 155 +
 vendor/googlebenchmark/src/perf_counters.cc | 284 +
 vendor/googlebenchmark/src/perf_counters.h | 200 +
 vendor/googlebenchmark/src/re.h | 158 +
 vendor/googlebenchmark/src/reporter.cc | 118 +
 vendor/googlebenchmark/src/statistics.cc | 214 +
 vendor/googlebenchmark/src/statistics.h | 44 +
 vendor/googlebenchmark/src/string_util.cc | 254 +
 vendor/googlebenchmark/src/string_util.h | 70 +
 vendor/googlebenchmark/src/sysinfo.cc | 871 +++
 vendor/googlebenchmark/src/thread_manager.h | 63 +
 vendor/googlebenchmark/src/thread_timer.h | 86 +
 vendor/googlebenchmark/src/timers.cc | 284 +
 vendor/googlebenchmark/src/timers.h | 75 +
 vendor/googletest.mask | 19 +
vendor/googletest/CMakeLists.txt | 36 + vendor/googletest/LICENSE | 28 + vendor/googletest/googlemock/CMakeLists.txt | 210 + .../googletest/googlemock/cmake/gmock.pc.in | 10 + .../googlemock/cmake/gmock_main.pc.in | 10 + .../googlemock/include/gmock/gmock-actions.h | 2321 ++++++ .../include/gmock/gmock-cardinalities.h | 159 + .../include/gmock/gmock-function-mocker.h | 519 ++ .../googlemock/include/gmock/gmock-matchers.h | 5624 +++++++++++++ .../include/gmock/gmock-more-actions.h | 659 ++ .../include/gmock/gmock-more-matchers.h | 120 + .../include/gmock/gmock-nice-strict.h | 277 + .../include/gmock/gmock-spec-builders.h | 2148 +++++ .../googlemock/include/gmock/gmock.h | 97 + .../include/gmock/internal/custom/README.md | 18 + .../internal/custom/gmock-generated-actions.h | 7 + .../gmock/internal/custom/gmock-matchers.h | 37 + .../gmock/internal/custom/gmock-port.h | 40 + .../gmock/internal/gmock-internal-utils.h | 489 ++ .../include/gmock/internal/gmock-port.h | 139 + .../include/gmock/internal/gmock-pp.h | 279 + vendor/googletest/googlemock/src/gmock-all.cc | 46 + .../googlemock/src/gmock-cardinalities.cc | 155 + .../googlemock/src/gmock-internal-utils.cc | 258 + .../googlemock/src/gmock-matchers.cc | 478 ++ .../googlemock/src/gmock-spec-builders.cc | 792 ++ vendor/googletest/googlemock/src/gmock.cc | 225 + .../googletest/googlemock/src/gmock_main.cc | 73 + vendor/googletest/googletest/CMakeLists.txt | 330 + .../googletest/cmake/Config.cmake.in | 13 + .../googletest/googletest/cmake/gtest.pc.in | 9 + .../googletest/cmake/gtest_main.pc.in | 10 + .../googletest/cmake/internal_utils.cmake | 334 + .../googletest/cmake/libgtest.la.in | 21 + .../include/gtest/gtest-assertion-result.h | 237 + .../include/gtest/gtest-death-test.h | 345 + .../googletest/include/gtest/gtest-matchers.h | 923 +++ .../googletest/include/gtest/gtest-message.h | 251 + .../include/gtest/gtest-param-test.h | 546 ++ .../googletest/include/gtest/gtest-printers.h | 1197 +++ .../googletest/include/gtest/gtest-spi.h | 250 + .../include/gtest/gtest-test-part.h | 192 + .../include/gtest/gtest-typed-test.h | 335 + .../googletest/include/gtest/gtest.h | 2338 ++++++ .../include/gtest/gtest_pred_impl.h | 279 + .../googletest/include/gtest/gtest_prod.h | 60 + .../include/gtest/internal/custom/README.md | 44 + .../gtest/internal/custom/gtest-port.h | 37 + .../gtest/internal/custom/gtest-printers.h | 42 + .../include/gtest/internal/custom/gtest.h | 37 + .../internal/gtest-death-test-internal.h | 306 + .../include/gtest/internal/gtest-filepath.h | 233 + .../include/gtest/internal/gtest-internal.h | 1521 ++++ .../include/gtest/internal/gtest-param-util.h | 1030 +++ .../include/gtest/internal/gtest-port-arch.h | 124 + .../include/gtest/internal/gtest-port.h | 2536 ++++++ .../include/gtest/internal/gtest-string.h | 178 + .../include/gtest/internal/gtest-type-util.h | 220 + vendor/googletest/googletest/src/gtest-all.cc | 49 + .../googletest/src/gtest-assertion-result.cc | 77 + .../googletest/src/gtest-death-test.cc | 1587 ++++ .../googletest/src/gtest-filepath.cc | 414 + .../googletest/src/gtest-internal-inl.h | 1230 +++ .../googletest/src/gtest-matchers.cc | 98 + .../googletest/googletest/src/gtest-port.cc | 1434 ++++ .../googletest/src/gtest-printers.cc | 555 ++ .../googletest/src/gtest-test-part.cc | 106 + .../googletest/src/gtest-typed-test.cc | 108 + vendor/googletest/googletest/src/gtest.cc | 6957 +++++++++++++++++ .../googletest/googletest/src/gtest_main.cc | 66 + 143 files changed, 52949 insertions(+), 16 deletions(-) create mode 
100644 cmake/FindGoogleBenchmark.cmake create mode 100644 cmake/FindGoogleTest.cmake delete mode 100644 cmake/FindNoa.cmake create mode 100644 vendor/googlebenchmark.mask create mode 100644 vendor/googlebenchmark/CMakeLists.txt create mode 100644 vendor/googlebenchmark/LICENSE create mode 100644 vendor/googlebenchmark/cmake/AddCXXCompilerFlag.cmake create mode 100644 vendor/googlebenchmark/cmake/CXXFeatureCheck.cmake create mode 100644 vendor/googlebenchmark/cmake/Config.cmake.in create mode 100644 vendor/googlebenchmark/cmake/GetGitVersion.cmake create mode 100644 vendor/googlebenchmark/cmake/GoogleTest.cmake create mode 100644 vendor/googlebenchmark/cmake/GoogleTest.cmake.in create mode 100644 vendor/googlebenchmark/cmake/Modules/FindLLVMAr.cmake create mode 100644 vendor/googlebenchmark/cmake/Modules/FindLLVMNm.cmake create mode 100644 vendor/googlebenchmark/cmake/Modules/FindLLVMRanLib.cmake create mode 100644 vendor/googlebenchmark/cmake/Modules/FindPFM.cmake create mode 100644 vendor/googlebenchmark/cmake/benchmark.pc.in create mode 100644 vendor/googlebenchmark/cmake/benchmark_main.pc.in create mode 100644 vendor/googlebenchmark/cmake/gnu_posix_regex.cpp create mode 100644 vendor/googlebenchmark/cmake/llvm-toolchain.cmake create mode 100644 vendor/googlebenchmark/cmake/posix_regex.cpp create mode 100644 vendor/googlebenchmark/cmake/pthread_affinity.cpp create mode 100644 vendor/googlebenchmark/cmake/split_list.cmake create mode 100644 vendor/googlebenchmark/cmake/std_regex.cpp create mode 100644 vendor/googlebenchmark/cmake/steady_clock.cpp create mode 100644 vendor/googlebenchmark/cmake/thread_safety_attributes.cpp create mode 100644 vendor/googlebenchmark/include/benchmark/benchmark.h create mode 100644 vendor/googlebenchmark/include/benchmark/export.h create mode 100644 vendor/googlebenchmark/src/CMakeLists.txt create mode 100644 vendor/googlebenchmark/src/arraysize.h create mode 100644 vendor/googlebenchmark/src/benchmark.cc create mode 100644 vendor/googlebenchmark/src/benchmark_api_internal.cc create mode 100644 vendor/googlebenchmark/src/benchmark_api_internal.h create mode 100644 vendor/googlebenchmark/src/benchmark_main.cc create mode 100644 vendor/googlebenchmark/src/benchmark_name.cc create mode 100644 vendor/googlebenchmark/src/benchmark_register.cc create mode 100644 vendor/googlebenchmark/src/benchmark_register.h create mode 100644 vendor/googlebenchmark/src/benchmark_runner.cc create mode 100644 vendor/googlebenchmark/src/benchmark_runner.h create mode 100644 vendor/googlebenchmark/src/check.cc create mode 100644 vendor/googlebenchmark/src/check.h create mode 100644 vendor/googlebenchmark/src/colorprint.cc create mode 100644 vendor/googlebenchmark/src/colorprint.h create mode 100644 vendor/googlebenchmark/src/commandlineflags.cc create mode 100644 vendor/googlebenchmark/src/commandlineflags.h create mode 100644 vendor/googlebenchmark/src/complexity.cc create mode 100644 vendor/googlebenchmark/src/complexity.h create mode 100644 vendor/googlebenchmark/src/console_reporter.cc create mode 100644 vendor/googlebenchmark/src/counter.cc create mode 100644 vendor/googlebenchmark/src/counter.h create mode 100644 vendor/googlebenchmark/src/csv_reporter.cc create mode 100644 vendor/googlebenchmark/src/cycleclock.h create mode 100644 vendor/googlebenchmark/src/internal_macros.h create mode 100644 vendor/googlebenchmark/src/json_reporter.cc create mode 100644 vendor/googlebenchmark/src/log.h create mode 100644 vendor/googlebenchmark/src/mutex.h create mode 100644 
vendor/googlebenchmark/src/perf_counters.cc create mode 100644 vendor/googlebenchmark/src/perf_counters.h create mode 100644 vendor/googlebenchmark/src/re.h create mode 100644 vendor/googlebenchmark/src/reporter.cc create mode 100644 vendor/googlebenchmark/src/statistics.cc create mode 100644 vendor/googlebenchmark/src/statistics.h create mode 100644 vendor/googlebenchmark/src/string_util.cc create mode 100644 vendor/googlebenchmark/src/string_util.h create mode 100644 vendor/googlebenchmark/src/sysinfo.cc create mode 100644 vendor/googlebenchmark/src/thread_manager.h create mode 100644 vendor/googlebenchmark/src/thread_timer.h create mode 100644 vendor/googlebenchmark/src/timers.cc create mode 100644 vendor/googlebenchmark/src/timers.h create mode 100644 vendor/googletest.mask create mode 100644 vendor/googletest/CMakeLists.txt create mode 100644 vendor/googletest/LICENSE create mode 100644 vendor/googletest/googlemock/CMakeLists.txt create mode 100644 vendor/googletest/googlemock/cmake/gmock.pc.in create mode 100644 vendor/googletest/googlemock/cmake/gmock_main.pc.in create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-actions.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-cardinalities.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-function-mocker.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-matchers.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-more-actions.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-more-matchers.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-nice-strict.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock-spec-builders.h create mode 100644 vendor/googletest/googlemock/include/gmock/gmock.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/custom/README.md create mode 100644 vendor/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/custom/gmock-port.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/gmock-port.h create mode 100644 vendor/googletest/googlemock/include/gmock/internal/gmock-pp.h create mode 100644 vendor/googletest/googlemock/src/gmock-all.cc create mode 100644 vendor/googletest/googlemock/src/gmock-cardinalities.cc create mode 100644 vendor/googletest/googlemock/src/gmock-internal-utils.cc create mode 100644 vendor/googletest/googlemock/src/gmock-matchers.cc create mode 100644 vendor/googletest/googlemock/src/gmock-spec-builders.cc create mode 100644 vendor/googletest/googlemock/src/gmock.cc create mode 100644 vendor/googletest/googlemock/src/gmock_main.cc create mode 100644 vendor/googletest/googletest/CMakeLists.txt create mode 100644 vendor/googletest/googletest/cmake/Config.cmake.in create mode 100644 vendor/googletest/googletest/cmake/gtest.pc.in create mode 100644 vendor/googletest/googletest/cmake/gtest_main.pc.in create mode 100644 vendor/googletest/googletest/cmake/internal_utils.cmake create mode 100644 vendor/googletest/googletest/cmake/libgtest.la.in create mode 100644 vendor/googletest/googletest/include/gtest/gtest-assertion-result.h create mode 100644 vendor/googletest/googletest/include/gtest/gtest-death-test.h create mode 100644 
vendor/googletest/googletest/include/gtest/gtest-matchers.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-message.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-param-test.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-printers.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-spi.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-test-part.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest-typed-test.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest_pred_impl.h
 create mode 100644 vendor/googletest/googletest/include/gtest/gtest_prod.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/custom/README.md
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/custom/gtest-port.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/custom/gtest-printers.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/custom/gtest.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-filepath.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-internal.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-param-util.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-port-arch.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-port.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-string.h
 create mode 100644 vendor/googletest/googletest/include/gtest/internal/gtest-type-util.h
 create mode 100644 vendor/googletest/googletest/src/gtest-all.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-assertion-result.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-death-test.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-filepath.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-internal-inl.h
 create mode 100644 vendor/googletest/googletest/src/gtest-matchers.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-port.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-printers.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-test-part.cc
 create mode 100644 vendor/googletest/googletest/src/gtest-typed-test.cc
 create mode 100644 vendor/googletest/googletest/src/gtest.cc
 create mode 100644 vendor/googletest/googletest/src/gtest_main.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7857f0346..db6fc8c7c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -18,8 +18,9 @@ option(SOURCEMETA_CORE_INSTALL "Install the Sourcemeta Core library" ON)
 option(SOURCEMETA_CORE_ADDRESS_SANITIZER "Build Sourcemeta Core with an address sanitizer" OFF)
 option(SOURCEMETA_CORE_UNDEFINED_SANITIZER "Build Sourcemeta Core with an undefined behavior sanitizer" OFF)
 
-find_package(Noa REQUIRED)
+include(./vendor/noa/cmake/noa.cmake)
 
+# TODO: Turn this into a re-usable utility CMake function
 if(SOURCEMETA_CORE_INSTALL)
   include(GNUInstallDirs)
   include(CMakePackageConfigHelpers)
@@ -34,7 +35,7 @@ if(SOURCEMETA_CORE_INSTALL)
     "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake"
     "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake"
     DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
-    COMPONENT sourcemeta_core_dev)
+    COMPONENT sourcemeta_${PROJECT_NAME}_dev)
 endif()
@@ -95,6 +96,7 @@ endif()
 # Testing
 if(SOURCEMETA_CORE_TESTS)
   enable_testing()
+  find_package(GoogleTest REQUIRED)
 
   if(SOURCEMETA_CORE_REGEX)
     add_subdirectory(test/regex)
@@ -137,6 +139,7 @@ if(SOURCEMETA_CORE_TESTS)
   endif()
 
   if(SOURCEMETA_CORE_BENCHMARK)
+    find_package(GoogleBenchmark REQUIRED)
     add_subdirectory(benchmark)
   endif()
 endif()
diff --git a/DEPENDENCIES b/DEPENDENCIES
index 044137f38..9904503a1 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -15,3 +15,5 @@ jsonschema-test-suite https://github.com/json-schema-org/JSON-Schema-Test-Suite
 uriparser https://github.com/uriparser/uriparser bfe94f6e54d0abb5afa7bb0411940b7242cb835a
 yaml https://github.com/yaml/libyaml 0.2.5
 boost-regex https://github.com/boostorg/regex boost-1.86.0
+googletest https://github.com/google/googletest a7f443b80b105f940225332ed3c31f2790092f47
+googlebenchmark https://github.com/google/benchmark 378fe693a1ef51500db21b11ff05a8018c5f0e55
diff --git a/cmake/FindGoogleBenchmark.cmake b/cmake/FindGoogleBenchmark.cmake
new file mode 100644
index 000000000..082a94e4d
--- /dev/null
+++ b/cmake/FindGoogleBenchmark.cmake
@@ -0,0 +1,5 @@
+if(NOT Benchmark_FOUND)
+  set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "enable testing of the benchmark library")
+  add_subdirectory("${PROJECT_SOURCE_DIR}/vendor/googlebenchmark")
+  set(Benchmark_FOUND ON)
+endif()
diff --git a/cmake/FindGoogleTest.cmake b/cmake/FindGoogleTest.cmake
new file mode 100644
index 000000000..18eb1184e
--- /dev/null
+++ b/cmake/FindGoogleTest.cmake
@@ -0,0 +1,6 @@
+if(NOT GoogleTest_FOUND)
+  set(BUILD_GMOCK ON CACHE BOOL "enable googlemock")
+  set(INSTALL_GTEST OFF CACHE BOOL "disable installation")
+  add_subdirectory("${PROJECT_SOURCE_DIR}/vendor/googletest")
+  set(GoogleTest_FOUND ON)
+endif()
diff --git a/cmake/FindNoa.cmake b/cmake/FindNoa.cmake
deleted file mode 100644
index ff709a9dd..000000000
--- a/cmake/FindNoa.cmake
+++ /dev/null
@@ -1,13 +0,0 @@
-if(NOT Noa_FOUND)
-  if(SOURCEMETA_CORE_INSTALL)
-    set(NOA_INSTALL ON CACHE BOOL "enable Noa installation")
-  else()
-    set(NOA_INSTALL OFF CACHE BOOL "disable Noa installation")
-  endif()
-
-  set(NOA_GOOGLETEST ${SOURCEMETA_CORE_TESTS} CACHE BOOL "GoogleTest")
-  set(NOA_GOOGLEBENCHMARK ${SOURCEMETA_CORE_BENCHMARK} CACHE BOOL "GoogleBenchmark")
-  add_subdirectory("${PROJECT_SOURCE_DIR}/vendor/noa")
-  include("${PROJECT_SOURCE_DIR}/vendor/noa/cmake/noa.cmake")
-  set(Noa_FOUND ON)
-endif()
diff --git a/config.cmake.in b/config.cmake.in
index 7d6809b46..b3e66e633 100644
--- a/config.cmake.in
+++ b/config.cmake.in
@@ -24,7 +24,6 @@ foreach(component ${SOURCEMETA_CORE_COMPONENTS})
     find_dependency(uriparser)
     include("${CMAKE_CURRENT_LIST_DIR}/sourcemeta_core_uri.cmake")
   elseif(component STREQUAL "json")
-    find_dependency(noa)
     include("${CMAKE_CURRENT_LIST_DIR}/sourcemeta_core_json.cmake")
   elseif(component STREQUAL "jsonl")
     include("${CMAKE_CURRENT_LIST_DIR}/sourcemeta_core_json.cmake")
diff --git a/vendor/googlebenchmark.mask b/vendor/googlebenchmark.mask
new file mode 100644
index 000000000..9ccea5b38
--- /dev/null
+++ b/vendor/googlebenchmark.mask
@@ -0,0 +1,22 @@
+.github/
+bazel/
+bindings/
+docs/
+test/
+tools/
+AUTHORS
+BUILD.bazel
+CONTRIBUTING.md
+CONTRIBUTORS
+MODULE.bazel
+WORKSPACE
+WORKSPACE.bzlmod
+_config.yml
+appveyor.yml
+pyproject.toml
+setup.py
+.clang-format
+.clang-tidy
+.pre-commit-config.yaml
+.ycm_extra_conf.py
+README.md
diff --git a/vendor/googlebenchmark/CMakeLists.txt
b/vendor/googlebenchmark/CMakeLists.txt new file mode 100644 index 000000000..216c1c921 --- /dev/null +++ b/vendor/googlebenchmark/CMakeLists.txt @@ -0,0 +1,351 @@ +# Require CMake 3.10. If available, use the policies up to CMake 3.22. +cmake_minimum_required (VERSION 3.10...3.22) + +project (benchmark VERSION 1.8.5 LANGUAGES CXX) + +option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) +option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) +option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) +option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF) +option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON) +option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF) + +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI") + # PGC++ maybe reporting false positives. + set(BENCHMARK_ENABLE_WERROR OFF) +endif() +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "NVHPC") + set(BENCHMARK_ENABLE_WERROR OFF) +endif() +if(BENCHMARK_FORCE_WERROR) + set(BENCHMARK_ENABLE_WERROR ON) +endif(BENCHMARK_FORCE_WERROR) + +if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")) + option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF) +else() + set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE) +endif() +option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON) +option(BENCHMARK_ENABLE_DOXYGEN "Build documentation with Doxygen." OFF) +option(BENCHMARK_INSTALL_DOCS "Enable installation of documentation." ON) + +# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which +# may require downloading the source code. +option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF) + +# This option can be used to disable building and running unit tests which depend on gtest +# in cases where it is not possible to build or find a valid version of gtest. +option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON) +option(BENCHMARK_USE_BUNDLED_GTEST "Use bundled GoogleTest. If disabled, the find_package(GTest) will be used." ON) + +option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF) + +# Export only public symbols +set(CMAKE_CXX_VISIBILITY_PRESET hidden) +set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) + +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and + # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the + # undocumented, but working variable. + # See https://gitlab.kitware.com/cmake/cmake/-/issues/15170 + set(CMAKE_SYSTEM_PROCESSOR ${MSVC_CXX_ARCHITECTURE_ID}) + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ARM") + set(CMAKE_CROSSCOMPILING TRUE) + endif() +endif() + +set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF) +function(should_enable_assembly_tests) + if(CMAKE_BUILD_TYPE) + string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) + if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") + # FIXME: The --coverage flag needs to be removed when building assembly + # tests for this to work. 
+ return() + endif() + endif() + if (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC") + return() + elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + return() + elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8) + # FIXME: Make these work on 32 bit builds + return() + elseif(BENCHMARK_BUILD_32_BITS) + # FIXME: Make these work on 32 bit builds + return() + endif() + find_program(LLVM_FILECHECK_EXE FileCheck) + if (LLVM_FILECHECK_EXE) + set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE) + message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}") + else() + message(STATUS "Failed to find LLVM FileCheck") + return() + endif() + set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE) +endfunction() +should_enable_assembly_tests() + +# This option disables the building and running of the assembly verification tests +option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests" + ${ENABLE_ASSEMBLY_TESTS_DEFAULT}) + +# Make sure we can import out CMake functions +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules") +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + + +# Read the git tags to determine the project version +include(GetGitVersion) +get_git_version(GIT_VERSION) + +# If no git version can be determined, use the version +# from the project() command +if ("${GIT_VERSION}" STREQUAL "v0.0.0") + set(VERSION "v${benchmark_VERSION}") +else() + set(VERSION "${GIT_VERSION}") +endif() + +# Normalize version: drop "v" prefix, replace first "-" with ".", +# drop everything after second "-" (including said "-"). +string(STRIP ${VERSION} VERSION) +if(VERSION MATCHES v[^-]*-) + string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" NORMALIZED_VERSION ${VERSION}) +else() + string(REGEX REPLACE "v(.*)" "\\1" NORMALIZED_VERSION ${VERSION}) +endif() + +# Tell the user what versions we are using +message(STATUS "Google Benchmark version: ${VERSION}, normalized to ${NORMALIZED_VERSION}") + +# The version of the libraries +set(GENERIC_LIB_VERSION ${NORMALIZED_VERSION}) +string(SUBSTRING ${NORMALIZED_VERSION} 0 1 GENERIC_LIB_SOVERSION) + +# Import our CMake modules +include(AddCXXCompilerFlag) +include(CheckCXXCompilerFlag) +include(CheckLibraryExists) +include(CXXFeatureCheck) + +check_library_exists(rt shm_open "" HAVE_LIB_RT) + +if (BENCHMARK_BUILD_32_BITS) + add_required_cxx_compiler_flag(-m32) +endif() + +set(BENCHMARK_CXX_STANDARD 14) + +set(CMAKE_CXX_STANDARD ${BENCHMARK_CXX_STANDARD}) +set(CMAKE_CXX_STANDARD_REQUIRED YES) +set(CMAKE_CXX_EXTENSIONS OFF) + +if (MSVC) + # Turn compiler warnings up to 11 + string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-EHs-) + add_cxx_compiler_flag(-EHa-) + add_definitions(-D_HAS_EXCEPTIONS=0) + endif() + # Link time optimisation + if (BENCHMARK_ENABLE_LTO) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL") + set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG") + set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG") + set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG") + + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}") + 
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}") + set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}") + set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG") + + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL") + set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG") + set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG") + set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") + endif() +else() + # Turn on Large-file Support + add_definitions(-D_FILE_OFFSET_BITS=64) + add_definitions(-D_LARGEFILE64_SOURCE) + add_definitions(-D_LARGEFILE_SOURCE) + # Turn compiler warnings up to 11 + add_cxx_compiler_flag(-Wall) + add_cxx_compiler_flag(-Wextra) + add_cxx_compiler_flag(-Wshadow) + add_cxx_compiler_flag(-Wfloat-equal) + add_cxx_compiler_flag(-Wold-style-cast) + add_cxx_compiler_flag(-Wconversion) + if(BENCHMARK_ENABLE_WERROR) + add_cxx_compiler_flag(-Werror) + endif() + if (NOT BENCHMARK_ENABLE_TESTING) + # Disable warning when compiling tests as gtest does not use 'override'. + add_cxx_compiler_flag(-Wsuggest-override) + endif() + add_cxx_compiler_flag(-pedantic) + add_cxx_compiler_flag(-pedantic-errors) + add_cxx_compiler_flag(-Wshorten-64-to-32) + add_cxx_compiler_flag(-fstrict-aliasing) + # Disable warnings regarding deprecated parts of the library while building + # and testing those parts of the library. + add_cxx_compiler_flag(-Wno-deprecated-declarations) + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" OR CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") + # Intel silently ignores '-Wno-deprecated-declarations', + # warning no. 1786 must be explicitly disabled. + # See #631 for rationale. + add_cxx_compiler_flag(-wd1786) + add_cxx_compiler_flag(-fno-finite-math-only) + endif() + # Disable deprecation warnings for release builds (when -Werror is enabled). + if(BENCHMARK_ENABLE_WERROR) + add_cxx_compiler_flag(-Wno-deprecated) + endif() + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-fno-exceptions) + endif() + + if (HAVE_CXX_FLAG_FSTRICT_ALIASING) + if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") #ICC17u2: Many false positives for Wstrict-aliasing + add_cxx_compiler_flag(-Wstrict-aliasing) + endif() + endif() + # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden + # (because of deprecated overload) + add_cxx_compiler_flag(-wd654) + add_cxx_compiler_flag(-Wthread-safety) + if (HAVE_CXX_FLAG_WTHREAD_SAFETY) + cxx_feature_check(THREAD_SAFETY_ATTRIBUTES "-DINCLUDE_DIRECTORIES=${PROJECT_SOURCE_DIR}/include") + endif() + + # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a + # predefined macro, which turns on all of the wonderful libc extensions. + # However g++ doesn't do this in Cygwin so we have to define it ourselves + # since we depend on GNU/POSIX/BSD extensions. 
+ if (CYGWIN) + add_definitions(-D_GNU_SOURCE=1) + endif() + + if (QNXNTO) + add_definitions(-D_QNX_SOURCE) + endif() + + # Link time optimisation + if (BENCHMARK_ENABLE_LTO) + add_cxx_compiler_flag(-flto) + add_cxx_compiler_flag(-Wno-lto-type-mismatch) + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + find_program(GCC_AR gcc-ar) + if (GCC_AR) + set(CMAKE_AR ${GCC_AR}) + endif() + find_program(GCC_RANLIB gcc-ranlib) + if (GCC_RANLIB) + set(CMAKE_RANLIB ${GCC_RANLIB}) + endif() + elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + include(llvm-toolchain) + endif() + endif() + + # Coverage build type + set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" + CACHE STRING "Flags used by the C++ compiler during coverage builds." + FORCE) + set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used for linking binaries during coverage builds." + FORCE) + set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used by the shared libraries linker during coverage builds." + FORCE) + mark_as_advanced( + BENCHMARK_CXX_FLAGS_COVERAGE + BENCHMARK_EXE_LINKER_FLAGS_COVERAGE + BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE) + set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING + "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.") + add_cxx_compiler_flag(--coverage COVERAGE) +endif() + +if (BENCHMARK_USE_LIBCXX) + if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + add_cxx_compiler_flag(-stdlib=libc++) + elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR + "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" OR + "${CMAKE_CXX_COMPILER_ID}" STREQUAL "IntelLLVM") + add_cxx_compiler_flag(-nostdinc++) + message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS") + # Adding -nodefaultlibs directly to CMAKE__LINKER_FLAGS will break + # configuration checks such as 'find_package(Threads)' + list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs) + # -lc++ cannot be added directly to CMAKE__LINKER_FLAGS because + # linker flags appear before all linker inputs and -lc++ must appear after. 
+ list(APPEND BENCHMARK_CXX_LIBRARIES c++) + else() + message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") + endif() +endif(BENCHMARK_USE_LIBCXX) + +set(EXTRA_CXX_FLAGS "") +if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + # Clang on Windows fails to compile the regex feature check under C++11 + set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14") +endif() + +# C++ feature checks +# Determine the correct regular expression engine to use +cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS}) +cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS}) +cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS}) +if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) + message(FATAL_ERROR "Failed to determine the source files for the regular expression backend") +endif() +if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX + AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) + message(WARNING "Using std::regex with exceptions disabled is not fully supported") +endif() + +cxx_feature_check(STEADY_CLOCK) +# Ensure we have pthreads +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) +cxx_feature_check(PTHREAD_AFFINITY) + +if (BENCHMARK_ENABLE_LIBPFM) + find_package(PFM REQUIRED) +endif() + +# Set up directories +include_directories(${PROJECT_SOURCE_DIR}/include) + +# Build the targets +add_subdirectory(src) + +if (BENCHMARK_ENABLE_TESTING) + enable_testing() + if (BENCHMARK_ENABLE_GTEST_TESTS AND + NOT (TARGET gtest AND TARGET gtest_main AND + TARGET gmock AND TARGET gmock_main)) + if (BENCHMARK_USE_BUNDLED_GTEST) + include(GoogleTest) + else() + find_package(GTest CONFIG REQUIRED) + add_library(gtest ALIAS GTest::gtest) + add_library(gtest_main ALIAS GTest::gtest_main) + add_library(gmock ALIAS GTest::gmock) + add_library(gmock_main ALIAS GTest::gmock_main) + endif() + endif() + add_subdirectory(test) +endif() diff --git a/vendor/googlebenchmark/LICENSE b/vendor/googlebenchmark/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/googlebenchmark/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
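
As an aside before the remaining vendored build files: the new cmake/FindGoogleTest.cmake and cmake/FindGoogleBenchmark.cmake modules shown earlier in this patch are consumed through plain find_package() calls. The following is a minimal, illustrative sketch (not part of the patch) of how a test and a benchmark target inside this repository could pick them up. The executable names and source files are hypothetical, the module-path line is only there to keep the sketch self-contained (the top-level build is assumed to arrange this already), and the gtest_main / benchmark_main targets come from the vendored subprojects pulled in by those modules.

    # Illustrative sketch only -- not part of this patch.
    list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")

    if(SOURCEMETA_CORE_TESTS)
      # Dispatches to cmake/FindGoogleTest.cmake, which add_subdirectory()s
      # vendor/googletest and defines gtest, gtest_main, gmock and gmock_main
      # (recent GoogleTest also provides the namespaced GTest:: aliases).
      find_package(GoogleTest REQUIRED)
      add_executable(example_unit_test example_unit_test.cc)
      target_link_libraries(example_unit_test PRIVATE gtest_main)
      add_test(NAME example_unit COMMAND example_unit_test)
    endif()

    if(SOURCEMETA_CORE_BENCHMARK)
      # Dispatches to cmake/FindGoogleBenchmark.cmake, which add_subdirectory()s
      # vendor/googlebenchmark and defines the benchmark and benchmark_main targets.
      find_package(GoogleBenchmark REQUIRED)
      add_executable(example_benchmark example_benchmark.cc)
      target_link_libraries(example_benchmark PRIVATE benchmark_main)
    endif()
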
diff --git a/vendor/googlebenchmark/cmake/AddCXXCompilerFlag.cmake b/vendor/googlebenchmark/cmake/AddCXXCompilerFlag.cmake new file mode 100644 index 000000000..858589e97 --- /dev/null +++ b/vendor/googlebenchmark/cmake/AddCXXCompilerFlag.cmake @@ -0,0 +1,78 @@ +# - Adds a compiler flag if it is supported by the compiler +# +# This function checks that the supplied compiler flag is supported and then +# adds it to the corresponding compiler flags +# +# add_cxx_compiler_flag( []) +# +# - Example +# +# include(AddCXXCompilerFlag) +# add_cxx_compiler_flag(-Wall) +# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) +# Requires CMake 2.6+ + +if(__add_cxx_compiler_flag) + return() +endif() +set(__add_cxx_compiler_flag INCLUDED) + +include(CheckCXXCompilerFlag) + +function(mangle_compiler_flag FLAG OUTPUT) + string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) + string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) + set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) +endfunction(mangle_compiler_flag) + +function(add_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + if(ARGC GREATER 1) + set(VARIANT ${ARGV1}) + string(TOUPPER "_${VARIANT}" VARIANT) + else() + set(VARIANT "") + endif() + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + endif() +endfunction() + +function(add_required_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + if(ARGC GREATER 1) + set(VARIANT ${ARGV1}) + string(TOUPPER "_${VARIANT}" VARIANT) + else() + set(VARIANT "") + endif() + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) + else() + message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") + endif() +endfunction() + +function(check_cxx_warning_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + # Add -Werror to ensure the compiler generates an error if the warning flag + # doesn't exist. 
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") +endfunction() diff --git a/vendor/googlebenchmark/cmake/CXXFeatureCheck.cmake b/vendor/googlebenchmark/cmake/CXXFeatureCheck.cmake new file mode 100644 index 000000000..e51482659 --- /dev/null +++ b/vendor/googlebenchmark/cmake/CXXFeatureCheck.cmake @@ -0,0 +1,82 @@ +# - Compile and run code to check for C++ features +# +# This functions compiles a source file under the `cmake` folder +# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake +# environment +# +# cxx_feature_check( []) +# +# - Example +# +# include(CXXFeatureCheck) +# cxx_feature_check(STD_REGEX) +# Requires CMake 2.8.12+ + +if(__cxx_feature_check) + return() +endif() +set(__cxx_feature_check INCLUDED) + +option(CXXFEATURECHECK_DEBUG OFF) + +function(cxx_feature_check FILE) + string(TOLOWER ${FILE} FILE) + string(TOUPPER ${FILE} VAR) + string(TOUPPER "HAVE_${VAR}" FEATURE) + if (DEFINED HAVE_${VAR}) + set(HAVE_${VAR} 1 PARENT_SCOPE) + add_definitions(-DHAVE_${VAR}) + return() + endif() + + set(FEATURE_CHECK_CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) + if (ARGC GREATER 1) + message(STATUS "Enabling additional flags: ${ARGV1}") + list(APPEND FEATURE_CHECK_CMAKE_FLAGS ${ARGV1}) + endif() + + if (NOT DEFINED COMPILE_${FEATURE}) + if(CMAKE_CROSSCOMPILING) + message(STATUS "Cross-compiling to test ${FEATURE}") + try_compile(COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED ON + CMAKE_FLAGS ${FEATURE_CHECK_CMAKE_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) + if(COMPILE_${FEATURE}) + message(WARNING + "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") + set(RUN_${FEATURE} 0 CACHE INTERNAL "") + else() + set(RUN_${FEATURE} 1 CACHE INTERNAL "") + endif() + else() + message(STATUS "Compiling and running to test ${FEATURE}") + try_run(RUN_${FEATURE} COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED ON + CMAKE_FLAGS ${FEATURE_CHECK_CMAKE_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + COMPILE_OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) + endif() + endif() + + if(RUN_${FEATURE} EQUAL 0) + message(STATUS "Performing Test ${FEATURE} -- success") + set(HAVE_${VAR} 1 PARENT_SCOPE) + add_definitions(-DHAVE_${VAR}) + else() + if(NOT COMPILE_${FEATURE}) + if(CXXFEATURECHECK_DEBUG) + message(STATUS "Performing Test ${FEATURE} -- failed to compile: ${COMPILE_OUTPUT_VAR}") + else() + message(STATUS "Performing Test ${FEATURE} -- failed to compile") + endif() + else() + message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run") + endif() + endif() +endfunction() diff --git a/vendor/googlebenchmark/cmake/Config.cmake.in b/vendor/googlebenchmark/cmake/Config.cmake.in new file mode 100644 index 000000000..3659cfa2a --- /dev/null +++ b/vendor/googlebenchmark/cmake/Config.cmake.in @@ -0,0 +1,11 @@ +@PACKAGE_INIT@ + +include (CMakeFindDependencyMacro) + +find_dependency (Threads) + +if (@BENCHMARK_ENABLE_LIBPFM@) + find_dependency (PFM) +endif() + +include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") diff --git a/vendor/googlebenchmark/cmake/GetGitVersion.cmake b/vendor/googlebenchmark/cmake/GetGitVersion.cmake new file mode 100644 index 000000000..b0210103b --- /dev/null +++ 
b/vendor/googlebenchmark/cmake/GetGitVersion.cmake @@ -0,0 +1,36 @@ +# - Returns a version string from Git tags +# +# This function inspects the annotated git tags for the project and returns a string +# into a CMake variable +# +# get_git_version() +# +# - Example +# +# include(GetGitVersion) +# get_git_version(GIT_VERSION) +# +# Requires CMake 2.8.11+ +find_package(Git) + +if(__get_git_version) + return() +endif() +set(__get_git_version INCLUDED) + +function(get_git_version var) + if(GIT_EXECUTABLE) + execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 --dirty + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + RESULT_VARIABLE status + OUTPUT_VARIABLE GIT_VERSION + ERROR_QUIET) + if(status) + set(GIT_VERSION "v0.0.0") + endif() + else() + set(GIT_VERSION "v0.0.0") + endif() + + set(${var} ${GIT_VERSION} PARENT_SCOPE) +endfunction() diff --git a/vendor/googlebenchmark/cmake/GoogleTest.cmake b/vendor/googlebenchmark/cmake/GoogleTest.cmake new file mode 100644 index 000000000..e66e9d1a2 --- /dev/null +++ b/vendor/googlebenchmark/cmake/GoogleTest.cmake @@ -0,0 +1,58 @@ +# Download and unpack googletest at configure time +set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest") +configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY) + +set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes +execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" + -DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "CMake step for googletest failed: ${result}") +endif() + +execute_process( + COMMAND ${CMAKE_COMMAND} --build . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "Build step for googletest failed: ${result}") +endif() + +# Prevent overriding the parent project's compiler/linker +# settings on Windows +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +include(${GOOGLETEST_PREFIX}/googletest-paths.cmake) + +# Add googletest directly to our build. This defines +# the gtest and gtest_main targets. +add_subdirectory(${GOOGLETEST_SOURCE_DIR} + ${GOOGLETEST_BINARY_DIR} + EXCLUDE_FROM_ALL) + +# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves. 
+if (MSVC) + target_compile_options(gtest PRIVATE "/wd4244" "/wd4722") + target_compile_options(gtest_main PRIVATE "/wd4244" "/wd4722") + target_compile_options(gmock PRIVATE "/wd4244" "/wd4722") + target_compile_options(gmock_main PRIVATE "/wd4244" "/wd4722") +else() + target_compile_options(gtest PRIVATE "-w") + target_compile_options(gtest_main PRIVATE "-w") + target_compile_options(gmock PRIVATE "-w") + target_compile_options(gmock_main PRIVATE "-w") +endif() + +if(NOT DEFINED GTEST_COMPILE_COMMANDS) + set(GTEST_COMPILE_COMMANDS ON) +endif() + +set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $ EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $ EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $ EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $ EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) diff --git a/vendor/googlebenchmark/cmake/GoogleTest.cmake.in b/vendor/googlebenchmark/cmake/GoogleTest.cmake.in new file mode 100644 index 000000000..ce653ac37 --- /dev/null +++ b/vendor/googlebenchmark/cmake/GoogleTest.cmake.in @@ -0,0 +1,59 @@ +cmake_minimum_required(VERSION 2.8.12) + +project(googletest-download NONE) + +# Enable ExternalProject CMake module +include(ExternalProject) + +option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF) +set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH + "Path to the googletest root tree. Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs") + +# Download and install GoogleTest + +message(STATUS "Looking for Google Test sources") +message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}") +if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt") + message(STATUS "Found Google Test in ${GOOGLETEST_PATH}") + + ExternalProject_Add( + googletest + PREFIX "${CMAKE_BINARY_DIR}" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir. + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +else() + if(NOT ALLOW_DOWNLOADING_GOOGLETEST) + message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_USE_BUNDLED_GTEST, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.") + return() + else() + message(WARNING "Did not find Google Test sources! 
Fetching from web...") + ExternalProject_Add( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG "release-1.11.0" + PREFIX "${CMAKE_BINARY_DIR}" + STAMP_DIR "${CMAKE_BINARY_DIR}/stamp" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${CMAKE_BINARY_DIR}/src" + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) + endif() +endif() + +ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR) +file(WRITE googletest-paths.cmake +"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\") +set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\") +") diff --git a/vendor/googlebenchmark/cmake/Modules/FindLLVMAr.cmake b/vendor/googlebenchmark/cmake/Modules/FindLLVMAr.cmake new file mode 100644 index 000000000..23469813c --- /dev/null +++ b/vendor/googlebenchmark/cmake/Modules/FindLLVMAr.cmake @@ -0,0 +1,16 @@ +include(FeatureSummary) + +find_program(LLVMAR_EXECUTABLE + NAMES llvm-ar + DOC "The llvm-ar executable" + ) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LLVMAr + DEFAULT_MSG + LLVMAR_EXECUTABLE) + +SET_PACKAGE_PROPERTIES(LLVMAr PROPERTIES + URL https://llvm.org/docs/CommandGuide/llvm-ar.html + DESCRIPTION "create, modify, and extract from archives" +) diff --git a/vendor/googlebenchmark/cmake/Modules/FindLLVMNm.cmake b/vendor/googlebenchmark/cmake/Modules/FindLLVMNm.cmake new file mode 100644 index 000000000..e56430a04 --- /dev/null +++ b/vendor/googlebenchmark/cmake/Modules/FindLLVMNm.cmake @@ -0,0 +1,16 @@ +include(FeatureSummary) + +find_program(LLVMNM_EXECUTABLE + NAMES llvm-nm + DOC "The llvm-nm executable" + ) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LLVMNm + DEFAULT_MSG + LLVMNM_EXECUTABLE) + +SET_PACKAGE_PROPERTIES(LLVMNm PROPERTIES + URL https://llvm.org/docs/CommandGuide/llvm-nm.html + DESCRIPTION "list LLVM bitcode and object file’s symbol table" +) diff --git a/vendor/googlebenchmark/cmake/Modules/FindLLVMRanLib.cmake b/vendor/googlebenchmark/cmake/Modules/FindLLVMRanLib.cmake new file mode 100644 index 000000000..7b53e1a79 --- /dev/null +++ b/vendor/googlebenchmark/cmake/Modules/FindLLVMRanLib.cmake @@ -0,0 +1,15 @@ +include(FeatureSummary) + +find_program(LLVMRANLIB_EXECUTABLE + NAMES llvm-ranlib + DOC "The llvm-ranlib executable" + ) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LLVMRanLib + DEFAULT_MSG + LLVMRANLIB_EXECUTABLE) + +SET_PACKAGE_PROPERTIES(LLVMRanLib PROPERTIES + DESCRIPTION "generate index for LLVM archive" +) diff --git a/vendor/googlebenchmark/cmake/Modules/FindPFM.cmake b/vendor/googlebenchmark/cmake/Modules/FindPFM.cmake new file mode 100644 index 000000000..4c1ce938f --- /dev/null +++ b/vendor/googlebenchmark/cmake/Modules/FindPFM.cmake @@ -0,0 +1,28 @@ +# If successful, the following variables will be defined: +# PFM_FOUND. 
+# PFM_LIBRARIES +# PFM_INCLUDE_DIRS +# the following target will be defined: +# PFM::libpfm + +include(FeatureSummary) +include(FindPackageHandleStandardArgs) + +set_package_properties(PFM PROPERTIES + URL http://perfmon2.sourceforge.net/ + DESCRIPTION "A helper library to develop monitoring tools" + PURPOSE "Used to program specific performance monitoring events") + +find_library(PFM_LIBRARY NAMES pfm) +find_path(PFM_INCLUDE_DIR NAMES perfmon/pfmlib.h) + +find_package_handle_standard_args(PFM REQUIRED_VARS PFM_LIBRARY PFM_INCLUDE_DIR) + +if (PFM_FOUND AND NOT TARGET PFM::libpfm) + add_library(PFM::libpfm UNKNOWN IMPORTED) + set_target_properties(PFM::libpfm PROPERTIES + IMPORTED_LOCATION "${PFM_LIBRARY}" + INTERFACE_INCLUDE_DIRECTORIES "${PFM_INCLUDE_DIR}") +endif() + +mark_as_advanced(PFM_LIBRARY PFM_INCLUDE_DIR) diff --git a/vendor/googlebenchmark/cmake/benchmark.pc.in b/vendor/googlebenchmark/cmake/benchmark.pc.in new file mode 100644 index 000000000..043f2fc75 --- /dev/null +++ b/vendor/googlebenchmark/cmake/benchmark.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ + +Name: @PROJECT_NAME@ +Description: Google microbenchmark framework +Version: @VERSION@ + +Libs: -L${libdir} -lbenchmark +Libs.private: -lpthread @BENCHMARK_PRIVATE_LINK_LIBRARIES@ +Cflags: -I${includedir} diff --git a/vendor/googlebenchmark/cmake/benchmark_main.pc.in b/vendor/googlebenchmark/cmake/benchmark_main.pc.in new file mode 100644 index 000000000..a90f3cd06 --- /dev/null +++ b/vendor/googlebenchmark/cmake/benchmark_main.pc.in @@ -0,0 +1,7 @@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ + +Name: @PROJECT_NAME@ +Description: Google microbenchmark framework (with main() function) +Version: @VERSION@ +Requires: benchmark +Libs: -L${libdir} -lbenchmark_main diff --git a/vendor/googlebenchmark/cmake/gnu_posix_regex.cpp b/vendor/googlebenchmark/cmake/gnu_posix_regex.cpp new file mode 100644 index 000000000..b5b91cdab --- /dev/null +++ b/vendor/googlebenchmark/cmake/gnu_posix_regex.cpp @@ -0,0 +1,12 @@ +#include +#include +int main() { + std::string str = "test0159"; + regex_t re; + int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + return ec; + } + return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0; +} + diff --git a/vendor/googlebenchmark/cmake/llvm-toolchain.cmake b/vendor/googlebenchmark/cmake/llvm-toolchain.cmake new file mode 100644 index 000000000..fc119e52f --- /dev/null +++ b/vendor/googlebenchmark/cmake/llvm-toolchain.cmake @@ -0,0 +1,8 @@ +find_package(LLVMAr REQUIRED) +set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE) + +find_package(LLVMNm REQUIRED) +set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE) + +find_package(LLVMRanLib REQUIRED) +set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE) diff --git a/vendor/googlebenchmark/cmake/posix_regex.cpp b/vendor/googlebenchmark/cmake/posix_regex.cpp new file mode 100644 index 000000000..466dc6256 --- /dev/null +++ b/vendor/googlebenchmark/cmake/posix_regex.cpp @@ -0,0 +1,14 @@ +#include +#include +int main() { + std::string str = "test0159"; + regex_t re; + int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + return ec; + } + int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? 
-1 : 0; + regfree(&re); + return ret; +} + diff --git a/vendor/googlebenchmark/cmake/pthread_affinity.cpp b/vendor/googlebenchmark/cmake/pthread_affinity.cpp new file mode 100644 index 000000000..7b143bc02 --- /dev/null +++ b/vendor/googlebenchmark/cmake/pthread_affinity.cpp @@ -0,0 +1,16 @@ +#include +int main() { + cpu_set_t set; + CPU_ZERO(&set); + for (int i = 0; i < CPU_SETSIZE; ++i) { + CPU_SET(i, &set); + CPU_CLR(i, &set); + } + pthread_t self = pthread_self(); + int ret; + ret = pthread_getaffinity_np(self, sizeof(set), &set); + if (ret != 0) return ret; + ret = pthread_setaffinity_np(self, sizeof(set), &set); + if (ret != 0) return ret; + return 0; +} diff --git a/vendor/googlebenchmark/cmake/split_list.cmake b/vendor/googlebenchmark/cmake/split_list.cmake new file mode 100644 index 000000000..67aed3fdc --- /dev/null +++ b/vendor/googlebenchmark/cmake/split_list.cmake @@ -0,0 +1,3 @@ +macro(split_list listname) + string(REPLACE ";" " " ${listname} "${${listname}}") +endmacro() diff --git a/vendor/googlebenchmark/cmake/std_regex.cpp b/vendor/googlebenchmark/cmake/std_regex.cpp new file mode 100644 index 000000000..696f2a26b --- /dev/null +++ b/vendor/googlebenchmark/cmake/std_regex.cpp @@ -0,0 +1,10 @@ +#include +#include +int main() { + const std::string str = "test0159"; + std::regex re; + re = std::regex("^[a-z]+[0-9]+$", + std::regex_constants::extended | std::regex_constants::nosubs); + return std::regex_search(str, re) ? 0 : -1; +} + diff --git a/vendor/googlebenchmark/cmake/steady_clock.cpp b/vendor/googlebenchmark/cmake/steady_clock.cpp new file mode 100644 index 000000000..66d50d17e --- /dev/null +++ b/vendor/googlebenchmark/cmake/steady_clock.cpp @@ -0,0 +1,7 @@ +#include + +int main() { + typedef std::chrono::steady_clock Clock; + Clock::time_point tp = Clock::now(); + ((void)tp); +} diff --git a/vendor/googlebenchmark/cmake/thread_safety_attributes.cpp b/vendor/googlebenchmark/cmake/thread_safety_attributes.cpp new file mode 100644 index 000000000..46161babd --- /dev/null +++ b/vendor/googlebenchmark/cmake/thread_safety_attributes.cpp @@ -0,0 +1,4 @@ +#define HAVE_THREAD_SAFETY_ATTRIBUTES +#include "../src/mutex.h" + +int main() {} diff --git a/vendor/googlebenchmark/include/benchmark/benchmark.h b/vendor/googlebenchmark/include/benchmark/benchmark.h new file mode 100644 index 000000000..7dd72e27b --- /dev/null +++ b/vendor/googlebenchmark/include/benchmark/benchmark.h @@ -0,0 +1,2060 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Support for registering benchmarks for functions. 
+ +/* Example usage: +// Define a function that executes the code to be measured a +// specified number of times: +static void BM_StringCreation(benchmark::State& state) { + for (auto _ : state) + std::string empty_string; +} + +// Register the function as a benchmark +BENCHMARK(BM_StringCreation); + +// Define another benchmark +static void BM_StringCopy(benchmark::State& state) { + std::string x = "hello"; + for (auto _ : state) + std::string copy(x); +} +BENCHMARK(BM_StringCopy); + +// Augment the main() program to invoke benchmarks if specified +// via the --benchmark_filter command line flag. E.g., +// my_unittest --benchmark_filter=all +// my_unittest --benchmark_filter=BM_StringCreation +// my_unittest --benchmark_filter=String +// my_unittest --benchmark_filter='Copy|Creation' +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + benchmark::RunSpecifiedBenchmarks(); + benchmark::Shutdown(); + return 0; +} + +// Sometimes a family of microbenchmarks can be implemented with +// just one routine that takes an extra argument to specify which +// one of the family of benchmarks to run. For example, the following +// code defines a family of microbenchmarks for measuring the speed +// of memcpy() calls of different lengths: + +static void BM_memcpy(benchmark::State& state) { + char* src = new char[state.range(0)]; char* dst = new char[state.range(0)]; + memset(src, 'x', state.range(0)); + for (auto _ : state) + memcpy(dst, src, state.range(0)); + state.SetBytesProcessed(state.iterations() * state.range(0)); + delete[] src; delete[] dst; +} +BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10); + +// The preceding code is quite repetitive, and can be replaced with the +// following short-hand. The following invocation will pick a few +// appropriate arguments in the specified range and will generate a +// microbenchmark for each such argument. +BENCHMARK(BM_memcpy)->Range(8, 8<<10); + +// You might have a microbenchmark that depends on two inputs. For +// example, the following code defines a family of microbenchmarks for +// measuring the speed of set insertion. +static void BM_SetInsert(benchmark::State& state) { + set data; + for (auto _ : state) { + state.PauseTiming(); + data = ConstructRandomSet(state.range(0)); + state.ResumeTiming(); + for (int j = 0; j < state.range(1); ++j) + data.insert(RandomNumber()); + } +} +BENCHMARK(BM_SetInsert) + ->Args({1<<10, 128}) + ->Args({2<<10, 128}) + ->Args({4<<10, 128}) + ->Args({8<<10, 128}) + ->Args({1<<10, 512}) + ->Args({2<<10, 512}) + ->Args({4<<10, 512}) + ->Args({8<<10, 512}); + +// The preceding code is quite repetitive, and can be replaced with +// the following short-hand. The following macro will pick a few +// appropriate arguments in the product of the two specified ranges +// and will generate a microbenchmark for each such pair. +BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); + +// For more complex patterns of inputs, passing a custom function +// to Apply allows programmatic specification of an +// arbitrary set of arguments to run the microbenchmark on. +// The following example enumerates a dense range on +// one parameter, and a sparse range on the second. 
+static void CustomArguments(benchmark::internal::Benchmark* b) { + for (int i = 0; i <= 10; ++i) + for (int j = 32; j <= 1024*1024; j *= 8) + b->Args({i, j}); +} +BENCHMARK(BM_SetInsert)->Apply(CustomArguments); + +// Templated microbenchmarks work the same way: +// Produce then consume 'size' messages 'iters' times +// Measures throughput in the absence of multiprogramming. +template int BM_Sequential(benchmark::State& state) { + Q q; + typename Q::value_type v; + for (auto _ : state) { + for (int i = state.range(0); i--; ) + q.push(v); + for (int e = state.range(0); e--; ) + q.Wait(&v); + } + // actually messages, not bytes: + state.SetBytesProcessed(state.iterations() * state.range(0)); +} +BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue)->Range(1<<0, 1<<10); + +Use `Benchmark::MinTime(double t)` to set the minimum time used to run the +benchmark. This option overrides the `benchmark_min_time` flag. + +void BM_test(benchmark::State& state) { + ... body ... +} +BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds. + +In a multithreaded test, it is guaranteed that none of the threads will start +until all have reached the loop start, and all will have finished before any +thread exits the loop body. As such, any global setup or teardown you want to +do can be wrapped in a check against the thread index: + +static void BM_MultiThreaded(benchmark::State& state) { + if (state.thread_index() == 0) { + // Setup code here. + } + for (auto _ : state) { + // Run the test as normal. + } + if (state.thread_index() == 0) { + // Teardown code here. + } +} +BENCHMARK(BM_MultiThreaded)->Threads(4); + + +If a benchmark runs a few milliseconds it may be hard to visually compare the +measured times, since the output data is given in nanoseconds per default. In +order to manually set the time unit, you can specify it manually: + +BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); +*/ + +#ifndef BENCHMARK_BENCHMARK_H_ +#define BENCHMARK_BENCHMARK_H_ + +// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. +#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define BENCHMARK_HAS_CXX11 +#endif + +// This _MSC_VER check should detect VS 2017 v15.3 and newer. +#if __cplusplus >= 201703L || \ + (defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L) +#define BENCHMARK_HAS_CXX17 +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/export.h" + +#if defined(BENCHMARK_HAS_CXX11) +#include +#include +#include +#include +#endif + +#if defined(_MSC_VER) +#include // for _ReadWriteBarrier +#endif + +#ifndef BENCHMARK_HAS_CXX11 +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + TypeName& operator=(const TypeName&) +#else +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete +#endif + +#ifdef BENCHMARK_HAS_CXX17 +#define BENCHMARK_UNUSED [[maybe_unused]] +#elif defined(__GNUC__) || defined(__clang__) +#define BENCHMARK_UNUSED __attribute__((unused)) +#else +#define BENCHMARK_UNUSED +#endif + +// Used to annotate functions, methods and classes so they +// are not optimized by the compiler. 
Useful for tests +// where you expect loops to stay in place churning cycles +#if defined(__clang__) +#define BENCHMARK_DONT_OPTIMIZE __attribute__((optnone)) +#elif defined(__GNUC__) || defined(__GNUG__) +#define BENCHMARK_DONT_OPTIMIZE __attribute__((optimize(0))) +#else +// MSVC & Intel do not have a no-optimize attribute, only line pragmas +#define BENCHMARK_DONT_OPTIMIZE +#endif + +#if defined(__GNUC__) || defined(__clang__) +#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline)) +#elif defined(_MSC_VER) && !defined(__clang__) +#define BENCHMARK_ALWAYS_INLINE __forceinline +#define __func__ __FUNCTION__ +#else +#define BENCHMARK_ALWAYS_INLINE +#endif + +#define BENCHMARK_INTERNAL_TOSTRING2(x) #x +#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) + +// clang-format off +#if (defined(__GNUC__) && !defined(__NVCC__) && !defined(__NVCOMPILER)) || defined(__clang__) +#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) +#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop") +#elif defined(__NVCOMPILER) +#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) +#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING \ + _Pragma("diagnostic push") \ + _Pragma("diag_suppress deprecated_entity_with_custom_message") +#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("diagnostic pop") +#else +#define BENCHMARK_BUILTIN_EXPECT(x, y) x +#define BENCHMARK_DEPRECATED_MSG(msg) +#define BENCHMARK_WARNING_MSG(msg) \ + __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \ + __LINE__) ") : warning note: " msg)) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING +#define BENCHMARK_RESTORE_DEPRECATED_WARNING +#endif +// clang-format on + +#if defined(__GNUC__) && !defined(__clang__) +#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#if defined(__GNUC__) || __has_builtin(__builtin_unreachable) +#define BENCHMARK_UNREACHABLE() __builtin_unreachable() +#elif defined(_MSC_VER) +#define BENCHMARK_UNREACHABLE() __assume(false) +#else +#define BENCHMARK_UNREACHABLE() ((void)0) +#endif + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_OVERRIDE override +#else +#define BENCHMARK_OVERRIDE +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + +namespace benchmark { +class BenchmarkReporter; + +// Default number of minimum benchmark running time in seconds. +const char kDefaultMinTimeStr[] = "0.5s"; + +// Returns the version of the library. +BENCHMARK_EXPORT std::string GetBenchmarkVersion(); + +BENCHMARK_EXPORT void PrintDefaultHelp(); + +BENCHMARK_EXPORT void Initialize(int* argc, char** argv, + void (*HelperPrinterf)() = PrintDefaultHelp); +BENCHMARK_EXPORT void Shutdown(); + +// Report to stdout all arguments in 'argv' as unrecognized except the first. +// Returns true there is at least on unrecognized argument (i.e. 'argc' > 1). +BENCHMARK_EXPORT bool ReportUnrecognizedArguments(int argc, char** argv); + +// Returns the current value of --benchmark_filter. +BENCHMARK_EXPORT std::string GetBenchmarkFilter(); + +// Sets a new value to --benchmark_filter. 
(This will override this flag's +// current value). +// Should be called after `benchmark::Initialize()`, as +// `benchmark::Initialize()` will override the flag's value. +BENCHMARK_EXPORT void SetBenchmarkFilter(std::string value); + +// Returns the current value of --v (command line value for verbosity). +BENCHMARK_EXPORT int32_t GetBenchmarkVerbosity(); + +// Creates a default display reporter. Used by the library when no display +// reporter is provided, but also made available for external use in case a +// custom reporter should respect the `--benchmark_format` flag as a fallback +BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter(); + +// Generate a list of benchmarks matching the specified --benchmark_filter flag +// and if --benchmark_list_tests is specified return after printing the name +// of each matching benchmark. Otherwise run each matching benchmark and +// report the results. +// +// spec : Specify the benchmarks to run. If users do not specify this arg, +// then the value of FLAGS_benchmark_filter +// will be used. +// +// The second and third overload use the specified 'display_reporter' and +// 'file_reporter' respectively. 'file_reporter' will write to the file +// specified +// by '--benchmark_out'. If '--benchmark_out' is not given the +// 'file_reporter' is ignored. +// +// RETURNS: The number of matching benchmarks. +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(std::string spec); + +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::string spec); + +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks( + BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, std::string spec); + +// TimeUnit is passed to a benchmark in order to specify the order of magnitude +// for the measured time. +enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; + +BENCHMARK_EXPORT TimeUnit GetDefaultTimeUnit(); + +// Sets the default time unit the benchmarks use +// Has to be called before the benchmark loop to take effect +BENCHMARK_EXPORT void SetDefaultTimeUnit(TimeUnit unit); + +// If a MemoryManager is registered (via RegisterMemoryManager()), +// it can be used to collect and report allocation metrics for a run of the +// benchmark. +class MemoryManager { + public: + static const int64_t TombstoneValue; + + struct Result { + Result() + : num_allocs(0), + max_bytes_used(0), + total_allocated_bytes(TombstoneValue), + net_heap_growth(TombstoneValue) {} + + // The number of allocations made in total between Start and Stop. + int64_t num_allocs; + + // The peak memory use between Start and Stop. + int64_t max_bytes_used; + + // The total memory allocated, in bytes, between Start and Stop. + // Init'ed to TombstoneValue if metric not available. + int64_t total_allocated_bytes; + + // The net changes in memory, in bytes, between Start and Stop. + // ie., total_allocated_bytes - total_deallocated_bytes. + // Init'ed to TombstoneValue if metric not available. + int64_t net_heap_growth; + }; + + virtual ~MemoryManager() {} + + // Implement this to start recording allocation information. + virtual void Start() = 0; + + // Implement this to stop recording and fill out the given Result structure. 
+ virtual void Stop(Result& result) = 0; +}; + +// Register a MemoryManager instance that will be used to collect and report +// allocation measurements for benchmark runs. +BENCHMARK_EXPORT +void RegisterMemoryManager(MemoryManager* memory_manager); + +// If a ProfilerManager is registered (via RegisterProfilerManager()), the +// benchmark will be run an additional time under the profiler to collect and +// report profile metrics for the run of the benchmark. +class ProfilerManager { + public: + virtual ~ProfilerManager() {} + + // This is called after `Setup()` code and right before the benchmark is run. + virtual void AfterSetupStart() = 0; + + // This is called before `Teardown()` code and right after the benchmark + // completes. + virtual void BeforeTeardownStop() = 0; +}; + +// Register a ProfilerManager instance that will be used to collect and report +// profile measurements for benchmark runs. +BENCHMARK_EXPORT +void RegisterProfilerManager(ProfilerManager* profiler_manager); + +// Add a key-value pair to output as part of the context stanza in the report. +BENCHMARK_EXPORT +void AddCustomContext(const std::string& key, const std::string& value); + +namespace internal { +class Benchmark; +class BenchmarkImp; +class BenchmarkFamilies; + +BENCHMARK_EXPORT std::map*& GetGlobalContext(); + +BENCHMARK_EXPORT +void UseCharPointer(char const volatile*); + +// Take ownership of the pointer and register the benchmark. Return the +// registered benchmark. +BENCHMARK_EXPORT Benchmark* RegisterBenchmarkInternal(Benchmark*); + +// Ensure that the standard streams are properly initialized in every TU. +BENCHMARK_EXPORT int InitializeStreams(); +BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); + +} // namespace internal + +#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \ + defined(__EMSCRIPTEN__) +#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#endif + +// Force the compiler to flush pending writes to global memory. Acts as an +// effective read/write barrier +#ifdef BENCHMARK_HAS_CXX11 +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { + std::atomic_signal_fence(std::memory_order_acq_rel); +} +#endif + +// The DoNotOptimize(...) function can be used to prevent a value or +// expression from being optimized away by the compiler. This function is +// intended to add little to no overhead. +// See: https://youtu.be/nXaxk27zwlk?t=2441 +#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#if !defined(__GNUC__) || defined(__llvm__) || defined(__INTEL_COMPILER) +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { +#if defined(__clang__) + asm volatile("" : "+r,m"(value) : : "memory"); +#else + asm volatile("" : "+m,r"(value) : : "memory"); +#endif +} + +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { +#if defined(__clang__) + asm volatile("" : "+r,m"(value) : : "memory"); +#else + asm volatile("" : "+m,r"(value) : : "memory"); +#endif +} +#endif +#elif defined(BENCHMARK_HAS_CXX11) && (__GNUC__ >= 5) +// Workaround for a bug with full argument copy overhead with GCC. 
+// See: #1340 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105519 +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); +} + +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m,r"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp&& value) { + asm volatile("" : "+m,r"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp&& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +#else +// Fallback for GCC < 5. Can add some overhead because the compiler is forced +// to use memory operations instead of operations with registers. +// TODO: Remove if GCC < 5 will be unsupported. +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { + asm volatile("" : "+m"(value) : : "memory"); +} +#endif +#endif + +#ifndef BENCHMARK_HAS_CXX11 +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { + asm volatile("" : : : "memory"); +} +#endif +#elif defined(_MSC_VER) +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + internal::UseCharPointer(&reinterpret_cast(value)); + _ReadWriteBarrier(); +} + +#ifndef BENCHMARK_HAS_CXX11 +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } +#endif +#else +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} +#else +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} +#endif +// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11. 
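// ---------------------------------------------------------------------------
// Editorial note (not part of the upstream header): a minimal sketch of how
// DoNotOptimize() and ClobberMemory(), declared in the region above, are
// typically used inside a benchmark loop so the compiler cannot elide the
// measured work. The function and variable names below are illustrative only.
//
//   static void BM_VectorPushBack(benchmark::State& state) {
//     for (auto _ : state) {
//       std::vector<int> v;
//       v.reserve(1);
//       // Keep the buffer "observable" so the allocation is not optimized out.
//       benchmark::DoNotOptimize(v.data());
//       v.push_back(42);
//       // Force pending writes to be treated as visible side effects.
//       benchmark::ClobberMemory();
//     }
//   }
//   BENCHMARK(BM_VectorPushBack);
// ---------------------------------------------------------------------------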
+#endif + +// This class is used for user-defined counters. +class Counter { + public: + enum Flags { + kDefaults = 0, + // Mark the counter as a rate. It will be presented divided + // by the duration of the benchmark. + kIsRate = 1 << 0, + // Mark the counter as a thread-average quantity. It will be + // presented divided by the number of threads. + kAvgThreads = 1 << 1, + // Mark the counter as a thread-average rate. See above. + kAvgThreadsRate = kIsRate | kAvgThreads, + // Mark the counter as a constant value, valid/same for *every* iteration. + // When reporting, it will be *multiplied* by the iteration count. + kIsIterationInvariant = 1 << 2, + // Mark the counter as a constant rate. + // When reporting, it will be *multiplied* by the iteration count + // and then divided by the duration of the benchmark. + kIsIterationInvariantRate = kIsRate | kIsIterationInvariant, + // Mark the counter as a iteration-average quantity. + // It will be presented divided by the number of iterations. + kAvgIterations = 1 << 3, + // Mark the counter as a iteration-average rate. See above. + kAvgIterationsRate = kIsRate | kAvgIterations, + + // In the end, invert the result. This is always done last! + kInvert = 1 << 31 + }; + + enum OneK { + // 1'000 items per 1k + kIs1000 = 1000, + // 1'024 items per 1k + kIs1024 = 1024 + }; + + double value; + Flags flags; + OneK oneK; + + BENCHMARK_ALWAYS_INLINE + Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000) + : value(v), flags(f), oneK(k) {} + + BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; } + BENCHMARK_ALWAYS_INLINE operator double&() { return value; } +}; + +// A helper for user code to create unforeseen combinations of Flags, without +// having to do this cast manually each time, or providing this operator. +Counter::Flags inline operator|(const Counter::Flags& LHS, + const Counter::Flags& RHS) { + return static_cast(static_cast(LHS) | + static_cast(RHS)); +} + +// This is the container for the user-defined counters. +typedef std::map UserCounters; + +// BigO is passed to a benchmark in order to specify the asymptotic +// computational +// complexity for the benchmark. In case oAuto is selected, complexity will be +// calculated automatically to the best fit. +enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; + +typedef int64_t ComplexityN; + +typedef int64_t IterationCount; + +enum StatisticUnit { kTime, kPercentage }; + +// BigOFunc is passed to a benchmark in order to specify the asymptotic +// computational complexity for the benchmark. +typedef double(BigOFunc)(ComplexityN); + +// StatisticsFunc is passed to a benchmark in order to compute some descriptive +// statistics over all the measurements of some type +typedef double(StatisticsFunc)(const std::vector&); + +namespace internal { +struct Statistics { + std::string name_; + StatisticsFunc* compute_; + StatisticUnit unit_; + + Statistics(const std::string& name, StatisticsFunc* compute, + StatisticUnit unit = kTime) + : name_(name), compute_(compute), unit_(unit) {} +}; + +class BenchmarkInstance; +class ThreadTimer; +class ThreadManager; +class PerfCountersMeasurement; + +enum AggregationReportMode +#if defined(BENCHMARK_HAS_CXX11) + : unsigned +#else +#endif +{ + // The mode has not been manually specified + ARM_Unspecified = 0, + // The mode is user-specified. + // This may or may not be set when the following bit-flags are set. + ARM_Default = 1U << 0U, + // File reporter should only output aggregates. 
+ ARM_FileReportAggregatesOnly = 1U << 1U, + // Display reporter should only output aggregates + ARM_DisplayReportAggregatesOnly = 1U << 2U, + // Both reporters should only display aggregates. + ARM_ReportAggregatesOnly = + ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly +}; + +enum Skipped +#if defined(BENCHMARK_HAS_CXX11) + : unsigned +#endif +{ + NotSkipped = 0, + SkippedWithMessage, + SkippedWithError +}; + +} // namespace internal + +// State is passed to a running Benchmark and contains state for the +// benchmark to use. +class BENCHMARK_EXPORT State { + public: + struct StateIterator; + friend struct StateIterator; + + // Returns iterators used to run each iteration of a benchmark using a + // C++11 ranged-based for loop. These functions should not be called directly. + // + // REQUIRES: The benchmark has not started running yet. Neither begin nor end + // have been called previously. + // + // NOTE: KeepRunning may not be used after calling either of these functions. + inline BENCHMARK_ALWAYS_INLINE StateIterator begin(); + inline BENCHMARK_ALWAYS_INLINE StateIterator end(); + + // Returns true if the benchmark should continue through another iteration. + // NOTE: A benchmark may not return from the test until KeepRunning() has + // returned false. + inline bool KeepRunning(); + + // Returns true iff the benchmark should run n more iterations. + // REQUIRES: 'n' > 0. + // NOTE: A benchmark must not return from the test until KeepRunningBatch() + // has returned false. + // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations. + // + // Intended usage: + // while (state.KeepRunningBatch(1000)) { + // // process 1000 elements + // } + inline bool KeepRunningBatch(IterationCount n); + + // REQUIRES: timer is running and 'SkipWithMessage(...)' or + // 'SkipWithError(...)' has not been called by the current thread. + // Stop the benchmark timer. If not called, the timer will be + // automatically stopped after the last iteration of the benchmark loop. + // + // For threaded benchmarks the PauseTiming() function only pauses the timing + // for the current thread. + // + // NOTE: The "real time" measurement is per-thread. If different threads + // report different measurements the largest one is reported. + // + // NOTE: PauseTiming()/ResumeTiming() are relatively + // heavyweight, and so their use should generally be avoided + // within each benchmark iteration, if possible. + void PauseTiming(); + + // REQUIRES: timer is not running and 'SkipWithMessage(...)' or + // 'SkipWithError(...)' has not been called by the current thread. + // Start the benchmark timer. The timer is NOT running on entrance to the + // benchmark function. It begins running after control flow enters the + // benchmark loop. + // + // NOTE: PauseTiming()/ResumeTiming() are relatively + // heavyweight, and so their use should generally be avoided + // within each benchmark iteration, if possible. + void ResumeTiming(); + + // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been + // called previously by the current thread. + // Report the benchmark as resulting in being skipped with the specified + // 'msg'. + // After this call the user may explicitly 'return' from the benchmark. + // + // If the ranged-for style of benchmark loop is used, the user must explicitly + // break from the loop, otherwise all future iterations will be run. + // If the 'KeepRunning()' loop is used the current thread will automatically + // exit the loop at the end of the current iteration. 
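// ---------------------------------------------------------------------------
// Editorial note (not part of the upstream header): a hedged sketch of the
// skip/error reporting described in the surrounding comments, using the
// ranged-for loop style where the user must leave the scope explicitly.
// The file name and setup code are hypothetical.
//
//   static void BM_ReadFile(benchmark::State& state) {
//     std::ifstream input("data.bin", std::ios::binary);  // hypothetical input
//     if (!input) {
//       state.SkipWithError("could not open data.bin");
//       return;  // nothing useful to measure
//     }
//     for (auto _ : state) {
//       // ... measured work on 'input' ...
//     }
//   }
//   BENCHMARK(BM_ReadFile);
// ---------------------------------------------------------------------------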
+ // + // For threaded benchmarks only the current thread stops executing and future + // calls to `KeepRunning()` will block until all threads have completed + // the `KeepRunning()` loop. If multiple threads report being skipped only the + // first skip message is used. + // + // NOTE: Calling 'SkipWithMessage(...)' does not cause the benchmark to exit + // the current scope immediately. If the function is called from within + // the 'KeepRunning()' loop the current iteration will finish. It is the users + // responsibility to exit the scope as needed. + void SkipWithMessage(const std::string& msg); + + // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been + // called previously by the current thread. + // Report the benchmark as resulting in an error with the specified 'msg'. + // After this call the user may explicitly 'return' from the benchmark. + // + // If the ranged-for style of benchmark loop is used, the user must explicitly + // break from the loop, otherwise all future iterations will be run. + // If the 'KeepRunning()' loop is used the current thread will automatically + // exit the loop at the end of the current iteration. + // + // For threaded benchmarks only the current thread stops executing and future + // calls to `KeepRunning()` will block until all threads have completed + // the `KeepRunning()` loop. If multiple threads report an error only the + // first error message is used. + // + // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit + // the current scope immediately. If the function is called from within + // the 'KeepRunning()' loop the current iteration will finish. It is the users + // responsibility to exit the scope as needed. + void SkipWithError(const std::string& msg); + + // Returns true if 'SkipWithMessage(...)' or 'SkipWithError(...)' was called. + bool skipped() const { return internal::NotSkipped != skipped_; } + + // Returns true if an error has been reported with 'SkipWithError(...)'. + bool error_occurred() const { return internal::SkippedWithError == skipped_; } + + // REQUIRES: called exactly once per iteration of the benchmarking loop. + // Set the manually measured time for this benchmark iteration, which + // is used instead of automatically measured time if UseManualTime() was + // specified. + // + // For threaded benchmarks the final value will be set to the largest + // reported values. + void SetIterationTime(double seconds); + + // Set the number of bytes processed by the current benchmark + // execution. This routine is typically called once at the end of a + // throughput oriented benchmark. + // + // REQUIRES: a benchmark has exited its benchmarking loop. + BENCHMARK_ALWAYS_INLINE + void SetBytesProcessed(int64_t bytes) { + counters["bytes_per_second"] = + Counter(static_cast(bytes), Counter::kIsRate, Counter::kIs1024); + } + + BENCHMARK_ALWAYS_INLINE + int64_t bytes_processed() const { + if (counters.find("bytes_per_second") != counters.end()) + return static_cast(counters.at("bytes_per_second")); + return 0; + } + + // If this routine is called with complexity_n > 0 and complexity report is + // requested for the + // family benchmark, then current benchmark will be part of the computation + // and complexity_n will + // represent the length of N. 
+ BENCHMARK_ALWAYS_INLINE + void SetComplexityN(ComplexityN complexity_n) { + complexity_n_ = complexity_n; + } + + BENCHMARK_ALWAYS_INLINE + ComplexityN complexity_length_n() const { return complexity_n_; } + + // If this routine is called with items > 0, then an items/s + // label is printed on the benchmark report line for the currently + // executing benchmark. It is typically called at the end of a processing + // benchmark where a processing items/second output is desired. + // + // REQUIRES: a benchmark has exited its benchmarking loop. + BENCHMARK_ALWAYS_INLINE + void SetItemsProcessed(int64_t items) { + counters["items_per_second"] = + Counter(static_cast(items), benchmark::Counter::kIsRate); + } + + BENCHMARK_ALWAYS_INLINE + int64_t items_processed() const { + if (counters.find("items_per_second") != counters.end()) + return static_cast(counters.at("items_per_second")); + return 0; + } + + // If this routine is called, the specified label is printed at the + // end of the benchmark report line for the currently executing + // benchmark. Example: + // static void BM_Compress(benchmark::State& state) { + // ... + // double compress = input_size / output_size; + // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression)); + // } + // Produces output that looks like: + // BM_Compress 50 50 14115038 compress:27.3% + // + // REQUIRES: a benchmark has exited its benchmarking loop. + void SetLabel(const std::string& label); + + // Range arguments for this run. CHECKs if the argument has been set. + BENCHMARK_ALWAYS_INLINE + int64_t range(std::size_t pos = 0) const { + assert(range_.size() > pos); + return range_[pos]; + } + + BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead") + int64_t range_x() const { return range(0); } + + BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead") + int64_t range_y() const { return range(1); } + + // Number of threads concurrently executing the benchmark. + BENCHMARK_ALWAYS_INLINE + int threads() const { return threads_; } + + // Index of the executing thread. Values from [0, threads). + BENCHMARK_ALWAYS_INLINE + int thread_index() const { return thread_index_; } + + BENCHMARK_ALWAYS_INLINE + IterationCount iterations() const { + if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { + return 0; + } + return max_iterations - total_iterations_ + batch_leftover_; + } + + BENCHMARK_ALWAYS_INLINE + std::string name() const { return name_; } + + private: + // items we expect on the first cache line (ie 64 bytes of the struct) + // When total_iterations_ is 0, KeepRunning() and friends will return false. + // May be larger than max_iterations. + IterationCount total_iterations_; + + // When using KeepRunningBatch(), batch_leftover_ holds the number of + // iterations beyond max_iters that were run. Used to track + // completed_iterations_ accurately. + IterationCount batch_leftover_; + + public: + const IterationCount max_iterations; + + private: + bool started_; + bool finished_; + internal::Skipped skipped_; + + // items we don't need on the first cache line + std::vector range_; + + ComplexityN complexity_n_; + + public: + // Container for user-defined counters. + UserCounters counters; + + private: + State(std::string name, IterationCount max_iters, + const std::vector& ranges, int thread_i, int n_threads, + internal::ThreadTimer* timer, internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement); + + void StartKeepRunning(); + // Implementation of KeepRunning() and KeepRunningBatch(). 
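// ---------------------------------------------------------------------------
// Editorial note (not part of the upstream header): a small sketch combining
// the throughput accessors declared above (SetBytesProcessed,
// SetItemsProcessed, SetLabel, range). Names such as ParsePayload are
// hypothetical and stand in for the code under test.
//
//   static void BM_Parse(benchmark::State& state) {
//     std::string payload(static_cast<size_t>(state.range(0)), 'x');
//     for (auto _ : state) {
//       ParsePayload(payload);  // hypothetical function under test
//     }
//     state.SetBytesProcessed(state.iterations() * state.range(0));
//     state.SetItemsProcessed(state.iterations());
//     state.SetLabel("payload=" + std::to_string(state.range(0)));
//   }
//   BENCHMARK(BM_Parse)->Arg(1 << 10)->Arg(1 << 20);
// ---------------------------------------------------------------------------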
+ // is_batch must be true unless n is 1. + inline bool KeepRunningInternal(IterationCount n, bool is_batch); + void FinishKeepRunning(); + + const std::string name_; + const int thread_index_; + const int threads_; + + internal::ThreadTimer* const timer_; + internal::ThreadManager* const manager_; + internal::PerfCountersMeasurement* const perf_counters_measurement_; + + friend class internal::BenchmarkInstance; +}; + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() { + return KeepRunningInternal(1, /*is_batch=*/false); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) { + return KeepRunningInternal(n, /*is_batch=*/true); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n, + bool is_batch) { + // total_iterations_ is set to 0 by the constructor, and always set to a + // nonzero value by StartKepRunning(). + assert(n > 0); + // n must be 1 unless is_batch is true. + assert(is_batch || n == 1); + if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) { + total_iterations_ -= n; + return true; + } + if (!started_) { + StartKeepRunning(); + if (!skipped() && total_iterations_ >= n) { + total_iterations_ -= n; + return true; + } + } + // For non-batch runs, total_iterations_ must be 0 by now. + if (is_batch && total_iterations_ != 0) { + batch_leftover_ = n - total_iterations_; + total_iterations_ = 0; + return true; + } + FinishKeepRunning(); + return false; +} + +struct State::StateIterator { + struct BENCHMARK_UNUSED Value {}; + typedef std::forward_iterator_tag iterator_category; + typedef Value value_type; + typedef Value reference; + typedef Value pointer; + typedef std::ptrdiff_t difference_type; + + private: + friend class State; + BENCHMARK_ALWAYS_INLINE + StateIterator() : cached_(0), parent_() {} + + BENCHMARK_ALWAYS_INLINE + explicit StateIterator(State* st) + : cached_(st->skipped() ? 0 : st->max_iterations), parent_(st) {} + + public: + BENCHMARK_ALWAYS_INLINE + Value operator*() const { return Value(); } + + BENCHMARK_ALWAYS_INLINE + StateIterator& operator++() { + assert(cached_ > 0); + --cached_; + return *this; + } + + BENCHMARK_ALWAYS_INLINE + bool operator!=(StateIterator const&) const { + if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true; + parent_->FinishKeepRunning(); + return false; + } + + private: + IterationCount cached_; + State* const parent_; +}; + +inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() { + return StateIterator(this); +} +inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() { + StartKeepRunning(); + return StateIterator(); +} + +namespace internal { + +typedef void(Function)(State&); + +// ------------------------------------------------------ +// Benchmark registration object. The BENCHMARK() macro expands +// into an internal::Benchmark* object. Various methods can +// be called on this object to change the properties of the benchmark. +// Each method returns "this" so that multiple method calls can +// chained into one expression. +class BENCHMARK_EXPORT Benchmark { + public: + virtual ~Benchmark(); + + // Note: the following methods all return "this" so that multiple + // method calls can be chained together in one expression. + + // Specify the name of the benchmark + Benchmark* Name(const std::string& name); + + // Run this benchmark once with "x" as the extra argument passed + // to the function. + // REQUIRES: The function passed to the constructor must accept an arg1. 
+ Benchmark* Arg(int64_t x); + + // Run this benchmark with the given time unit for the generated output report + Benchmark* Unit(TimeUnit unit); + + // Run this benchmark once for a number of values picked from the + // range [start..limit]. (start and limit are always picked.) + // REQUIRES: The function passed to the constructor must accept an arg1. + Benchmark* Range(int64_t start, int64_t limit); + + // Run this benchmark once for all values in the range [start..limit] with + // specific step + // REQUIRES: The function passed to the constructor must accept an arg1. + Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1); + + // Run this benchmark once with "args" as the extra arguments passed + // to the function. + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* Args(const std::vector& args); + + // Equivalent to Args({x, y}) + // NOTE: This is a legacy C++03 interface provided for compatibility only. + // New code should use 'Args'. + Benchmark* ArgPair(int64_t x, int64_t y) { + std::vector args; + args.push_back(x); + args.push_back(y); + return Args(args); + } + + // Run this benchmark once for a number of values picked from the + // ranges [start..limit]. (starts and limits are always picked.) + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* Ranges(const std::vector >& ranges); + + // Run this benchmark once for each combination of values in the (cartesian) + // product of the supplied argument lists. + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* ArgsProduct(const std::vector >& arglists); + + // Equivalent to ArgNames({name}) + Benchmark* ArgName(const std::string& name); + + // Set the argument names to display in the benchmark name. If not called, + // only argument values will be shown. + Benchmark* ArgNames(const std::vector& names); + + // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}). + // NOTE: This is a legacy C++03 interface provided for compatibility only. + // New code should use 'Ranges'. + Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) { + std::vector > ranges; + ranges.push_back(std::make_pair(lo1, hi1)); + ranges.push_back(std::make_pair(lo2, hi2)); + return Ranges(ranges); + } + + // Have "setup" and/or "teardown" invoked once for every benchmark run. + // If the benchmark is multi-threaded (will run in k threads concurrently), + // the setup callback will be be invoked exactly once (not k times) before + // each run with k threads. Time allowing (e.g. for a short benchmark), there + // may be multiple such runs per benchmark, each run with its own + // "setup"/"teardown". + // + // If the benchmark uses different size groups of threads (e.g. via + // ThreadRange), the above will be true for each size group. + // + // The callback will be passed a State object, which includes the number + // of threads, thread-index, benchmark arguments, etc. + // + // The callback must not be NULL or self-deleting. + Benchmark* Setup(void (*setup)(const benchmark::State&)); + Benchmark* Teardown(void (*teardown)(const benchmark::State&)); + + // Pass this benchmark object to *func, which can customize + // the benchmark by calling various methods like Arg, Args, + // Threads, etc. + Benchmark* Apply(void (*func)(Benchmark* benchmark)); + + // Set the range multiplier for non-dense range. If not called, the range + // multiplier kRangeMultiplier will be used. 
+ Benchmark* RangeMultiplier(int multiplier); + + // Set the minimum amount of time to use when running this benchmark. This + // option overrides the `benchmark_min_time` flag. + // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark. + Benchmark* MinTime(double t); + + // Set the minimum amount of time to run the benchmark before taking runtimes + // of this benchmark into account. This + // option overrides the `benchmark_min_warmup_time` flag. + // REQUIRES: `t >= 0` and `Iterations` has not been called on this benchmark. + Benchmark* MinWarmUpTime(double t); + + // Specify the amount of iterations that should be run by this benchmark. + // This option overrides the `benchmark_min_time` flag. + // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark. + // + // NOTE: This function should only be used when *exact* iteration control is + // needed and never to control or limit how long a benchmark runs, where + // `--benchmark_min_time=s` or `MinTime(...)` should be used instead. + Benchmark* Iterations(IterationCount n); + + // Specify the amount of times to repeat this benchmark. This option overrides + // the `benchmark_repetitions` flag. + // REQUIRES: `n > 0` + Benchmark* Repetitions(int n); + + // Specify if each repetition of the benchmark should be reported separately + // or if only the final statistics should be reported. If the benchmark + // is not repeated then the single result is always reported. + // Applies to *ALL* reporters (display and file). + Benchmark* ReportAggregatesOnly(bool value = true); + + // Same as ReportAggregatesOnly(), but applies to display reporter only. + Benchmark* DisplayAggregatesOnly(bool value = true); + + // By default, the CPU time is measured only for the main thread, which may + // be unrepresentative if the benchmark uses threads internally. If called, + // the total CPU time spent by all the threads will be measured instead. + // By default, only the main thread CPU time will be measured. + Benchmark* MeasureProcessCPUTime(); + + // If a particular benchmark should use the Wall clock instead of the CPU time + // (be it either the CPU time of the main thread only (default), or the + // total CPU usage of the benchmark), call this method. If called, the elapsed + // (wall) time will be used to control how many iterations are run, and in the + // printing of items/second or MB/seconds values. + // If not called, the CPU time used by the benchmark will be used. + Benchmark* UseRealTime(); + + // If a benchmark must measure time manually (e.g. if GPU execution time is + // being + // measured), call this method. If called, each benchmark iteration should + // call + // SetIterationTime(seconds) to report the measured time, which will be used + // to control how many iterations are run, and in the printing of items/second + // or MB/second values. + Benchmark* UseManualTime(); + + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. + Benchmark* Complexity(BigO complexity = benchmark::oAuto); + + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. 
+ Benchmark* Complexity(BigOFunc* complexity); + + // Add this statistics to be computed over all the values of benchmark run + Benchmark* ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit = kTime); + + // Support for running multiple copies of the same benchmark concurrently + // in multiple threads. This may be useful when measuring the scaling + // of some piece of code. + + // Run one instance of this benchmark concurrently in t threads. + Benchmark* Threads(int t); + + // Pick a set of values T from [min_threads,max_threads]. + // min_threads and max_threads are always included in T. Run this + // benchmark once for each value in T. The benchmark run for a + // particular value t consists of t threads running the benchmark + // function concurrently. For example, consider: + // BENCHMARK(Foo)->ThreadRange(1,16); + // This will run the following benchmarks: + // Foo in 1 thread + // Foo in 2 threads + // Foo in 4 threads + // Foo in 8 threads + // Foo in 16 threads + Benchmark* ThreadRange(int min_threads, int max_threads); + + // For each value n in the range, run this benchmark once using n threads. + // min_threads and max_threads are always included in the range. + // stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts + // a benchmark with 1, 4, 7 and 8 threads. + Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1); + + // Equivalent to ThreadRange(NumCPUs(), NumCPUs()) + Benchmark* ThreadPerCpu(); + + virtual void Run(State& state) = 0; + + TimeUnit GetTimeUnit() const; + + protected: + explicit Benchmark(const std::string& name); + void SetName(const std::string& name); + + public: + const char* GetName() const; + int ArgsCnt() const; + const char* GetArgName(int arg) const; + + private: + friend class BenchmarkFamilies; + friend class BenchmarkInstance; + + std::string name_; + AggregationReportMode aggregation_report_mode_; + std::vector arg_names_; // Args for all benchmark runs + std::vector > args_; // Args for all benchmark runs + + TimeUnit time_unit_; + bool use_default_time_unit_; + + int range_multiplier_; + double min_time_; + double min_warmup_time_; + IterationCount iterations_; + int repetitions_; + bool measure_process_cpu_time_; + bool use_real_time_; + bool use_manual_time_; + BigO complexity_; + BigOFunc* complexity_lambda_; + std::vector statistics_; + std::vector thread_counts_; + + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_; + callback_function teardown_; + + Benchmark(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; + + Benchmark& operator=(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; +}; + +} // namespace internal + +// Create and register a benchmark with the specified 'name' that invokes +// the specified functor 'fn'. +// +// RETURNS: A pointer to the registered benchmark. +internal::Benchmark* RegisterBenchmark(const std::string& name, + internal::Function* fn); + +#if defined(BENCHMARK_HAS_CXX11) +template +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn); +#endif + +// Remove all registered benchmarks. All pointers to previously registered +// benchmarks are invalidated. +BENCHMARK_EXPORT void ClearRegisteredBenchmarks(); + +namespace internal { +// The class used to hold all Benchmarks created from static function. +// (ie those created using the BENCHMARK(...) macros. 
+class BENCHMARK_EXPORT FunctionBenchmark : public Benchmark { + public: + FunctionBenchmark(const std::string& name, Function* func) + : Benchmark(name), func_(func) {} + + void Run(State& st) BENCHMARK_OVERRIDE; + + private: + Function* func_; +}; + +#ifdef BENCHMARK_HAS_CXX11 +template +class LambdaBenchmark : public Benchmark { + public: + void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); } + + private: + template + LambdaBenchmark(const std::string& name, OLambda&& lam) + : Benchmark(name), lambda_(std::forward(lam)) {} + + LambdaBenchmark(LambdaBenchmark const&) = delete; + + template // NOLINTNEXTLINE(readability-redundant-declaration) + friend Benchmark* ::benchmark::RegisterBenchmark(const std::string&, Lam&&); + + Lambda lambda_; +}; +#endif +} // namespace internal + +inline internal::Benchmark* RegisterBenchmark(const std::string& name, + internal::Function* fn) { + // FIXME: this should be a `std::make_unique<>()` but we don't have C++14. + // codechecker_intentional [cplusplus.NewDeleteLeaks] + return internal::RegisterBenchmarkInternal( + ::new internal::FunctionBenchmark(name, fn)); +} + +#ifdef BENCHMARK_HAS_CXX11 +template +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn) { + using BenchType = + internal::LambdaBenchmark::type>; + // FIXME: this should be a `std::make_unique<>()` but we don't have C++14. + // codechecker_intentional [cplusplus.NewDeleteLeaks] + return internal::RegisterBenchmarkInternal( + ::new BenchType(name, std::forward(fn))); +} +#endif + +#if defined(BENCHMARK_HAS_CXX11) && \ + (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409) +template +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn, + Args&&... args) { + return benchmark::RegisterBenchmark( + name, [=](benchmark::State& st) { fn(st, args...); }); +} +#else +#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK +#endif + +// The base class for all fixture tests. +class Fixture : public internal::Benchmark { + public: + Fixture() : internal::Benchmark("") {} + + void Run(State& st) BENCHMARK_OVERRIDE { + this->SetUp(st); + this->BenchmarkCase(st); + this->TearDown(st); + } + + // These will be deprecated ... + virtual void SetUp(const State&) {} + virtual void TearDown(const State&) {} + // ... In favor of these. + virtual void SetUp(State& st) { SetUp(const_cast(st)); } + virtual void TearDown(State& st) { TearDown(const_cast(st)); } + + protected: + virtual void BenchmarkCase(State&) = 0; +}; +} // namespace benchmark + +// ------------------------------------------------------ +// Macro to register benchmarks + +// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1 +// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be +// empty. If X is empty the expression becomes (+1 == +0). +#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0) +#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__ +#else +#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__ +#endif + +// Helpers for generating unique variable names +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_PRIVATE_NAME(...) 
\ + BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, \ + __VA_ARGS__) +#else +#define BENCHMARK_PRIVATE_NAME(n) \ + BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n) +#endif // BENCHMARK_HAS_CXX11 + +#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c) +#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c +// Helper for concatenation with macro name expansion +#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \ + BaseClass##_##Method##_Benchmark + +#define BENCHMARK_PRIVATE_DECLARE(n) \ + static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \ + BENCHMARK_UNUSED + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK(...) \ + BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \ + __VA_ARGS__))) +#else +#define BENCHMARK(n) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n, n))) +#endif // BENCHMARK_HAS_CXX11 + +// Old-style macros +#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a)) +#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)}) +#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t)) +#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi)) +#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \ + BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}}) + +#ifdef BENCHMARK_HAS_CXX11 + +// Register a benchmark which invokes the function specified by `func` +// with the additional arguments specified by `...`. +// +// For example: +// +// template ` +// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { +// [...] +//} +// /* Registers a benchmark named "BM_takes_args/int_string_test` */ +// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); +#define BENCHMARK_CAPTURE(func, test_case_name, ...) \ + BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #func "/" #test_case_name, \ + [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) + +#endif // BENCHMARK_HAS_CXX11 + +// This will register a benchmark for a templatized function. For example: +// +// template +// void BM_Foo(int iters); +// +// BENCHMARK_TEMPLATE(BM_Foo, 1); +// +// will register BM_Foo<1> as a benchmark. +#define BENCHMARK_TEMPLATE1(n, a) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n))) + +#define BENCHMARK_TEMPLATE2(n, a, b) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \ + n))) + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE(n, ...) \ + BENCHMARK_PRIVATE_DECLARE(n) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>))) +#else +#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) +#endif + +#ifdef BENCHMARK_HAS_CXX11 +// This will register a benchmark for a templatized function, +// with the additional arguments specified by `...`. +// +// For example: +// +// template ` +// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { +// [...] 
+//} +// /* Registers a benchmark named "BM_takes_args/int_string_test` */ +// BENCHMARK_TEMPLATE1_CAPTURE(BM_takes_args, void, int_string_test, 42, +// std::string("abc")); +#define BENCHMARK_TEMPLATE1_CAPTURE(func, a, test_case_name, ...) \ + BENCHMARK_CAPTURE(func, test_case_name, __VA_ARGS__) + +#define BENCHMARK_TEMPLATE2_CAPTURE(func, a, b, test_case_name, ...) \ + BENCHMARK_PRIVATE_DECLARE(func) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #func "<" #a "," #b ">" \ + "/" #test_case_name, \ + [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) +#endif // BENCHMARK_HAS_CXX11 + +#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ + }; + +#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a ">/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ + }; + +#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ + }; + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ + class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ + }; +#else +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) +#endif + +#define BENCHMARK_DEFINE_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) +#endif + +#define BENCHMARK_REGISTER_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)) + +#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \ + BENCHMARK_PRIVATE_DECLARE(TestName) = \ + (::benchmark::internal::RegisterBenchmarkInternal(new TestName())) + +// This macro will define and register a benchmark within a fixture class. 
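+
+// For example (sketch only), the define/register macros above are typically
+// used with a fixture along these lines:
+//
+//   class MyFixture : public benchmark::Fixture {
+//    public:
+//     void SetUp(benchmark::State& state) override { /* acquire resources */ }
+//     void TearDown(benchmark::State& state) override { /* release them */ }
+//   };
+//   BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& state) {
+//     for (auto _ : state) { /* code under test */ }
+//   }
+//   BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
+//
+// BENCHMARK_F (below) declares, registers and defines the benchmark in a
+// single step.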
+#define BENCHMARK_F(BaseClass, Method) \ + BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) +#endif + +// Helper macro to create a main routine in a test that runs the benchmarks +// Note the workaround for Hexagon simulator passing argc != 0, argv = NULL. +#define BENCHMARK_MAIN() \ + int main(int argc, char** argv) { \ + char arg0_default[] = "benchmark"; \ + char* args_default = arg0_default; \ + if (!argv) { \ + argc = 1; \ + argv = &args_default; \ + } \ + ::benchmark::Initialize(&argc, argv); \ + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ + ::benchmark::RunSpecifiedBenchmarks(); \ + ::benchmark::Shutdown(); \ + return 0; \ + } \ + int main(int, char**) + +// ------------------------------------------------------ +// Benchmark Reporters + +namespace benchmark { + +struct BENCHMARK_EXPORT CPUInfo { + struct CacheInfo { + std::string type; + int level; + int size; + int num_sharing; + }; + + enum Scaling { UNKNOWN, ENABLED, DISABLED }; + + int num_cpus; + Scaling scaling; + double cycles_per_second; + std::vector caches; + std::vector load_avg; + + static const CPUInfo& Get(); + + private: + CPUInfo(); + BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo); +}; + +// Adding Struct for System Information +struct BENCHMARK_EXPORT SystemInfo { + std::string name; + static const SystemInfo& Get(); + + private: + SystemInfo(); + BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo); +}; + +// BenchmarkName contains the components of the Benchmark's name +// which allows individual fields to be modified or cleared before +// building the final name using 'str()'. +struct BENCHMARK_EXPORT BenchmarkName { + std::string function_name; + std::string args; + std::string min_time; + std::string min_warmup_time; + std::string iterations; + std::string repetitions; + std::string time_type; + std::string threads; + + // Return the full name of the benchmark with each non-empty + // field separated by a '/' + std::string str() const; +}; + +// Interface for custom benchmark result printers. +// By default, benchmark reports are printed to stdout. However an application +// can control the destination of the reports by calling +// RunSpecifiedBenchmarks and passing it a custom reporter object. +// The reporter object must implement the following interface. +class BENCHMARK_EXPORT BenchmarkReporter { + public: + struct Context { + CPUInfo const& cpu_info; + SystemInfo const& sys_info; + // The number of chars in the longest benchmark name. 
+ size_t name_field_width; + static const char* executable_name; + Context(); + }; + + struct BENCHMARK_EXPORT Run { + static const int64_t no_repetition_index = -1; + enum RunType { RT_Iteration, RT_Aggregate }; + + Run() + : run_type(RT_Iteration), + aggregate_unit(kTime), + skipped(internal::NotSkipped), + iterations(1), + threads(1), + time_unit(GetDefaultTimeUnit()), + real_accumulated_time(0), + cpu_accumulated_time(0), + max_heapbytes_used(0), + use_real_time_for_initial_big_o(false), + complexity(oNone), + complexity_lambda(), + complexity_n(0), + report_big_o(false), + report_rms(false), + memory_result(NULL), + allocs_per_iter(0.0) {} + + std::string benchmark_name() const; + BenchmarkName run_name; + int64_t family_index; + int64_t per_family_instance_index; + RunType run_type; + std::string aggregate_name; + StatisticUnit aggregate_unit; + std::string report_label; // Empty if not set by benchmark. + internal::Skipped skipped; + std::string skip_message; + + IterationCount iterations; + int64_t threads; + int64_t repetition_index; + int64_t repetitions; + TimeUnit time_unit; + double real_accumulated_time; + double cpu_accumulated_time; + + // Return a value representing the real time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedRealTime() const; + + // Return a value representing the cpu time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedCPUTime() const; + + // This is set to 0.0 if memory tracing is not enabled. + double max_heapbytes_used; + + // By default Big-O is computed for CPU time, but that is not what you want + // to happen when manual time was requested, which is stored as real time. + bool use_real_time_for_initial_big_o; + + // Keep track of arguments to compute asymptotic complexity + BigO complexity; + BigOFunc* complexity_lambda; + ComplexityN complexity_n; + + // what statistics to compute from the measurements + const std::vector* statistics; + + // Inform print function whether the current run is a complexity report + bool report_big_o; + bool report_rms; + + UserCounters counters; + + // Memory metrics. + const MemoryManager::Result* memory_result; + double allocs_per_iter; + }; + + struct PerFamilyRunReports { + PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {} + + // How many runs will all instances of this benchmark perform? + int num_runs_total; + + // How many runs have happened already? + int num_runs_done; + + // The reports about (non-errneous!) runs of this family. + std::vector Runs; + }; + + // Construct a BenchmarkReporter with the output stream set to 'std::cout' + // and the error stream set to 'std::cerr' + BenchmarkReporter(); + + // Called once for every suite of benchmarks run. + // The parameter "context" contains information that the + // reporter may wish to use when generating its report, for example the + // platform under which the benchmarks are running. The benchmark run is + // never started if this function returns false, allowing the reporter + // to skip runs based on the context information. + virtual bool ReportContext(const Context& context) = 0; + + // Called once for each group of benchmark runs, gives information about + // the configurations of the runs. 
+ virtual void ReportRunsConfig(double /*min_time*/, + bool /*has_explicit_iters*/, + IterationCount /*iters*/) {} + + // Called once for each group of benchmark runs, gives information about + // cpu-time and heap memory usage during the benchmark run. If the group + // of runs contained more than two entries then 'report' contains additional + // elements representing the mean and standard deviation of those runs. + // Additionally if this group of runs was the last in a family of benchmarks + // 'reports' contains additional entries representing the asymptotic + // complexity and RMS of that benchmark family. + virtual void ReportRuns(const std::vector& report) = 0; + + // Called once and only once after ever group of benchmarks is run and + // reported. + virtual void Finalize() {} + + // REQUIRES: The object referenced by 'out' is valid for the lifetime + // of the reporter. + void SetOutputStream(std::ostream* out) { + assert(out); + output_stream_ = out; + } + + // REQUIRES: The object referenced by 'err' is valid for the lifetime + // of the reporter. + void SetErrorStream(std::ostream* err) { + assert(err); + error_stream_ = err; + } + + std::ostream& GetOutputStream() const { return *output_stream_; } + + std::ostream& GetErrorStream() const { return *error_stream_; } + + virtual ~BenchmarkReporter(); + + // Write a human readable string to 'out' representing the specified + // 'context'. + // REQUIRES: 'out' is non-null. + static void PrintBasicContext(std::ostream* out, Context const& context); + + private: + std::ostream* output_stream_; + std::ostream* error_stream_; +}; + +// Simple reporter that outputs benchmark data to the console. This is the +// default reporter used by RunSpecifiedBenchmarks(). +class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter { + public: + enum OutputOptions { + OO_None = 0, + OO_Color = 1, + OO_Tabular = 2, + OO_ColorTabular = OO_Color | OO_Tabular, + OO_Defaults = OO_ColorTabular + }; + explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) + : output_options_(opts_), name_field_width_(0), printed_header_(false) {} + + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + + protected: + virtual void PrintRunData(const Run& report); + virtual void PrintHeader(const Run& report); + + OutputOptions output_options_; + size_t name_field_width_; + UserCounters prev_counters_; + bool printed_header_; +}; + +class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter { + public: + JSONReporter() : first_report_(true) {} + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + void Finalize() BENCHMARK_OVERRIDE; + + private: + void PrintRunData(const Run& report); + + bool first_report_; +}; + +class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG( + "The CSV Reporter will be removed in a future release") CSVReporter + : public BenchmarkReporter { + public: + CSVReporter() : printed_header_(false) {} + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + + private: + void PrintRunData(const Run& report); + + bool printed_header_; + std::set user_counter_names_; +}; + +inline const char* GetTimeUnitString(TimeUnit unit) { + switch (unit) { + case kSecond: + return "s"; + case kMillisecond: + return "ms"; + case kMicrosecond: + return "us"; + case kNanosecond: + return "ns"; + } + BENCHMARK_UNREACHABLE(); +} + 
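+
+// As a usage sketch for the reporter classes above: a program can construct
+// one of them (or any other BenchmarkReporter subclass) and hand it to
+// RunSpecifiedBenchmarks() instead of relying on the default console output:
+//
+//   int main(int argc, char** argv) {
+//     benchmark::Initialize(&argc, argv);
+//     benchmark::JSONReporter reporter;
+//     benchmark::RunSpecifiedBenchmarks(&reporter);
+//     benchmark::Shutdown();
+//     return 0;
+//   }
+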
+inline double GetTimeUnitMultiplier(TimeUnit unit) { + switch (unit) { + case kSecond: + return 1; + case kMillisecond: + return 1e3; + case kMicrosecond: + return 1e6; + case kNanosecond: + return 1e9; + } + BENCHMARK_UNREACHABLE(); +} + +// Creates a list of integer values for the given range and multiplier. +// This can be used together with ArgsProduct() to allow multiple ranges +// with different multipliers. +// Example: +// ArgsProduct({ +// CreateRange(0, 1024, /*multi=*/32), +// CreateRange(0, 100, /*multi=*/4), +// CreateDenseRange(0, 4, /*step=*/1), +// }); +BENCHMARK_EXPORT +std::vector CreateRange(int64_t lo, int64_t hi, int multi); + +// Creates a list of integer values for the given range and step. +BENCHMARK_EXPORT +std::vector CreateDenseRange(int64_t start, int64_t limit, int step); + +} // namespace benchmark + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#endif // BENCHMARK_BENCHMARK_H_ diff --git a/vendor/googlebenchmark/include/benchmark/export.h b/vendor/googlebenchmark/include/benchmark/export.h new file mode 100644 index 000000000..f96f8596c --- /dev/null +++ b/vendor/googlebenchmark/include/benchmark/export.h @@ -0,0 +1,47 @@ +#ifndef BENCHMARK_EXPORT_H +#define BENCHMARK_EXPORT_H + +#if defined(_WIN32) +#define EXPORT_ATTR __declspec(dllexport) +#define IMPORT_ATTR __declspec(dllimport) +#define NO_EXPORT_ATTR +#define DEPRECATED_ATTR __declspec(deprecated) +#else // _WIN32 +#define EXPORT_ATTR __attribute__((visibility("default"))) +#define IMPORT_ATTR __attribute__((visibility("default"))) +#define NO_EXPORT_ATTR __attribute__((visibility("hidden"))) +#define DEPRECATE_ATTR __attribute__((__deprecated__)) +#endif // _WIN32 + +#ifdef BENCHMARK_STATIC_DEFINE +#define BENCHMARK_EXPORT +#define BENCHMARK_NO_EXPORT +#else // BENCHMARK_STATIC_DEFINE +#ifndef BENCHMARK_EXPORT +#ifdef benchmark_EXPORTS +/* We are building this library */ +#define BENCHMARK_EXPORT EXPORT_ATTR +#else // benchmark_EXPORTS +/* We are using this library */ +#define BENCHMARK_EXPORT IMPORT_ATTR +#endif // benchmark_EXPORTS +#endif // !BENCHMARK_EXPORT + +#ifndef BENCHMARK_NO_EXPORT +#define BENCHMARK_NO_EXPORT NO_EXPORT_ATTR +#endif // !BENCHMARK_NO_EXPORT +#endif // BENCHMARK_STATIC_DEFINE + +#ifndef BENCHMARK_DEPRECATED +#define BENCHMARK_DEPRECATED DEPRECATE_ATTR +#endif // BENCHMARK_DEPRECATED + +#ifndef BENCHMARK_DEPRECATED_EXPORT +#define BENCHMARK_DEPRECATED_EXPORT BENCHMARK_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_EXPORT + +#ifndef BENCHMARK_DEPRECATED_NO_EXPORT +#define BENCHMARK_DEPRECATED_NO_EXPORT BENCHMARK_NO_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_EXPORT + +#endif /* BENCHMARK_EXPORT_H */ diff --git a/vendor/googlebenchmark/src/CMakeLists.txt b/vendor/googlebenchmark/src/CMakeLists.txt new file mode 100644 index 000000000..32126c0d2 --- /dev/null +++ b/vendor/googlebenchmark/src/CMakeLists.txt @@ -0,0 +1,180 @@ +#Allow the source files to find headers in src / +include(GNUInstallDirs) +include_directories(${PROJECT_SOURCE_DIR}/src) + +if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) + list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) + list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) +endif() + +file(GLOB + SOURCE_FILES + *.cc + ${PROJECT_SOURCE_DIR}/include/benchmark/*.h + ${CMAKE_CURRENT_SOURCE_DIR}/*.h) +file(GLOB BENCHMARK_MAIN "benchmark_main.cc") +foreach(item ${BENCHMARK_MAIN}) + list(REMOVE_ITEM SOURCE_FILES "${item}") +endforeach() + +add_library(benchmark ${SOURCE_FILES}) 
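+
+# Note (illustrative, not prescriptive): downstream projects normally consume
+# these targets through the exported package configuration, e.g.
+#   find_package(benchmark REQUIRED)
+#   target_link_libraries(my_bench PRIVATE benchmark::benchmark)
+# and may additionally link benchmark::benchmark_main to get a default main().
+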
+add_library(benchmark::benchmark ALIAS benchmark) +set_target_properties(benchmark PROPERTIES + OUTPUT_NAME "benchmark" + VERSION ${GENERIC_LIB_VERSION} + SOVERSION ${GENERIC_LIB_SOVERSION} +) +target_include_directories(benchmark PUBLIC + $ +) + +set_property( + SOURCE benchmark.cc + APPEND + PROPERTY COMPILE_DEFINITIONS + BENCHMARK_VERSION="${VERSION}" +) + +# libpfm, if available +if (PFM_FOUND) + target_link_libraries(benchmark PRIVATE PFM::libpfm) + target_compile_definitions(benchmark PRIVATE -DHAVE_LIBPFM) +endif() + +# pthread affinity, if available +if(HAVE_PTHREAD_AFFINITY) + target_compile_definitions(benchmark PRIVATE -DBENCHMARK_HAS_PTHREAD_AFFINITY) +endif() + +# Link threads. +target_link_libraries(benchmark PRIVATE Threads::Threads) + +target_link_libraries(benchmark PRIVATE ${BENCHMARK_CXX_LIBRARIES}) + +if(HAVE_LIB_RT) + target_link_libraries(benchmark PRIVATE rt) +endif(HAVE_LIB_RT) + + +# We need extra libraries on Windows +if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + target_link_libraries(benchmark PRIVATE shlwapi) +endif() + +# We need extra libraries on Solaris +if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") + target_link_libraries(benchmark PRIVATE kstat) + set(BENCHMARK_PRIVATE_LINK_LIBRARIES -lkstat) +endif() + +if (NOT BUILD_SHARED_LIBS) + target_compile_definitions(benchmark PUBLIC -DBENCHMARK_STATIC_DEFINE) +endif() + +# Benchmark main library +add_library(benchmark_main "benchmark_main.cc") +add_library(benchmark::benchmark_main ALIAS benchmark_main) +set_target_properties(benchmark_main PROPERTIES + OUTPUT_NAME "benchmark_main" + VERSION ${GENERIC_LIB_VERSION} + SOVERSION ${GENERIC_LIB_SOVERSION} + DEFINE_SYMBOL benchmark_EXPORTS +) +target_link_libraries(benchmark_main PUBLIC benchmark::benchmark) + +set(generated_dir "${PROJECT_BINARY_DIR}") + +set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") +set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") +set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") +set(pkg_config_main "${generated_dir}/${PROJECT_NAME}_main.pc") +set(targets_to_export benchmark benchmark_main) +set(targets_export_name "${PROJECT_NAME}Targets") + +set(namespace "${PROJECT_NAME}::") + +include(CMakePackageConfigHelpers) + +configure_package_config_file ( + ${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in + ${project_config} + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + NO_SET_AND_CHECK_MACRO + NO_CHECK_REQUIRED_COMPONENTS_MACRO +) +write_basic_package_version_file( + "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion +) + +configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) +configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark_main.pc.in" "${pkg_config_main}" @ONLY) + +export ( + TARGETS ${targets_to_export} + NAMESPACE "${namespace}" + FILE ${generated_dir}/${targets_export_name}.cmake +) + +if (BENCHMARK_ENABLE_INSTALL) + # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) + install( + TARGETS ${targets_to_export} + EXPORT ${targets_export_name} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + + install( + DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" + "${PROJECT_BINARY_DIR}/include/benchmark" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING PATTERN "*.*h") + + install( + FILES "${project_config}" "${version_config}" + DESTINATION 
"${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") + + install( + FILES "${pkg_config}" "${pkg_config_main}" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") + + install( + EXPORT "${targets_export_name}" + NAMESPACE "${namespace}" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") +endif() + +if (BENCHMARK_ENABLE_DOXYGEN) + find_package(Doxygen REQUIRED) + set(DOXYGEN_QUIET YES) + set(DOXYGEN_RECURSIVE YES) + set(DOXYGEN_GENERATE_HTML YES) + set(DOXYGEN_GENERATE_MAN NO) + set(DOXYGEN_MARKDOWN_SUPPORT YES) + set(DOXYGEN_BUILTIN_STL_SUPPORT YES) + set(DOXYGEN_EXTRACT_PACKAGE YES) + set(DOXYGEN_EXTRACT_STATIC YES) + set(DOXYGEN_SHOW_INCLUDE_FILES YES) + set(DOXYGEN_BINARY_TOC YES) + set(DOXYGEN_TOC_EXPAND YES) + set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "index.md") + doxygen_add_docs(benchmark_doxygen + docs + include + src + ALL + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMENT "Building documentation with Doxygen.") + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() +else() + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${PROJECT_SOURCE_DIR}/docs/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() +endif() diff --git a/vendor/googlebenchmark/src/arraysize.h b/vendor/googlebenchmark/src/arraysize.h new file mode 100644 index 000000000..51a50f2df --- /dev/null +++ b/vendor/googlebenchmark/src/arraysize.h @@ -0,0 +1,33 @@ +#ifndef BENCHMARK_ARRAYSIZE_H_ +#define BENCHMARK_ARRAYSIZE_H_ + +#include "internal_macros.h" + +namespace benchmark { +namespace internal { +// The arraysize(arr) macro returns the # of elements in an array arr. +// The expression is a compile-time constant, and therefore can be +// used in defining new arrays, for example. If you use arraysize on +// a pointer by mistake, you will get a compile-time error. +// + +// This template function declaration is used in defining arraysize. +// Note that the function doesn't need an implementation, as we only +// use its type. +template +char (&ArraySizeHelper(T (&array)[N]))[N]; + +// That gcc wants both of these prototypes seems mysterious. VC, for +// its part, can't decide which to use (another mystery). Matching of +// template overloads: the final frontier. +#ifndef COMPILER_MSVC +template +char (&ArraySizeHelper(const T (&array)[N]))[N]; +#endif + +#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array))) + +} // end namespace internal +} // end namespace benchmark + +#endif // BENCHMARK_ARRAYSIZE_H_ diff --git a/vendor/googlebenchmark/src/benchmark.cc b/vendor/googlebenchmark/src/benchmark.cc new file mode 100644 index 000000000..2d085447b --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark.cc @@ -0,0 +1,812 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "benchmark/benchmark.h" + +#include "benchmark_api_internal.h" +#include "benchmark_runner.h" +#include "internal_macros.h" + +#ifndef BENCHMARK_OS_WINDOWS +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +#include +#endif +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "log.h" +#include "mutex.h" +#include "perf_counters.h" +#include "re.h" +#include "statistics.h" +#include "string_util.h" +#include "thread_manager.h" +#include "thread_timer.h" + +namespace benchmark { +// Print a list of benchmarks. This option overrides all other options. +BM_DEFINE_bool(benchmark_list_tests, false); + +// A regular expression that specifies the set of benchmarks to execute. If +// this flag is empty, or if this flag is the string \"all\", all benchmarks +// linked into the binary are run. +BM_DEFINE_string(benchmark_filter, ""); + +// Specification of how long to run the benchmark. +// +// It can be either an exact number of iterations (specified as `x`), +// or a minimum number of seconds (specified as `s`). If the latter +// format (ie., min seconds) is used, the system may run the benchmark longer +// until the results are considered significant. +// +// For backward compatibility, the `s` suffix may be omitted, in which case, +// the specified number is interpreted as the number of seconds. +// +// For cpu-time based tests, this is the lower bound +// on the total cpu time used by all threads that make up the test. For +// real-time based tests, this is the lower bound on the elapsed time of the +// benchmark execution, regardless of number of threads. +BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr); + +// Minimum number of seconds a benchmark should be run before results should be +// taken into account. This e.g can be necessary for benchmarks of code which +// needs to fill some form of cache before performance is of interest. +// Note: results gathered within this period are discarded and not used for +// reported result. +BM_DEFINE_double(benchmark_min_warmup_time, 0.0); + +// The number of runs of each benchmark. If greater than 1, the mean and +// standard deviation of the runs will be reported. +BM_DEFINE_int32(benchmark_repetitions, 1); + +// If set, enable random interleaving of repetitions of all benchmarks. +// See http://github.com/google/benchmark/issues/1051 for details. +BM_DEFINE_bool(benchmark_enable_random_interleaving, false); + +// Report the result of each benchmark repetitions. When 'true' is specified +// only the mean, standard deviation, and other statistics are reported for +// repeated benchmarks. Affects all reporters. +BM_DEFINE_bool(benchmark_report_aggregates_only, false); + +// Display the result of each benchmark repetitions. When 'true' is specified +// only the mean, standard deviation, and other statistics are displayed for +// repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects +// the display reporter, but *NOT* file reporter, which will still contain +// all the output. +BM_DEFINE_bool(benchmark_display_aggregates_only, false); + +// The format to use for console output. +// Valid values are 'console', 'json', or 'csv'. +BM_DEFINE_string(benchmark_format, "console"); + +// The format to use for file output. 
+// Valid values are 'console', 'json', or 'csv'. +BM_DEFINE_string(benchmark_out_format, "json"); + +// The file to write additional output to. +BM_DEFINE_string(benchmark_out, ""); + +// Whether to use colors in the output. Valid values: +// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if +// the output is being sent to a terminal and the TERM environment variable is +// set to a terminal type that supports colors. +BM_DEFINE_string(benchmark_color, "auto"); + +// Whether to use tabular format when printing user counters to the console. +// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false. +BM_DEFINE_bool(benchmark_counters_tabular, false); + +// List of additional perf counters to collect, in libpfm format. For more +// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html +BM_DEFINE_string(benchmark_perf_counters, ""); + +// Extra context to include in the output formatted as comma-separated key-value +// pairs. Kept internal as it's only used for parsing from env/command line. +BM_DEFINE_kvpairs(benchmark_context, {}); + +// Set the default time unit to use for reports +// Valid values are 'ns', 'us', 'ms' or 's' +BM_DEFINE_string(benchmark_time_unit, ""); + +// The level of verbose logging to output +BM_DEFINE_int32(v, 0); + +namespace internal { + +std::map* global_context = nullptr; + +BENCHMARK_EXPORT std::map*& GetGlobalContext() { + return global_context; +} + +static void const volatile* volatile global_force_escape_pointer; + +// FIXME: Verify if LTO still messes this up? +void UseCharPointer(char const volatile* const v) { + // We want to escape the pointer `v` so that the compiler can not eliminate + // computations that produced it. To do that, we escape the pointer by storing + // it into a volatile variable, since generally, volatile store, is not + // something the compiler is allowed to elide. + global_force_escape_pointer = reinterpret_cast(v); +} + +} // namespace internal + +State::State(std::string name, IterationCount max_iters, + const std::vector& ranges, int thread_i, int n_threads, + internal::ThreadTimer* timer, internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) + : total_iterations_(0), + batch_leftover_(0), + max_iterations(max_iters), + started_(false), + finished_(false), + skipped_(internal::NotSkipped), + range_(ranges), + complexity_n_(0), + name_(std::move(name)), + thread_index_(thread_i), + threads_(n_threads), + timer_(timer), + manager_(manager), + perf_counters_measurement_(perf_counters_measurement) { + BM_CHECK(max_iterations != 0) << "At least one iteration must be run"; + BM_CHECK_LT(thread_index_, threads_) + << "thread_index must be less than threads"; + + // Add counters with correct flag now. If added with `counters[name]` in + // `PauseTiming`, a new `Counter` will be inserted the first time, which + // won't have the flag. Inserting them now also reduces the allocations + // during the benchmark. + if (perf_counters_measurement_) { + for (const std::string& counter_name : + perf_counters_measurement_->names()) { + counters[counter_name] = Counter(0.0, Counter::kAvgIterations); + } + } + + // Note: The use of offsetof below is technically undefined until C++17 + // because State is not a standard layout type. However, all compilers + // currently provide well-defined behavior as an extension (which is + // demonstrated since constexpr evaluation must diagnose all undefined + // behavior). 
However, GCC and Clang also warn about this use of offsetof, + // which must be suppressed. +#if defined(__INTEL_COMPILER) +#pragma warning push +#pragma warning(disable : 1875) +#elif defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif +#if defined(__NVCC__) +#pragma nv_diagnostic push +#pragma nv_diag_suppress 1427 +#endif +#if defined(__NVCOMPILER) +#pragma diagnostic push +#pragma diag_suppress offset_in_non_POD_nonstandard +#endif + // Offset tests to ensure commonly accessed data is on the first cache line. + const int cache_line_size = 64; + static_assert( + offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), ""); +#if defined(__INTEL_COMPILER) +#pragma warning pop +#elif defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif +#if defined(__NVCC__) +#pragma nv_diagnostic pop +#endif +#if defined(__NVCOMPILER) +#pragma diagnostic pop +#endif +} + +void State::PauseTiming() { + // Add in time accumulated so far + BM_CHECK(started_ && !finished_ && !skipped()); + timer_->StopTimer(); + if (perf_counters_measurement_) { + std::vector> measurements; + if (!perf_counters_measurement_->Stop(measurements)) { + BM_CHECK(false) << "Perf counters read the value failed."; + } + for (const auto& name_and_measurement : measurements) { + const std::string& name = name_and_measurement.first; + const double measurement = name_and_measurement.second; + // Counter was inserted with `kAvgIterations` flag by the constructor. + assert(counters.find(name) != counters.end()); + counters[name].value += measurement; + } + } +} + +void State::ResumeTiming() { + BM_CHECK(started_ && !finished_ && !skipped()); + timer_->StartTimer(); + if (perf_counters_measurement_) { + perf_counters_measurement_->Start(); + } +} + +void State::SkipWithMessage(const std::string& msg) { + skipped_ = internal::SkippedWithMessage; + { + MutexLock l(manager_->GetBenchmarkMutex()); + if (internal::NotSkipped == manager_->results.skipped_) { + manager_->results.skip_message_ = msg; + manager_->results.skipped_ = skipped_; + } + } + total_iterations_ = 0; + if (timer_->running()) timer_->StopTimer(); +} + +void State::SkipWithError(const std::string& msg) { + skipped_ = internal::SkippedWithError; + { + MutexLock l(manager_->GetBenchmarkMutex()); + if (internal::NotSkipped == manager_->results.skipped_) { + manager_->results.skip_message_ = msg; + manager_->results.skipped_ = skipped_; + } + } + total_iterations_ = 0; + if (timer_->running()) timer_->StopTimer(); +} + +void State::SetIterationTime(double seconds) { + timer_->SetIterationTime(seconds); +} + +void State::SetLabel(const std::string& label) { + MutexLock l(manager_->GetBenchmarkMutex()); + manager_->results.report_label_ = label; +} + +void State::StartKeepRunning() { + BM_CHECK(!started_ && !finished_); + started_ = true; + total_iterations_ = skipped() ? 0 : max_iterations; + manager_->StartStopBarrier(); + if (!skipped()) ResumeTiming(); +} + +void State::FinishKeepRunning() { + BM_CHECK(started_ && (!finished_ || skipped())); + if (!skipped()) { + PauseTiming(); + } + // Total iterations has now wrapped around past 0. Fix this. + total_iterations_ = 0; + finished_ = true; + manager_->StartStopBarrier(); +} + +namespace internal { +namespace { + +// Flushes streams after invoking reporter methods that write to them. This +// ensures users get timely updates even when streams are not line-buffered. 
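+
+// As an illustration of the State methods implemented above, a benchmark that
+// excludes per-iteration setup from the measured time and aborts on bad input
+// might look like this (sketch only; MakeInput/Process are hypothetical):
+//
+//   static void BM_Process(benchmark::State& state) {
+//     for (auto _ : state) {
+//       state.PauseTiming();
+//       Input input = MakeInput();
+//       state.ResumeTiming();
+//       if (!Process(input)) {
+//         state.SkipWithError("Process() failed");
+//         break;
+//       }
+//     }
+//   }
+//   BENCHMARK(BM_Process);
+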
+void FlushStreams(BenchmarkReporter* reporter) { + if (!reporter) return; + std::flush(reporter->GetOutputStream()); + std::flush(reporter->GetErrorStream()); +} + +// Reports in both display and file reporters. +void Report(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, const RunResults& run_results) { + auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only, + const RunResults& results) { + assert(reporter); + // If there are no aggregates, do output non-aggregates. + aggregates_only &= !results.aggregates_only.empty(); + if (!aggregates_only) reporter->ReportRuns(results.non_aggregates); + if (!results.aggregates_only.empty()) + reporter->ReportRuns(results.aggregates_only); + }; + + report_one(display_reporter, run_results.display_report_aggregates_only, + run_results); + if (file_reporter) + report_one(file_reporter, run_results.file_report_aggregates_only, + run_results); + + FlushStreams(display_reporter); + FlushStreams(file_reporter); +} + +void RunBenchmarks(const std::vector& benchmarks, + BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter) { + // Note the file_reporter can be null. + BM_CHECK(display_reporter != nullptr); + + // Determine the width of the name field using a minimum width of 10. + bool might_have_aggregates = FLAGS_benchmark_repetitions > 1; + size_t name_field_width = 10; + size_t stat_field_width = 0; + for (const BenchmarkInstance& benchmark : benchmarks) { + name_field_width = + std::max(name_field_width, benchmark.name().str().size()); + might_have_aggregates |= benchmark.repetitions() > 1; + + for (const auto& Stat : benchmark.statistics()) + stat_field_width = std::max(stat_field_width, Stat.name_.size()); + } + if (might_have_aggregates) name_field_width += 1 + stat_field_width; + + // Print header here + BenchmarkReporter::Context context; + context.name_field_width = name_field_width; + + // Keep track of running times of all instances of each benchmark family. + std::map + per_family_reports; + + if (display_reporter->ReportContext(context) && + (!file_reporter || file_reporter->ReportContext(context))) { + FlushStreams(display_reporter); + FlushStreams(file_reporter); + + size_t num_repetitions_total = 0; + + // This perfcounters object needs to be created before the runners vector + // below so it outlasts their lifetime. + PerfCountersMeasurement perfcounters( + StrSplit(FLAGS_benchmark_perf_counters, ',')); + + // Vector of benchmarks to run + std::vector runners; + runners.reserve(benchmarks.size()); + + // Count the number of benchmarks with threads to warn the user in case + // performance counters are used. 
+ int benchmarks_with_threads = 0; + + // Loop through all benchmarks + for (const BenchmarkInstance& benchmark : benchmarks) { + BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr; + if (benchmark.complexity() != oNone) + reports_for_family = &per_family_reports[benchmark.family_index()]; + benchmarks_with_threads += (benchmark.threads() > 1); + runners.emplace_back(benchmark, &perfcounters, reports_for_family); + int num_repeats_of_this_instance = runners.back().GetNumRepeats(); + num_repetitions_total += + static_cast(num_repeats_of_this_instance); + if (reports_for_family) + reports_for_family->num_runs_total += num_repeats_of_this_instance; + } + assert(runners.size() == benchmarks.size() && "Unexpected runner count."); + + // The use of performance counters with threads would be unintuitive for + // the average user so we need to warn them about this case + if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) { + GetErrorLogInstance() + << "***WARNING*** There are " << benchmarks_with_threads + << " benchmarks with threads and " << perfcounters.num_counters() + << " performance counters were requested. Beware counters will " + "reflect the combined usage across all " + "threads.\n"; + } + + std::vector repetition_indices; + repetition_indices.reserve(num_repetitions_total); + for (size_t runner_index = 0, num_runners = runners.size(); + runner_index != num_runners; ++runner_index) { + const internal::BenchmarkRunner& runner = runners[runner_index]; + std::fill_n(std::back_inserter(repetition_indices), + runner.GetNumRepeats(), runner_index); + } + assert(repetition_indices.size() == num_repetitions_total && + "Unexpected number of repetition indexes."); + + if (FLAGS_benchmark_enable_random_interleaving) { + std::random_device rd; + std::mt19937 g(rd()); + std::shuffle(repetition_indices.begin(), repetition_indices.end(), g); + } + + for (size_t repetition_index : repetition_indices) { + internal::BenchmarkRunner& runner = runners[repetition_index]; + runner.DoOneRepetition(); + if (runner.HasRepeatsRemaining()) continue; + // FIXME: report each repetition separately, not all of them in bulk. 
+ + display_reporter->ReportRunsConfig( + runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters()); + if (file_reporter) + file_reporter->ReportRunsConfig( + runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters()); + + RunResults run_results = runner.GetResults(); + + // Maybe calculate complexity report + if (const auto* reports_for_family = runner.GetReportsForFamily()) { + if (reports_for_family->num_runs_done == + reports_for_family->num_runs_total) { + auto additional_run_stats = ComputeBigO(reports_for_family->Runs); + run_results.aggregates_only.insert(run_results.aggregates_only.end(), + additional_run_stats.begin(), + additional_run_stats.end()); + per_family_reports.erase( + static_cast(reports_for_family->Runs.front().family_index)); + } + } + + Report(display_reporter, file_reporter, run_results); + } + } + display_reporter->Finalize(); + if (file_reporter) file_reporter->Finalize(); + FlushStreams(display_reporter); + FlushStreams(file_reporter); +} + +// Disable deprecated warnings temporarily because we need to reference +// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations +BENCHMARK_DISABLE_DEPRECATED_WARNING + +std::unique_ptr CreateReporter( + std::string const& name, ConsoleReporter::OutputOptions output_opts) { + typedef std::unique_ptr PtrType; + if (name == "console") { + return PtrType(new ConsoleReporter(output_opts)); + } + if (name == "json") { + return PtrType(new JSONReporter()); + } + if (name == "csv") { + return PtrType(new CSVReporter()); + } + std::cerr << "Unexpected format: '" << name << "'\n"; + std::exit(1); +} + +BENCHMARK_RESTORE_DEPRECATED_WARNING + +} // end namespace + +bool IsZero(double n) { + return std::abs(n) < std::numeric_limits::epsilon(); +} + +ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { + int output_opts = ConsoleReporter::OO_Defaults; + auto is_benchmark_color = [force_no_color]() -> bool { + if (force_no_color) { + return false; + } + if (FLAGS_benchmark_color == "auto") { + return IsColorTerminal(); + } + return IsTruthyFlagValue(FLAGS_benchmark_color); + }; + if (is_benchmark_color()) { + output_opts |= ConsoleReporter::OO_Color; + } else { + output_opts &= ~ConsoleReporter::OO_Color; + } + if (FLAGS_benchmark_counters_tabular) { + output_opts |= ConsoleReporter::OO_Tabular; + } else { + output_opts &= ~ConsoleReporter::OO_Tabular; + } + return static_cast(output_opts); +} + +} // end namespace internal + +BenchmarkReporter* CreateDefaultDisplayReporter() { + static auto default_display_reporter = + internal::CreateReporter(FLAGS_benchmark_format, + internal::GetOutputOptions()) + .release(); + return default_display_reporter; +} + +size_t RunSpecifiedBenchmarks() { + return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(std::string spec) { + return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec)); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) { + return RunSpecifiedBenchmarks(display_reporter, nullptr, + FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + std::string spec) { + return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec)); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter) { + return RunSpecifiedBenchmarks(display_reporter, file_reporter, + FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + 
BenchmarkReporter* file_reporter, + std::string spec) { + if (spec.empty() || spec == "all") + spec = "."; // Regexp that matches all benchmarks + + // Setup the reporters + std::ofstream output_file; + std::unique_ptr default_display_reporter; + std::unique_ptr default_file_reporter; + if (!display_reporter) { + default_display_reporter.reset(CreateDefaultDisplayReporter()); + display_reporter = default_display_reporter.get(); + } + auto& Out = display_reporter->GetOutputStream(); + auto& Err = display_reporter->GetErrorStream(); + + std::string const& fname = FLAGS_benchmark_out; + if (fname.empty() && file_reporter) { + Err << "A custom file reporter was provided but " + "--benchmark_out= was not specified." + << std::endl; + Out.flush(); + Err.flush(); + std::exit(1); + } + if (!fname.empty()) { + output_file.open(fname); + if (!output_file.is_open()) { + Err << "invalid file name: '" << fname << "'" << std::endl; + Out.flush(); + Err.flush(); + std::exit(1); + } + if (!file_reporter) { + default_file_reporter = internal::CreateReporter( + FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular + ? ConsoleReporter::OO_Tabular + : ConsoleReporter::OO_None); + file_reporter = default_file_reporter.get(); + } + file_reporter->SetOutputStream(&output_file); + file_reporter->SetErrorStream(&output_file); + } + + std::vector benchmarks; + if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) { + Out.flush(); + Err.flush(); + return 0; + } + + if (benchmarks.empty()) { + Err << "Failed to match any benchmarks against regex: " << spec << "\n"; + Out.flush(); + Err.flush(); + return 0; + } + + if (FLAGS_benchmark_list_tests) { + for (auto const& benchmark : benchmarks) + Out << benchmark.name().str() << "\n"; + } else { + internal::RunBenchmarks(benchmarks, display_reporter, file_reporter); + } + + Out.flush(); + Err.flush(); + return benchmarks.size(); +} + +namespace { +// stores the time unit benchmarks use by default +TimeUnit default_time_unit = kNanosecond; +} // namespace + +TimeUnit GetDefaultTimeUnit() { return default_time_unit; } + +void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; } + +std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; } + +void SetBenchmarkFilter(std::string value) { + FLAGS_benchmark_filter = std::move(value); +} + +int32_t GetBenchmarkVerbosity() { return FLAGS_v; } + +void RegisterMemoryManager(MemoryManager* manager) { + internal::memory_manager = manager; +} + +void RegisterProfilerManager(ProfilerManager* manager) { + internal::profiler_manager = manager; +} + +void AddCustomContext(const std::string& key, const std::string& value) { + if (internal::global_context == nullptr) { + internal::global_context = new std::map(); + } + if (!internal::global_context->emplace(key, value).second) { + std::cerr << "Failed to add custom context \"" << key << "\" as it already " + << "exists with value \"" << value << "\"\n"; + } +} + +namespace internal { + +void (*HelperPrintf)(); + +void PrintUsageAndExit() { + HelperPrintf(); + exit(0); +} + +void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) { + if (time_unit_flag == "s") { + return SetDefaultTimeUnit(kSecond); + } + if (time_unit_flag == "ms") { + return SetDefaultTimeUnit(kMillisecond); + } + if (time_unit_flag == "us") { + return SetDefaultTimeUnit(kMicrosecond); + } + if (time_unit_flag == "ns") { + return SetDefaultTimeUnit(kNanosecond); + } + if (!time_unit_flag.empty()) { + PrintUsageAndExit(); + } +} + +void ParseCommandLineFlags(int* argc, char** argv) { + 
using namespace benchmark; + BenchmarkReporter::Context::executable_name = + (argc && *argc > 0) ? argv[0] : "unknown"; + for (int i = 1; argc && i < *argc; ++i) { + if (ParseBoolFlag(argv[i], "benchmark_list_tests", + &FLAGS_benchmark_list_tests) || + ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) || + ParseStringFlag(argv[i], "benchmark_min_time", + &FLAGS_benchmark_min_time) || + ParseDoubleFlag(argv[i], "benchmark_min_warmup_time", + &FLAGS_benchmark_min_warmup_time) || + ParseInt32Flag(argv[i], "benchmark_repetitions", + &FLAGS_benchmark_repetitions) || + ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving", + &FLAGS_benchmark_enable_random_interleaving) || + ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", + &FLAGS_benchmark_report_aggregates_only) || + ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", + &FLAGS_benchmark_display_aggregates_only) || + ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) || + ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) || + ParseStringFlag(argv[i], "benchmark_out_format", + &FLAGS_benchmark_out_format) || + ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) || + ParseBoolFlag(argv[i], "benchmark_counters_tabular", + &FLAGS_benchmark_counters_tabular) || + ParseStringFlag(argv[i], "benchmark_perf_counters", + &FLAGS_benchmark_perf_counters) || + ParseKeyValueFlag(argv[i], "benchmark_context", + &FLAGS_benchmark_context) || + ParseStringFlag(argv[i], "benchmark_time_unit", + &FLAGS_benchmark_time_unit) || + ParseInt32Flag(argv[i], "v", &FLAGS_v)) { + for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; + + --(*argc); + --i; + } else if (IsFlag(argv[i], "help")) { + PrintUsageAndExit(); + } + } + for (auto const* flag : + {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) { + if (*flag != "console" && *flag != "json" && *flag != "csv") { + PrintUsageAndExit(); + } + } + SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit); + if (FLAGS_benchmark_color.empty()) { + PrintUsageAndExit(); + } + for (const auto& kv : FLAGS_benchmark_context) { + AddCustomContext(kv.first, kv.second); + } +} + +int InitializeStreams() { + static std::ios_base::Init init; + return 0; +} + +} // end namespace internal + +std::string GetBenchmarkVersion() { +#ifdef BENCHMARK_VERSION + return {BENCHMARK_VERSION}; +#else + return {""}; +#endif +} + +void PrintDefaultHelp() { + fprintf(stdout, + "benchmark" + " [--benchmark_list_tests={true|false}]\n" + " [--benchmark_filter=]\n" + " [--benchmark_min_time=`x` OR `s` ]\n" + " [--benchmark_min_warmup_time=]\n" + " [--benchmark_repetitions=]\n" + " [--benchmark_enable_random_interleaving={true|false}]\n" + " [--benchmark_report_aggregates_only={true|false}]\n" + " [--benchmark_display_aggregates_only={true|false}]\n" + " [--benchmark_format=]\n" + " [--benchmark_out=]\n" + " [--benchmark_out_format=]\n" + " [--benchmark_color={auto|true|false}]\n" + " [--benchmark_counters_tabular={true|false}]\n" +#if defined HAVE_LIBPFM + " [--benchmark_perf_counters=,...]\n" +#endif + " [--benchmark_context==,...]\n" + " [--benchmark_time_unit={ns|us|ms|s}]\n" + " [--v=]\n"); +} + +void Initialize(int* argc, char** argv, void (*HelperPrintf)()) { + internal::HelperPrintf = HelperPrintf; + internal::ParseCommandLineFlags(argc, argv); + internal::LogLevel() = FLAGS_v; +} + +void Shutdown() { delete internal::global_context; } + +bool ReportUnrecognizedArguments(int argc, char** argv) { + for (int i = 1; i < argc; ++i) { + 
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], + argv[i]); + } + return argc > 1; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/benchmark_api_internal.cc b/vendor/googlebenchmark/src/benchmark_api_internal.cc new file mode 100644 index 000000000..286f98653 --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_api_internal.cc @@ -0,0 +1,118 @@ +#include "benchmark_api_internal.h" + +#include + +#include "string_util.h" + +namespace benchmark { +namespace internal { + +BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx, + int per_family_instance_idx, + const std::vector& args, + int thread_count) + : benchmark_(*benchmark), + family_index_(family_idx), + per_family_instance_index_(per_family_instance_idx), + aggregation_report_mode_(benchmark_.aggregation_report_mode_), + args_(args), + time_unit_(benchmark_.GetTimeUnit()), + measure_process_cpu_time_(benchmark_.measure_process_cpu_time_), + use_real_time_(benchmark_.use_real_time_), + use_manual_time_(benchmark_.use_manual_time_), + complexity_(benchmark_.complexity_), + complexity_lambda_(benchmark_.complexity_lambda_), + statistics_(benchmark_.statistics_), + repetitions_(benchmark_.repetitions_), + min_time_(benchmark_.min_time_), + min_warmup_time_(benchmark_.min_warmup_time_), + iterations_(benchmark_.iterations_), + threads_(thread_count) { + name_.function_name = benchmark_.name_; + + size_t arg_i = 0; + for (const auto& arg : args) { + if (!name_.args.empty()) { + name_.args += '/'; + } + + if (arg_i < benchmark->arg_names_.size()) { + const auto& arg_name = benchmark_.arg_names_[arg_i]; + if (!arg_name.empty()) { + name_.args += StrFormat("%s:", arg_name.c_str()); + } + } + + name_.args += StrFormat("%" PRId64, arg); + ++arg_i; + } + + if (!IsZero(benchmark->min_time_)) { + name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_); + } + + if (!IsZero(benchmark->min_warmup_time_)) { + name_.min_warmup_time = + StrFormat("min_warmup_time:%0.3f", benchmark_.min_warmup_time_); + } + + if (benchmark_.iterations_ != 0) { + name_.iterations = StrFormat( + "iterations:%lu", static_cast(benchmark_.iterations_)); + } + + if (benchmark_.repetitions_ != 0) { + name_.repetitions = StrFormat("repeats:%d", benchmark_.repetitions_); + } + + if (benchmark_.measure_process_cpu_time_) { + name_.time_type = "process_time"; + } + + if (benchmark_.use_manual_time_) { + if (!name_.time_type.empty()) { + name_.time_type += '/'; + } + name_.time_type += "manual_time"; + } else if (benchmark_.use_real_time_) { + if (!name_.time_type.empty()) { + name_.time_type += '/'; + } + name_.time_type += "real_time"; + } + + if (!benchmark_.thread_counts_.empty()) { + name_.threads = StrFormat("threads:%d", threads_); + } + + setup_ = benchmark_.setup_; + teardown_ = benchmark_.teardown_; +} + +State BenchmarkInstance::Run( + IterationCount iters, int thread_id, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) const { + State st(name_.function_name, iters, args_, thread_id, threads_, timer, + manager, perf_counters_measurement); + benchmark_.Run(st); + return st; +} + +void BenchmarkInstance::Setup() const { + if (setup_) { + State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_, + nullptr, nullptr, nullptr); + setup_(st); + } +} + +void BenchmarkInstance::Teardown() const { + if (teardown_) { + State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_, + 
nullptr, nullptr, nullptr); + teardown_(st); + } +} +} // namespace internal +} // namespace benchmark diff --git a/vendor/googlebenchmark/src/benchmark_api_internal.h b/vendor/googlebenchmark/src/benchmark_api_internal.h new file mode 100644 index 000000000..94f516531 --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_api_internal.h @@ -0,0 +1,87 @@ +#ifndef BENCHMARK_API_INTERNAL_H +#define BENCHMARK_API_INTERNAL_H + +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "commandlineflags.h" + +namespace benchmark { +namespace internal { + +// Information kept per benchmark we may want to run +class BenchmarkInstance { + public: + BenchmarkInstance(Benchmark* benchmark, int family_index, + int per_family_instance_index, + const std::vector& args, int threads); + + const BenchmarkName& name() const { return name_; } + int family_index() const { return family_index_; } + int per_family_instance_index() const { return per_family_instance_index_; } + AggregationReportMode aggregation_report_mode() const { + return aggregation_report_mode_; + } + TimeUnit time_unit() const { return time_unit_; } + bool measure_process_cpu_time() const { return measure_process_cpu_time_; } + bool use_real_time() const { return use_real_time_; } + bool use_manual_time() const { return use_manual_time_; } + BigO complexity() const { return complexity_; } + BigOFunc* complexity_lambda() const { return complexity_lambda_; } + const std::vector& statistics() const { return statistics_; } + int repetitions() const { return repetitions_; } + double min_time() const { return min_time_; } + double min_warmup_time() const { return min_warmup_time_; } + IterationCount iterations() const { return iterations_; } + int threads() const { return threads_; } + void Setup() const; + void Teardown() const; + + State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) const; + + private: + BenchmarkName name_; + Benchmark& benchmark_; + const int family_index_; + const int per_family_instance_index_; + AggregationReportMode aggregation_report_mode_; + const std::vector& args_; + TimeUnit time_unit_; + bool measure_process_cpu_time_; + bool use_real_time_; + bool use_manual_time_; + BigO complexity_; + BigOFunc* complexity_lambda_; + UserCounters counters_; + const std::vector& statistics_; + int repetitions_; + double min_time_; + double min_warmup_time_; + IterationCount iterations_; + int threads_; // Number of concurrent threads to us + + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_ = nullptr; + callback_function teardown_ = nullptr; +}; + +bool FindBenchmarksInternal(const std::string& re, + std::vector* benchmarks, + std::ostream* Err); + +bool IsZero(double n); + +BENCHMARK_EXPORT +ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); + +} // end namespace internal +} // end namespace benchmark + +#endif // BENCHMARK_API_INTERNAL_H diff --git a/vendor/googlebenchmark/src/benchmark_main.cc b/vendor/googlebenchmark/src/benchmark_main.cc new file mode 100644 index 000000000..cd61cd2ad --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_main.cc @@ -0,0 +1,18 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" + +BENCHMARK_EXPORT int main(int, char**); +BENCHMARK_MAIN(); diff --git a/vendor/googlebenchmark/src/benchmark_name.cc b/vendor/googlebenchmark/src/benchmark_name.cc new file mode 100644 index 000000000..01676bbc8 --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_name.cc @@ -0,0 +1,59 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +namespace benchmark { + +namespace { + +// Compute the total size of a pack of std::strings +size_t size_impl() { return 0; } + +template +size_t size_impl(const Head& head, const Tail&... tail) { + return head.size() + size_impl(tail...); +} + +// Join a pack of std::strings using a delimiter +// TODO: use absl::StrJoin +void join_impl(std::string&, char) {} + +template +void join_impl(std::string& s, const char delimiter, const Head& head, + const Tail&... tail) { + if (!s.empty() && !head.empty()) { + s += delimiter; + } + + s += head; + + join_impl(s, delimiter, tail...); +} + +template +std::string join(char delimiter, const Ts&... ts) { + std::string s; + s.reserve(sizeof...(Ts) + size_impl(ts...)); + join_impl(s, delimiter, ts...); + return s; +} +} // namespace + +BENCHMARK_EXPORT +std::string BenchmarkName::str() const { + return join('/', function_name, args, min_time, min_warmup_time, iterations, + repetitions, time_type, threads); +} +} // namespace benchmark diff --git a/vendor/googlebenchmark/src/benchmark_register.cc b/vendor/googlebenchmark/src/benchmark_register.cc new file mode 100644 index 000000000..8ade04822 --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_register.cc @@ -0,0 +1,521 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
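[Editorial aside, not part of the vendored benchmark_register.cc below: the registration machinery in this file is what the public BENCHMARK macros ultimately feed into. As a hedged, minimal sketch of how a single user-facing registration fans out into a family of BenchmarkInstance objects, consider the following standalone example; the benchmark function BM_MemCopy and its buffer sizes are hypothetical, but the API calls (BENCHMARK, Range, Threads, State::range, SetBytesProcessed, BENCHMARK_MAIN) are the standard Google Benchmark interface.]

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

#include <benchmark/benchmark.h>

// Hypothetical benchmark: copies a buffer whose size comes from the
// registered argument. Range(8, 8 << 10) expands into one argument per power
// of the range multiplier between 8 and 8192 (via AddRange below), and
// Threads(2) multiplies the family again, so one registration yields many
// BenchmarkInstance objects.
static void BM_MemCopy(benchmark::State& state) {
  std::vector<char> src(static_cast<std::size_t>(state.range(0)), 'x');
  std::vector<char> dst(src.size());
  for (auto _ : state) {
    std::copy(src.begin(), src.end(), dst.begin());
  }
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
                          state.range(0));
}
BENCHMARK(BM_MemCopy)->Range(8, 8 << 10)->Threads(2);

BENCHMARK_MAIN();

[End of aside; the vendored file continues below.]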
+ +#include "benchmark_register.h" + +#ifndef BENCHMARK_OS_WINDOWS +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +#include +#endif +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "benchmark_api_internal.h" +#include "check.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "internal_macros.h" +#include "log.h" +#include "mutex.h" +#include "re.h" +#include "statistics.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { + +namespace { +// For non-dense Range, intermediate values are powers of kRangeMultiplier. +static constexpr int kRangeMultiplier = 8; + +// The size of a benchmark family determines is the number of inputs to repeat +// the benchmark on. If this is "large" then warn the user during configuration. +static constexpr size_t kMaxFamilySize = 100; + +static constexpr char kDisabledPrefix[] = "DISABLED_"; +} // end namespace + +namespace internal { + +//=============================================================================// +// BenchmarkFamilies +//=============================================================================// + +// Class for managing registered benchmarks. Note that each registered +// benchmark identifies a family of related benchmarks to run. +class BenchmarkFamilies { + public: + static BenchmarkFamilies* GetInstance(); + + // Registers a benchmark family and returns the index assigned to it. + size_t AddBenchmark(std::unique_ptr family); + + // Clear all registered benchmark families. + void ClearBenchmarks(); + + // Extract the list of benchmark instances that match the specified + // regular expression. + bool FindBenchmarks(std::string re, + std::vector* benchmarks, + std::ostream* Err); + + private: + BenchmarkFamilies() {} + + std::vector> families_; + Mutex mutex_; +}; + +BenchmarkFamilies* BenchmarkFamilies::GetInstance() { + static BenchmarkFamilies instance; + return &instance; +} + +size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr family) { + MutexLock l(mutex_); + size_t index = families_.size(); + families_.push_back(std::move(family)); + return index; +} + +void BenchmarkFamilies::ClearBenchmarks() { + MutexLock l(mutex_); + families_.clear(); + families_.shrink_to_fit(); +} + +bool BenchmarkFamilies::FindBenchmarks( + std::string spec, std::vector* benchmarks, + std::ostream* ErrStream) { + BM_CHECK(ErrStream); + auto& Err = *ErrStream; + // Make regular expression out of command-line flag + std::string error_msg; + Regex re; + bool is_negative_filter = false; + if (spec[0] == '-') { + spec.replace(0, 1, ""); + is_negative_filter = true; + } + if (!re.Init(spec, &error_msg)) { + Err << "Could not compile benchmark re: " << error_msg << std::endl; + return false; + } + + // Special list of thread counts to use when none are specified + const std::vector one_thread = {1}; + + int next_family_index = 0; + + MutexLock l(mutex_); + for (std::unique_ptr& family : families_) { + int family_index = next_family_index; + int per_family_instance_index = 0; + + // Family was deleted or benchmark doesn't match + if (!family) continue; + + if (family->ArgsCnt() == -1) { + family->Args({}); + } + const std::vector* thread_counts = + (family->thread_counts_.empty() + ? 
&one_thread + : &static_cast&>(family->thread_counts_)); + const size_t family_size = family->args_.size() * thread_counts->size(); + // The benchmark will be run at least 'family_size' different inputs. + // If 'family_size' is very large warn the user. + if (family_size > kMaxFamilySize) { + Err << "The number of inputs is very large. " << family->name_ + << " will be repeated at least " << family_size << " times.\n"; + } + // reserve in the special case the regex ".", since we know the final + // family size. this doesn't take into account any disabled benchmarks + // so worst case we reserve more than we need. + if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size); + + for (auto const& args : family->args_) { + for (int num_threads : *thread_counts) { + BenchmarkInstance instance(family.get(), family_index, + per_family_instance_index, args, + num_threads); + + const auto full_name = instance.name().str(); + if (full_name.rfind(kDisabledPrefix, 0) != 0 && + ((re.Match(full_name) && !is_negative_filter) || + (!re.Match(full_name) && is_negative_filter))) { + benchmarks->push_back(std::move(instance)); + + ++per_family_instance_index; + + // Only bump the next family index once we've estabilished that + // at least one instance of this family will be run. + if (next_family_index == family_index) ++next_family_index; + } + } + } + } + return true; +} + +Benchmark* RegisterBenchmarkInternal(Benchmark* bench) { + std::unique_ptr bench_ptr(bench); + BenchmarkFamilies* families = BenchmarkFamilies::GetInstance(); + families->AddBenchmark(std::move(bench_ptr)); + return bench; +} + +// FIXME: This function is a hack so that benchmark.cc can access +// `BenchmarkFamilies` +bool FindBenchmarksInternal(const std::string& re, + std::vector* benchmarks, + std::ostream* Err) { + return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err); +} + +//=============================================================================// +// Benchmark +//=============================================================================// + +Benchmark::Benchmark(const std::string& name) + : name_(name), + aggregation_report_mode_(ARM_Unspecified), + time_unit_(GetDefaultTimeUnit()), + use_default_time_unit_(true), + range_multiplier_(kRangeMultiplier), + min_time_(0), + min_warmup_time_(0), + iterations_(0), + repetitions_(0), + measure_process_cpu_time_(false), + use_real_time_(false), + use_manual_time_(false), + complexity_(oNone), + complexity_lambda_(nullptr), + setup_(nullptr), + teardown_(nullptr) { + ComputeStatistics("mean", StatisticsMean); + ComputeStatistics("median", StatisticsMedian); + ComputeStatistics("stddev", StatisticsStdDev); + ComputeStatistics("cv", StatisticsCV, kPercentage); +} + +Benchmark::~Benchmark() {} + +Benchmark* Benchmark::Name(const std::string& name) { + SetName(name); + return this; +} + +Benchmark* Benchmark::Arg(int64_t x) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + args_.push_back({x}); + return this; +} + +Benchmark* Benchmark::Unit(TimeUnit unit) { + time_unit_ = unit; + use_default_time_unit_ = false; + return this; +} + +Benchmark* Benchmark::Range(int64_t start, int64_t limit) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + std::vector arglist; + AddRange(&arglist, start, limit, range_multiplier_); + + for (int64_t i : arglist) { + args_.push_back({i}); + } + return this; +} + +Benchmark* Benchmark::Ranges( + const std::vector>& ranges) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(ranges.size())); + std::vector> 
arglists(ranges.size()); + for (std::size_t i = 0; i < ranges.size(); i++) { + AddRange(&arglists[i], ranges[i].first, ranges[i].second, + range_multiplier_); + } + + ArgsProduct(arglists); + + return this; +} + +Benchmark* Benchmark::ArgsProduct( + const std::vector>& arglists) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(arglists.size())); + + std::vector indices(arglists.size()); + const std::size_t total = std::accumulate( + std::begin(arglists), std::end(arglists), std::size_t{1}, + [](const std::size_t res, const std::vector& arglist) { + return res * arglist.size(); + }); + std::vector args; + args.reserve(arglists.size()); + for (std::size_t i = 0; i < total; i++) { + for (std::size_t arg = 0; arg < arglists.size(); arg++) { + args.push_back(arglists[arg][indices[arg]]); + } + args_.push_back(args); + args.clear(); + + std::size_t arg = 0; + do { + indices[arg] = (indices[arg] + 1) % arglists[arg].size(); + } while (indices[arg++] == 0 && arg < arglists.size()); + } + + return this; +} + +Benchmark* Benchmark::ArgName(const std::string& name) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + arg_names_ = {name}; + return this; +} + +Benchmark* Benchmark::ArgNames(const std::vector& names) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(names.size())); + arg_names_ = names; + return this; +} + +Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + BM_CHECK_LE(start, limit); + for (int64_t arg = start; arg <= limit; arg += step) { + args_.push_back({arg}); + } + return this; +} + +Benchmark* Benchmark::Args(const std::vector& args) { + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(args.size())); + args_.push_back(args); + return this; +} + +Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) { + custom_arguments(this); + return this; +} + +Benchmark* Benchmark::Setup(void (*setup)(const benchmark::State&)) { + BM_CHECK(setup != nullptr); + setup_ = setup; + return this; +} + +Benchmark* Benchmark::Teardown(void (*teardown)(const benchmark::State&)) { + BM_CHECK(teardown != nullptr); + teardown_ = teardown; + return this; +} + +Benchmark* Benchmark::RangeMultiplier(int multiplier) { + BM_CHECK(multiplier > 1); + range_multiplier_ = multiplier; + return this; +} + +Benchmark* Benchmark::MinTime(double t) { + BM_CHECK(t > 0.0); + BM_CHECK(iterations_ == 0); + min_time_ = t; + return this; +} + +Benchmark* Benchmark::MinWarmUpTime(double t) { + BM_CHECK(t >= 0.0); + BM_CHECK(iterations_ == 0); + min_warmup_time_ = t; + return this; +} + +Benchmark* Benchmark::Iterations(IterationCount n) { + BM_CHECK(n > 0); + BM_CHECK(IsZero(min_time_)); + BM_CHECK(IsZero(min_warmup_time_)); + iterations_ = n; + return this; +} + +Benchmark* Benchmark::Repetitions(int n) { + BM_CHECK(n > 0); + repetitions_ = n; + return this; +} + +Benchmark* Benchmark::ReportAggregatesOnly(bool value) { + aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default; + return this; +} + +Benchmark* Benchmark::DisplayAggregatesOnly(bool value) { + // If we were called, the report mode is no longer 'unspecified', in any case. 
+ aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_Default); + + if (value) { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly); + } else { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly); + } + + return this; +} + +Benchmark* Benchmark::MeasureProcessCPUTime() { + // Can be used together with UseRealTime() / UseManualTime(). + measure_process_cpu_time_ = true; + return this; +} + +Benchmark* Benchmark::UseRealTime() { + BM_CHECK(!use_manual_time_) + << "Cannot set UseRealTime and UseManualTime simultaneously."; + use_real_time_ = true; + return this; +} + +Benchmark* Benchmark::UseManualTime() { + BM_CHECK(!use_real_time_) + << "Cannot set UseRealTime and UseManualTime simultaneously."; + use_manual_time_ = true; + return this; +} + +Benchmark* Benchmark::Complexity(BigO complexity) { + complexity_ = complexity; + return this; +} + +Benchmark* Benchmark::Complexity(BigOFunc* complexity) { + complexity_lambda_ = complexity; + complexity_ = oLambda; + return this; +} + +Benchmark* Benchmark::ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit) { + statistics_.emplace_back(name, statistics, unit); + return this; +} + +Benchmark* Benchmark::Threads(int t) { + BM_CHECK_GT(t, 0); + thread_counts_.push_back(t); + return this; +} + +Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) { + BM_CHECK_GT(min_threads, 0); + BM_CHECK_GE(max_threads, min_threads); + + AddRange(&thread_counts_, min_threads, max_threads, 2); + return this; +} + +Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads, + int stride) { + BM_CHECK_GT(min_threads, 0); + BM_CHECK_GE(max_threads, min_threads); + BM_CHECK_GE(stride, 1); + + for (auto i = min_threads; i < max_threads; i += stride) { + thread_counts_.push_back(i); + } + thread_counts_.push_back(max_threads); + return this; +} + +Benchmark* Benchmark::ThreadPerCpu() { + thread_counts_.push_back(CPUInfo::Get().num_cpus); + return this; +} + +void Benchmark::SetName(const std::string& name) { name_ = name; } + +const char* Benchmark::GetName() const { return name_.c_str(); } + +int Benchmark::ArgsCnt() const { + if (args_.empty()) { + if (arg_names_.empty()) return -1; + return static_cast(arg_names_.size()); + } + return static_cast(args_.front().size()); +} + +const char* Benchmark::GetArgName(int arg) const { + BM_CHECK_GE(arg, 0); + size_t uarg = static_cast(arg); + BM_CHECK_LT(uarg, arg_names_.size()); + return arg_names_[uarg].c_str(); +} + +TimeUnit Benchmark::GetTimeUnit() const { + return use_default_time_unit_ ? 
GetDefaultTimeUnit() : time_unit_; +} + +//=============================================================================// +// FunctionBenchmark +//=============================================================================// + +void FunctionBenchmark::Run(State& st) { func_(st); } + +} // end namespace internal + +void ClearRegisteredBenchmarks() { + internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks(); +} + +std::vector CreateRange(int64_t lo, int64_t hi, int multi) { + std::vector args; + internal::AddRange(&args, lo, hi, multi); + return args; +} + +std::vector CreateDenseRange(int64_t start, int64_t limit, int step) { + BM_CHECK_LE(start, limit); + std::vector args; + for (int64_t arg = start; arg <= limit; arg += step) { + args.push_back(arg); + } + return args; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/benchmark_register.h b/vendor/googlebenchmark/src/benchmark_register.h new file mode 100644 index 000000000..be50265f7 --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_register.h @@ -0,0 +1,109 @@ +#ifndef BENCHMARK_REGISTER_H +#define BENCHMARK_REGISTER_H + +#include +#include +#include + +#include "check.h" + +namespace benchmark { +namespace internal { + +// Append the powers of 'mult' in the closed interval [lo, hi]. +// Returns iterator to the start of the inserted range. +template +typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, + int mult) { + BM_CHECK_GE(lo, 0); + BM_CHECK_GE(hi, lo); + BM_CHECK_GE(mult, 2); + + const size_t start_offset = dst->size(); + + static const T kmax = std::numeric_limits::max(); + + // Space out the values in multiples of "mult" + for (T i = static_cast(1); i <= hi; i = static_cast(i * mult)) { + if (i >= lo) { + dst->push_back(i); + } + // Break the loop here since multiplying by + // 'mult' would move outside of the range of T + if (i > kmax / mult) break; + } + + return dst->begin() + static_cast(start_offset); +} + +template +void AddNegatedPowers(std::vector* dst, T lo, T hi, int mult) { + // We negate lo and hi so we require that they cannot be equal to 'min'. + BM_CHECK_GT(lo, std::numeric_limits::min()); + BM_CHECK_GT(hi, std::numeric_limits::min()); + BM_CHECK_GE(hi, lo); + BM_CHECK_LE(hi, 0); + + // Add positive powers, then negate and reverse. + // Casts necessary since small integers get promoted + // to 'int' when negating. + const auto lo_complement = static_cast(-lo); + const auto hi_complement = static_cast(-hi); + + const auto it = AddPowers(dst, hi_complement, lo_complement, mult); + + std::for_each(it, dst->end(), [](T& t) { t = static_cast(t * -1); }); + std::reverse(it, dst->end()); +} + +template +void AddRange(std::vector* dst, T lo, T hi, int mult) { + static_assert(std::is_integral::value && std::is_signed::value, + "Args type must be a signed integer"); + + BM_CHECK_GE(hi, lo); + BM_CHECK_GE(mult, 2); + + // Add "lo" + dst->push_back(lo); + + // Handle lo == hi as a special case, so we then know + // lo < hi and so it is safe to add 1 to lo and subtract 1 + // from hi without falling outside of the range of T. + if (lo == hi) return; + + // Ensure that lo_inner <= hi_inner below. + if (lo + 1 == hi) { + dst->push_back(hi); + return; + } + + // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive). 
+ const auto lo_inner = static_cast(lo + 1); + const auto hi_inner = static_cast(hi - 1); + + // Insert negative values + if (lo_inner < 0) { + AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult); + } + + // Treat 0 as a special case (see discussion on #762). + if (lo < 0 && hi >= 0) { + dst->push_back(0); + } + + // Insert positive values + if (hi_inner > 0) { + AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult); + } + + // Add "hi" (if different from last value). + if (hi != dst->back()) { + dst->push_back(hi); + } +} + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_REGISTER_H diff --git a/vendor/googlebenchmark/src/benchmark_runner.cc b/vendor/googlebenchmark/src/benchmark_runner.cc new file mode 100644 index 000000000..3a8c3076a --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_runner.cc @@ -0,0 +1,525 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark_runner.h" + +#include "benchmark/benchmark.h" +#include "benchmark_api_internal.h" +#include "internal_macros.h" + +#ifndef BENCHMARK_OS_WINDOWS +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +#include +#endif +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "log.h" +#include "mutex.h" +#include "perf_counters.h" +#include "re.h" +#include "statistics.h" +#include "string_util.h" +#include "thread_manager.h" +#include "thread_timer.h" + +namespace benchmark { + +namespace internal { + +MemoryManager* memory_manager = nullptr; + +ProfilerManager* profiler_manager = nullptr; + +namespace { + +static constexpr IterationCount kMaxIterations = 1000000000000; +const double kDefaultMinTime = + std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr); + +BenchmarkReporter::Run CreateRunReport( + const benchmark::internal::BenchmarkInstance& b, + const internal::ThreadManager::Result& results, + IterationCount memory_iterations, + const MemoryManager::Result* memory_result, double seconds, + int64_t repetition_index, int64_t repeats) { + // Create report about this benchmark run. + BenchmarkReporter::Run report; + + report.run_name = b.name(); + report.family_index = b.family_index(); + report.per_family_instance_index = b.per_family_instance_index(); + report.skipped = results.skipped_; + report.skip_message = results.skip_message_; + report.report_label = results.report_label_; + // This is the total iterations across all threads. 
+ report.iterations = results.iterations; + report.time_unit = b.time_unit(); + report.threads = b.threads(); + report.repetition_index = repetition_index; + report.repetitions = repeats; + + if (!report.skipped) { + if (b.use_manual_time()) { + report.real_accumulated_time = results.manual_time_used; + } else { + report.real_accumulated_time = results.real_time_used; + } + report.use_real_time_for_initial_big_o = b.use_manual_time(); + report.cpu_accumulated_time = results.cpu_time_used; + report.complexity_n = results.complexity_n; + report.complexity = b.complexity(); + report.complexity_lambda = b.complexity_lambda(); + report.statistics = &b.statistics(); + report.counters = results.counters; + + if (memory_iterations > 0) { + assert(memory_result != nullptr); + report.memory_result = memory_result; + report.allocs_per_iter = + memory_iterations ? static_cast(memory_result->num_allocs) / + static_cast(memory_iterations) + : 0; + } + + internal::Finish(&report.counters, results.iterations, seconds, + b.threads()); + } + return report; +} + +// Execute one thread of benchmark b for the specified number of iterations. +// Adds the stats collected for the thread into manager->results. +void RunInThread(const BenchmarkInstance* b, IterationCount iters, + int thread_id, ThreadManager* manager, + PerfCountersMeasurement* perf_counters_measurement) { + internal::ThreadTimer timer( + b->measure_process_cpu_time() + ? internal::ThreadTimer::CreateProcessCpuTime() + : internal::ThreadTimer::Create()); + + State st = + b->Run(iters, thread_id, &timer, manager, perf_counters_measurement); + BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations) + << "Benchmark returned before State::KeepRunning() returned false!"; + { + MutexLock l(manager->GetBenchmarkMutex()); + internal::ThreadManager::Result& results = manager->results; + results.iterations += st.iterations(); + results.cpu_time_used += timer.cpu_time_used(); + results.real_time_used += timer.real_time_used(); + results.manual_time_used += timer.manual_time_used(); + results.complexity_n += st.complexity_length_n(); + internal::Increment(&results.counters, st.counters); + } + manager->NotifyThreadComplete(); +} + +double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b, + const BenchTimeType& iters_or_time) { + if (!IsZero(b.min_time())) return b.min_time(); + // If the flag was used to specify number of iters, then return the default + // min_time. + if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime; + + return iters_or_time.time; +} + +IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b, + const BenchTimeType& iters_or_time) { + if (b.iterations() != 0) return b.iterations(); + + // We've already concluded that this flag is currently used to pass + // iters but do a check here again anyway. + BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS); + return iters_or_time.iters; +} + +} // end namespace + +BenchTimeType ParseBenchMinTime(const std::string& value) { + BenchTimeType ret; + + if (value.empty()) { + ret.tag = BenchTimeType::TIME; + ret.time = 0.0; + return ret; + } + + if (value.back() == 'x') { + char* p_end; + // Reset errno before it's changed by strtol. + errno = 0; + IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10); + + // After a valid parse, p_end should have been set to + // point to the 'x' suffix. + BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x') + << "Malformed iters value passed to --benchmark_min_time: `" << value + << "`. 
Expected --benchmark_min_time=x."; + + ret.tag = BenchTimeType::ITERS; + ret.iters = num_iters; + return ret; + } + + bool has_suffix = value.back() == 's'; + if (!has_suffix) { + BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. " + "Eg., `30s` for 30-seconds."; + } + + char* p_end; + // Reset errno before it's changed by strtod. + errno = 0; + double min_time = std::strtod(value.c_str(), &p_end); + + // After a successful parse, p_end should point to the suffix 's', + // or the end of the string if the suffix was omitted. + BM_CHECK(errno == 0 && p_end != nullptr && + ((has_suffix && *p_end == 's') || *p_end == '\0')) + << "Malformed seconds value passed to --benchmark_min_time: `" << value + << "`. Expected --benchmark_min_time=x."; + + ret.tag = BenchTimeType::TIME; + ret.time = min_time; + + return ret; +} + +BenchmarkRunner::BenchmarkRunner( + const benchmark::internal::BenchmarkInstance& b_, + PerfCountersMeasurement* pcm_, + BenchmarkReporter::PerFamilyRunReports* reports_for_family_) + : b(b_), + reports_for_family(reports_for_family_), + parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)), + min_time(ComputeMinTime(b_, parsed_benchtime_flag)), + min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0) + ? b.min_warmup_time() + : FLAGS_benchmark_min_warmup_time), + warmup_done(!(min_warmup_time > 0.0)), + repeats(b.repetitions() != 0 ? b.repetitions() + : FLAGS_benchmark_repetitions), + has_explicit_iteration_count(b.iterations() != 0 || + parsed_benchtime_flag.tag == + BenchTimeType::ITERS), + pool(static_cast(b.threads() - 1)), + iters(has_explicit_iteration_count + ? ComputeIters(b_, parsed_benchtime_flag) + : 1), + perf_counters_measurement_ptr(pcm_) { + run_results.display_report_aggregates_only = + (FLAGS_benchmark_report_aggregates_only || + FLAGS_benchmark_display_aggregates_only); + run_results.file_report_aggregates_only = + FLAGS_benchmark_report_aggregates_only; + if (b.aggregation_report_mode() != internal::ARM_Unspecified) { + run_results.display_report_aggregates_only = + (b.aggregation_report_mode() & + internal::ARM_DisplayReportAggregatesOnly); + run_results.file_report_aggregates_only = + (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly); + BM_CHECK(FLAGS_benchmark_perf_counters.empty() || + (perf_counters_measurement_ptr->num_counters() == 0)) + << "Perf counters were requested but could not be set up."; + } +} + +BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() { + BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n"; + + std::unique_ptr manager; + manager.reset(new internal::ThreadManager(b.threads())); + + // Run all but one thread in separate threads + for (std::size_t ti = 0; ti < pool.size(); ++ti) { + pool[ti] = std::thread(&RunInThread, &b, iters, static_cast(ti + 1), + manager.get(), perf_counters_measurement_ptr); + } + // And run one thread here directly. + // (If we were asked to run just one thread, we don't create new threads.) + // Yes, we need to do this here *after* we start the separate threads. + RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr); + + // The main thread has finished. Now let's wait for the other threads. + manager->WaitForAllThreads(); + for (std::thread& thread : pool) thread.join(); + + IterationResults i; + // Acquire the measurements/counters from the manager, UNDER THE LOCK! + { + MutexLock l(manager->GetBenchmarkMutex()); + i.results = manager->results; + } + + // And get rid of the manager. 
+ manager.reset(); + + // Adjust real/manual time stats since they were reported per thread. + i.results.real_time_used /= b.threads(); + i.results.manual_time_used /= b.threads(); + // If we were measuring whole-process CPU usage, adjust the CPU time too. + if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads(); + + BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" + << i.results.real_time_used << "\n"; + + // By using KeepRunningBatch a benchmark can iterate more times than + // requested, so take the iteration count from i.results. + i.iters = i.results.iterations / b.threads(); + + // Base decisions off of real time if requested by this benchmark. + i.seconds = i.results.cpu_time_used; + if (b.use_manual_time()) { + i.seconds = i.results.manual_time_used; + } else if (b.use_real_time()) { + i.seconds = i.results.real_time_used; + } + + return i; +} + +IterationCount BenchmarkRunner::PredictNumItersNeeded( + const IterationResults& i) const { + // See how much iterations should be increased by. + // Note: Avoid division by zero with max(seconds, 1ns). + double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9); + // If our last run was at least 10% of FLAGS_benchmark_min_time then we + // use the multiplier directly. + // Otherwise we use at most 10 times expansion. + // NOTE: When the last run was at least 10% of the min time the max + // expansion should be 14x. + const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1; + multiplier = is_significant ? multiplier : 10.0; + + // So what seems to be the sufficiently-large iteration count? Round up. + const IterationCount max_next_iters = static_cast( + std::llround(std::max(multiplier * static_cast(i.iters), + static_cast(i.iters) + 1.0))); + // But we do have *some* limits though.. + const IterationCount next_iters = std::min(max_next_iters, kMaxIterations); + + BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n"; + return next_iters; // round up before conversion to integer. +} + +bool BenchmarkRunner::ShouldReportIterationResults( + const IterationResults& i) const { + // Determine if this run should be reported; + // Either it has run for a sufficient amount of time + // or because an error was reported. + return i.results.skipped_ || + i.iters >= kMaxIterations || // Too many iterations already. + i.seconds >= + GetMinTimeToApply() || // The elapsed time is large enough. + // CPU time is specified but the elapsed real time greatly exceeds + // the minimum time. + // Note that user provided timers are except from this test. + ((i.results.real_time_used >= 5 * GetMinTimeToApply()) && + !b.use_manual_time()); +} + +double BenchmarkRunner::GetMinTimeToApply() const { + // In order to re-use functionality to run and measure benchmarks for running + // a warmup phase of the benchmark, we need a way of telling whether to apply + // min_time or min_warmup_time. This function will figure out if we are in the + // warmup phase and therefore need to apply min_warmup_time or if we already + // in the benchmarking phase and min_time needs to be applied. + return warmup_done ? min_time : min_warmup_time; +} + +void BenchmarkRunner::FinishWarmUp(const IterationCount& i) { + warmup_done = true; + iters = i; +} + +void BenchmarkRunner::RunWarmUp() { + // Use the same mechanisms for warming up the benchmark as used for actually + // running and measuring the benchmark. 
+ IterationResults i_warmup; + // Dont use the iterations determined in the warmup phase for the actual + // measured benchmark phase. While this may be a good starting point for the + // benchmark and it would therefore get rid of the need to figure out how many + // iterations are needed if min_time is set again, this may also be a complete + // wrong guess since the warmup loops might be considerably slower (e.g + // because of caching effects). + const IterationCount i_backup = iters; + + for (;;) { + b.Setup(); + i_warmup = DoNIterations(); + b.Teardown(); + + const bool finish = ShouldReportIterationResults(i_warmup); + + if (finish) { + FinishWarmUp(i_backup); + break; + } + + // Although we are running "only" a warmup phase where running enough + // iterations at once without measuring time isn't as important as it is for + // the benchmarking phase, we still do it the same way as otherwise it is + // very confusing for the user to know how to choose a proper value for + // min_warmup_time if a different approach on running it is used. + iters = PredictNumItersNeeded(i_warmup); + assert(iters > i_warmup.iters && + "if we did more iterations than we want to do the next time, " + "then we should have accepted the current iteration run."); + } +} + +MemoryManager::Result* BenchmarkRunner::RunMemoryManager( + IterationCount memory_iterations) { + // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an + // optional so we don't have to own the Result here. + // Can't do it now due to cxx03. + memory_results.push_back(MemoryManager::Result()); + MemoryManager::Result* memory_result = &memory_results.back(); + memory_manager->Start(); + std::unique_ptr manager; + manager.reset(new internal::ThreadManager(1)); + b.Setup(); + RunInThread(&b, memory_iterations, 0, manager.get(), + perf_counters_measurement_ptr); + manager->WaitForAllThreads(); + manager.reset(); + b.Teardown(); + memory_manager->Stop(*memory_result); + return memory_result; +} + +void BenchmarkRunner::RunProfilerManager() { + // TODO: Provide a way to specify the number of iterations. + IterationCount profile_iterations = 1; + std::unique_ptr manager; + manager.reset(new internal::ThreadManager(1)); + b.Setup(); + profiler_manager->AfterSetupStart(); + RunInThread(&b, profile_iterations, 0, manager.get(), + /*perf_counters_measurement_ptr=*/nullptr); + manager->WaitForAllThreads(); + profiler_manager->BeforeTeardownStop(); + manager.reset(); + b.Teardown(); +} + +void BenchmarkRunner::DoOneRepetition() { + assert(HasRepeatsRemaining() && "Already done all repetitions?"); + + const bool is_the_first_repetition = num_repetitions_done == 0; + + // In case a warmup phase is requested by the benchmark, run it now. + // After running the warmup phase the BenchmarkRunner should be in a state as + // this warmup never happened except the fact that warmup_done is set. Every + // other manipulation of the BenchmarkRunner instance would be a bug! Please + // fix it. + if (!warmup_done) RunWarmUp(); + + IterationResults i; + // We *may* be gradually increasing the length (iteration count) + // of the benchmark until we decide the results are significant. + // And once we do, we report those last results and exit. + // Please do note that the if there are repetitions, the iteration count + // is *only* calculated for the *first* repetition, and other repetitions + // simply use that precomputed iteration count. 
+ for (;;) { + b.Setup(); + i = DoNIterations(); + b.Teardown(); + + // Do we consider the results to be significant? + // If we are doing repetitions, and the first repetition was already done, + // it has calculated the correct iteration time, so we have run that very + // iteration count just now. No need to calculate anything. Just report. + // Else, the normal rules apply. + const bool results_are_significant = !is_the_first_repetition || + has_explicit_iteration_count || + ShouldReportIterationResults(i); + + if (results_are_significant) break; // Good, let's report them! + + // Nope, bad iteration. Let's re-estimate the hopefully-sufficient + // iteration count, and run the benchmark again... + + iters = PredictNumItersNeeded(i); + assert(iters > i.iters && + "if we did more iterations than we want to do the next time, " + "then we should have accepted the current iteration run."); + } + + // Produce memory measurements if requested. + MemoryManager::Result* memory_result = nullptr; + IterationCount memory_iterations = 0; + if (memory_manager != nullptr) { + // Only run a few iterations to reduce the impact of one-time + // allocations in benchmarks that are not properly managed. + memory_iterations = std::min(16, iters); + memory_result = RunMemoryManager(memory_iterations); + } + + if (profiler_manager != nullptr) { + RunProfilerManager(); + } + + // Ok, now actually report. + BenchmarkReporter::Run report = + CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, + num_repetitions_done, repeats); + + if (reports_for_family) { + ++reports_for_family->num_runs_done; + if (!report.skipped) reports_for_family->Runs.push_back(report); + } + + run_results.non_aggregates.push_back(report); + + ++num_repetitions_done; +} + +RunResults&& BenchmarkRunner::GetResults() { + assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?"); + + // Calculate additional statistics over the repetitions of this instance. + run_results.aggregates_only = ComputeStats(run_results.non_aggregates); + + return std::move(run_results); +} + +} // end namespace internal + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/benchmark_runner.h b/vendor/googlebenchmark/src/benchmark_runner.h new file mode 100644 index 000000000..cd34d2d5b --- /dev/null +++ b/vendor/googlebenchmark/src/benchmark_runner.h @@ -0,0 +1,136 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
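[Editorial aside, not part of the vendored benchmark_runner.h below: the runner declared in this header decides iteration counts either from an explicit per-benchmark iteration count or by growing iterations until a minimum running time is reached, as implemented in benchmark_runner.cc above. A hedged sketch of the two ways a user typically pins this down through the public API; BM_ParseInt is a hypothetical benchmark name, the API calls themselves (Iterations, MinTime, Repetitions, DoNotOptimize) are standard Google Benchmark.]

#include <string>

#include <benchmark/benchmark.h>

static void BM_ParseInt(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(std::stoi("42"));
  }
}

// Explicit iteration count: the runner takes the count as-is and skips the
// time-based prediction loop.
BENCHMARK(BM_ParseInt)->Iterations(1000);

// Time-based: the runner keeps predicting larger iteration counts until a run
// lasts roughly 2 seconds, then repeats the whole measurement 5 times.
BENCHMARK(BM_ParseInt)->MinTime(2.0)->Repetitions(5);

BENCHMARK_MAIN();

[The same choice can be made at run time with the flag parsed above: a value ending in "s" (e.g. 2s) is treated as seconds, while a value ending in "x" (e.g. 500x) is treated as an explicit iteration count. End of aside; the vendored header continues below.]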
+ +#ifndef BENCHMARK_RUNNER_H_ +#define BENCHMARK_RUNNER_H_ + +#include +#include + +#include "benchmark_api_internal.h" +#include "internal_macros.h" +#include "perf_counters.h" +#include "thread_manager.h" + +namespace benchmark { + +BM_DECLARE_string(benchmark_min_time); +BM_DECLARE_double(benchmark_min_warmup_time); +BM_DECLARE_int32(benchmark_repetitions); +BM_DECLARE_bool(benchmark_report_aggregates_only); +BM_DECLARE_bool(benchmark_display_aggregates_only); +BM_DECLARE_string(benchmark_perf_counters); + +namespace internal { + +extern MemoryManager* memory_manager; +extern ProfilerManager* profiler_manager; + +struct RunResults { + std::vector non_aggregates; + std::vector aggregates_only; + + bool display_report_aggregates_only = false; + bool file_report_aggregates_only = false; +}; + +struct BENCHMARK_EXPORT BenchTimeType { + enum { ITERS, TIME } tag; + union { + IterationCount iters; + double time; + }; +}; + +BENCHMARK_EXPORT +BenchTimeType ParseBenchMinTime(const std::string& value); + +class BenchmarkRunner { + public: + BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, + benchmark::internal::PerfCountersMeasurement* pmc_, + BenchmarkReporter::PerFamilyRunReports* reports_for_family); + + int GetNumRepeats() const { return repeats; } + + bool HasRepeatsRemaining() const { + return GetNumRepeats() != num_repetitions_done; + } + + void DoOneRepetition(); + + RunResults&& GetResults(); + + BenchmarkReporter::PerFamilyRunReports* GetReportsForFamily() const { + return reports_for_family; + } + + double GetMinTime() const { return min_time; } + + bool HasExplicitIters() const { return has_explicit_iteration_count; } + + IterationCount GetIters() const { return iters; } + + private: + RunResults run_results; + + const benchmark::internal::BenchmarkInstance& b; + BenchmarkReporter::PerFamilyRunReports* reports_for_family; + + BenchTimeType parsed_benchtime_flag; + const double min_time; + const double min_warmup_time; + bool warmup_done; + const int repeats; + const bool has_explicit_iteration_count; + + int num_repetitions_done = 0; + + std::vector pool; + + std::vector memory_results; + + IterationCount iters; // preserved between repetitions! + // So only the first repetition has to find/calculate it, + // the other repetitions will just use that precomputed iteration count. 
+ + PerfCountersMeasurement* const perf_counters_measurement_ptr = nullptr; + + struct IterationResults { + internal::ThreadManager::Result results; + IterationCount iters; + double seconds; + }; + IterationResults DoNIterations(); + + MemoryManager::Result* RunMemoryManager(IterationCount memory_iterations); + + void RunProfilerManager(); + + IterationCount PredictNumItersNeeded(const IterationResults& i) const; + + bool ShouldReportIterationResults(const IterationResults& i) const; + + double GetMinTimeToApply() const; + + void FinishWarmUp(const IterationCount& i); + + void RunWarmUp(); +}; + +} // namespace internal + +} // end namespace benchmark + +#endif // BENCHMARK_RUNNER_H_ diff --git a/vendor/googlebenchmark/src/check.cc b/vendor/googlebenchmark/src/check.cc new file mode 100644 index 000000000..5f7526e08 --- /dev/null +++ b/vendor/googlebenchmark/src/check.cc @@ -0,0 +1,11 @@ +#include "check.h" + +namespace benchmark { +namespace internal { + +static AbortHandlerT* handler = &std::abort; + +BENCHMARK_EXPORT AbortHandlerT*& GetAbortHandler() { return handler; } + +} // namespace internal +} // namespace benchmark diff --git a/vendor/googlebenchmark/src/check.h b/vendor/googlebenchmark/src/check.h new file mode 100644 index 000000000..c1cd5e85e --- /dev/null +++ b/vendor/googlebenchmark/src/check.h @@ -0,0 +1,106 @@ +#ifndef CHECK_H_ +#define CHECK_H_ + +#include +#include +#include + +#include "benchmark/export.h" +#include "internal_macros.h" +#include "log.h" + +#if defined(__GNUC__) || defined(__clang__) +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#elif defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER >= 1900 +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif +#define __func__ __FUNCTION__ +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif + +namespace benchmark { +namespace internal { + +typedef void(AbortHandlerT)(); + +BENCHMARK_EXPORT +AbortHandlerT*& GetAbortHandler(); + +BENCHMARK_NORETURN inline void CallAbortHandler() { + GetAbortHandler()(); + std::abort(); // fallback to enforce noreturn +} + +// CheckHandler is the class constructed by failing BM_CHECK macros. +// CheckHandler will log information about the failures and abort when it is +// destructed. +class CheckHandler { + public: + CheckHandler(const char* check, const char* file, const char* func, int line) + : log_(GetErrorLogInstance()) { + log_ << file << ":" << line << ": " << func << ": Check `" << check + << "' failed. "; + } + + LogType& GetLog() { return log_; } + +#if defined(COMPILER_MSVC) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif + BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { + log_ << std::endl; + CallAbortHandler(); + } +#if defined(COMPILER_MSVC) +#pragma warning(pop) +#endif + + CheckHandler& operator=(const CheckHandler&) = delete; + CheckHandler(const CheckHandler&) = delete; + CheckHandler() = delete; + + private: + LogType& log_; +}; + +} // end namespace internal +} // end namespace benchmark + +// The BM_CHECK macro returns a std::ostream object that can have extra +// information written to it. +#ifndef NDEBUG +#define BM_CHECK(b) \ + (b ? 
::benchmark::internal::GetNullLogInstance() \ + : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ + .GetLog()) +#else +#define BM_CHECK(b) ::benchmark::internal::GetNullLogInstance() +#endif + +// clang-format off +// preserve whitespacing between operators for alignment +#define BM_CHECK_EQ(a, b) BM_CHECK((a) == (b)) +#define BM_CHECK_NE(a, b) BM_CHECK((a) != (b)) +#define BM_CHECK_GE(a, b) BM_CHECK((a) >= (b)) +#define BM_CHECK_LE(a, b) BM_CHECK((a) <= (b)) +#define BM_CHECK_GT(a, b) BM_CHECK((a) > (b)) +#define BM_CHECK_LT(a, b) BM_CHECK((a) < (b)) + +#define BM_CHECK_FLOAT_EQ(a, b, eps) BM_CHECK(std::fabs((a) - (b)) < (eps)) +#define BM_CHECK_FLOAT_NE(a, b, eps) BM_CHECK(std::fabs((a) - (b)) >= (eps)) +#define BM_CHECK_FLOAT_GE(a, b, eps) BM_CHECK((a) - (b) > -(eps)) +#define BM_CHECK_FLOAT_LE(a, b, eps) BM_CHECK((b) - (a) > -(eps)) +#define BM_CHECK_FLOAT_GT(a, b, eps) BM_CHECK((a) - (b) > (eps)) +#define BM_CHECK_FLOAT_LT(a, b, eps) BM_CHECK((b) - (a) > (eps)) +//clang-format on + +#endif // CHECK_H_ diff --git a/vendor/googlebenchmark/src/colorprint.cc b/vendor/googlebenchmark/src/colorprint.cc new file mode 100644 index 000000000..abc71492f --- /dev/null +++ b/vendor/googlebenchmark/src/colorprint.cc @@ -0,0 +1,200 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "colorprint.h" + +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#include +#else +#include +#endif // BENCHMARK_OS_WINDOWS + +namespace benchmark { +namespace { +#ifdef BENCHMARK_OS_WINDOWS +typedef WORD PlatformColorCode; +#else +typedef const char* PlatformColorCode; +#endif + +PlatformColorCode GetPlatformColorCode(LogColor color) { +#ifdef BENCHMARK_OS_WINDOWS + switch (color) { + case COLOR_RED: + return FOREGROUND_RED; + case COLOR_GREEN: + return FOREGROUND_GREEN; + case COLOR_YELLOW: + return FOREGROUND_RED | FOREGROUND_GREEN; + case COLOR_BLUE: + return FOREGROUND_BLUE; + case COLOR_MAGENTA: + return FOREGROUND_BLUE | FOREGROUND_RED; + case COLOR_CYAN: + return FOREGROUND_BLUE | FOREGROUND_GREEN; + case COLOR_WHITE: // fall through to default + default: + return 0; + } +#else + switch (color) { + case COLOR_RED: + return "1"; + case COLOR_GREEN: + return "2"; + case COLOR_YELLOW: + return "3"; + case COLOR_BLUE: + return "4"; + case COLOR_MAGENTA: + return "5"; + case COLOR_CYAN: + return "6"; + case COLOR_WHITE: + return "7"; + default: + return nullptr; + }; +#endif +} + +} // end namespace + +std::string FormatString(const char* msg, va_list args) { + // we might need a second shot at this, so pre-emptivly make a copy + va_list args_cp; + va_copy(args_cp, args); + + std::size_t size = 256; + char local_buff[256]; + auto ret = vsnprintf(local_buff, size, msg, args_cp); + + va_end(args_cp); + + // currently there is no error handling for failure, so this is hack. 
+ BM_CHECK(ret >= 0); + + if (ret == 0) { // handle empty expansion + return {}; + } + if (static_cast(ret) < size) { + return local_buff; + } + // we did not provide a long enough buffer on our first attempt. + size = static_cast(ret) + 1; // + 1 for the null byte + std::unique_ptr buff(new char[size]); + ret = vsnprintf(buff.get(), size, msg, args); + BM_CHECK(ret > 0 && (static_cast(ret)) < size); + return buff.get(); +} + +std::string FormatString(const char* msg, ...) { + va_list args; + va_start(args, msg); + auto tmp = FormatString(msg, args); + va_end(args); + return tmp; +} + +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + ColorPrintf(out, color, fmt, args); + va_end(args); +} + +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, + va_list args) { +#ifdef BENCHMARK_OS_WINDOWS + ((void)out); // suppress unused warning + + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + out.flush(); + SetConsoleTextAttribute(stdout_handle, + GetPlatformColorCode(color) | FOREGROUND_INTENSITY); + out << FormatString(fmt, args); + + out.flush(); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + const char* color_code = GetPlatformColorCode(color); + if (color_code) out << FormatString("\033[0;3%sm", color_code); + out << FormatString(fmt, args) << "\033[m"; +#endif +} + +bool IsColorTerminal() { +#if BENCHMARK_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return 0 != _isatty(_fileno(stdout)); +#else + // On non-Windows platforms, we rely on the TERM variable. This list of + // supported TERM values is copied from Google Test: + // . 
+ const char* const SUPPORTED_TERM_VALUES[] = { + "xterm", + "xterm-color", + "xterm-256color", + "screen", + "screen-256color", + "tmux", + "tmux-256color", + "rxvt-unicode", + "rxvt-unicode-256color", + "linux", + "cygwin", + "xterm-kitty", + "alacritty", + "foot", + "foot-extra", + "wezterm", + }; + + const char* const term = getenv("TERM"); + + bool term_supports_color = false; + for (const char* candidate : SUPPORTED_TERM_VALUES) { + if (term && 0 == strcmp(term, candidate)) { + term_supports_color = true; + break; + } + } + + return 0 != isatty(fileno(stdout)) && term_supports_color; +#endif // BENCHMARK_OS_WINDOWS +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/colorprint.h b/vendor/googlebenchmark/src/colorprint.h new file mode 100644 index 000000000..9f6fab9b3 --- /dev/null +++ b/vendor/googlebenchmark/src/colorprint.h @@ -0,0 +1,33 @@ +#ifndef BENCHMARK_COLORPRINT_H_ +#define BENCHMARK_COLORPRINT_H_ + +#include +#include +#include + +namespace benchmark { +enum LogColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW, + COLOR_BLUE, + COLOR_MAGENTA, + COLOR_CYAN, + COLOR_WHITE +}; + +std::string FormatString(const char* msg, va_list args); +std::string FormatString(const char* msg, ...); + +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, + va_list args); +void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...); + +// Returns true if stdout appears to be a terminal that supports colored +// output, false otherwise. +bool IsColorTerminal(); + +} // end namespace benchmark + +#endif // BENCHMARK_COLORPRINT_H_ diff --git a/vendor/googlebenchmark/src/commandlineflags.cc b/vendor/googlebenchmark/src/commandlineflags.cc new file mode 100644 index 000000000..dcb414959 --- /dev/null +++ b/vendor/googlebenchmark/src/commandlineflags.cc @@ -0,0 +1,298 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "commandlineflags.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../src/string_util.h" + +namespace benchmark { +namespace { + +// Parses 'str' for a 32-bit signed integer. If successful, writes +// the result to *value and returns true; otherwise leaves *value +// unchanged and returns false. +bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { + // Parses the environment variable as a decimal integer. + char* end = nullptr; + const long long_value = strtol(str, &end, 10); // NOLINT + + // Has strtol() consumed all characters in the string? + if (*end != '\0') { + // No - an invalid character was encountered. + std::cerr << src_text << " is expected to be a 32-bit integer, " + << "but actually has value \"" << str << "\".\n"; + return false; + } + + // Is the parsed value in the range of an Int32? 
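+ // Editorial note: two overflow cases are handled below. Where long is wider
+ // than 32 bits, a value that fits in long but not in int32_t is caught by
+ // the result != long_value comparison; where long is exactly 32 bits,
+ // strtol() saturates to LONG_MAX/LONG_MIN on overflow, which the first two
+ // comparisons reject.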
+ const int32_t result = static_cast(long_value); + if (long_value == std::numeric_limits::max() || + long_value == std::numeric_limits::min() || + // The parsed value overflows as a long. (strtol() returns + // LONG_MAX or LONG_MIN when the input overflows.) + result != long_value + // The parsed value overflows as an Int32. + ) { + std::cerr << src_text << " is expected to be a 32-bit integer, " + << "but actually has value \"" << str << "\", " + << "which overflows.\n"; + return false; + } + + *value = result; + return true; +} + +// Parses 'str' for a double. If successful, writes the result to *value and +// returns true; otherwise leaves *value unchanged and returns false. +bool ParseDouble(const std::string& src_text, const char* str, double* value) { + // Parses the environment variable as a decimal integer. + char* end = nullptr; + const double double_value = strtod(str, &end); // NOLINT + + // Has strtol() consumed all characters in the string? + if (*end != '\0') { + // No - an invalid character was encountered. + std::cerr << src_text << " is expected to be a double, " + << "but actually has value \"" << str << "\".\n"; + return false; + } + + *value = double_value; + return true; +} + +// Parses 'str' into KV pairs. If successful, writes the result to *value and +// returns true; otherwise leaves *value unchanged and returns false. +bool ParseKvPairs(const std::string& src_text, const char* str, + std::map* value) { + std::map kvs; + for (const auto& kvpair : StrSplit(str, ',')) { + const auto kv = StrSplit(kvpair, '='); + if (kv.size() != 2) { + std::cerr << src_text << " is expected to be a comma-separated list of " + << "= strings, but actually has value \"" << str + << "\".\n"; + return false; + } + if (!kvs.emplace(kv[0], kv[1]).second) { + std::cerr << src_text << " is expected to contain unique keys but key \"" + << kv[0] << "\" was repeated.\n"; + return false; + } + } + + *value = kvs; + return true; +} + +// Returns the name of the environment variable corresponding to the +// given flag. For example, FlagToEnvVar("foo") will return +// "BENCHMARK_FOO" in the open-source version. +static std::string FlagToEnvVar(const char* flag) { + const std::string flag_str(flag); + + std::string env_var; + for (size_t i = 0; i != flag_str.length(); ++i) + env_var += static_cast(::toupper(flag_str.c_str()[i])); + + return env_var; +} + +} // namespace + +BENCHMARK_EXPORT +bool BoolFromEnv(const char* flag, bool default_val) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value_str = getenv(env_var.c_str()); + return value_str == nullptr ? 
default_val : IsTruthyFlagValue(value_str); +} + +BENCHMARK_EXPORT +int32_t Int32FromEnv(const char* flag, int32_t default_val) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value_str = getenv(env_var.c_str()); + int32_t value = default_val; + if (value_str == nullptr || + !ParseInt32(std::string("Environment variable ") + env_var, value_str, + &value)) { + return default_val; + } + return value; +} + +BENCHMARK_EXPORT +double DoubleFromEnv(const char* flag, double default_val) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value_str = getenv(env_var.c_str()); + double value = default_val; + if (value_str == nullptr || + !ParseDouble(std::string("Environment variable ") + env_var, value_str, + &value)) { + return default_val; + } + return value; +} + +BENCHMARK_EXPORT +const char* StringFromEnv(const char* flag, const char* default_val) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value = getenv(env_var.c_str()); + return value == nullptr ? default_val : value; +} + +BENCHMARK_EXPORT +std::map KvPairsFromEnv( + const char* flag, std::map default_val) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value_str = getenv(env_var.c_str()); + + if (value_str == nullptr) return default_val; + + std::map value; + if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) { + return default_val; + } + return value; +} + +// Parses a string as a command line flag. The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or nullptr if the parsing failed. +const char* ParseFlagValue(const char* str, const char* flag, + bool def_optional) { + // str and flag must not be nullptr. + if (str == nullptr || flag == nullptr) return nullptr; + + // The flag must start with "--". + const std::string flag_str = std::string("--") + std::string(flag); + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) return flag_end; + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return nullptr; + + // Returns the string after "=". + return flag_end + 1; +} + +BENCHMARK_EXPORT +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Converts the string value to a bool. + *value = IsTruthyFlagValue(value_str); + return true; +} + +BENCHMARK_EXPORT +bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + return ParseInt32(std::string("The value of flag --") + flag, value_str, + value); +} + +BENCHMARK_EXPORT +bool ParseDoubleFlag(const char* str, const char* flag, double* value) { + // Gets the value of the flag as a string. 
+ const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + // Sets *value to the value of the flag. + return ParseDouble(std::string("The value of flag --") + flag, value_str, + value); +} + +BENCHMARK_EXPORT +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == nullptr) return false; + + *value = value_str; + return true; +} + +BENCHMARK_EXPORT +bool ParseKeyValueFlag(const char* str, const char* flag, + std::map* value) { + const char* const value_str = ParseFlagValue(str, flag, false); + + if (value_str == nullptr) return false; + + for (const auto& kvpair : StrSplit(value_str, ',')) { + const auto kv = StrSplit(kvpair, '='); + if (kv.size() != 2) return false; + value->emplace(kv[0], kv[1]); + } + + return true; +} + +BENCHMARK_EXPORT +bool IsFlag(const char* str, const char* flag) { + return (ParseFlagValue(str, flag, true) != nullptr); +} + +BENCHMARK_EXPORT +bool IsTruthyFlagValue(const std::string& value) { + if (value.size() == 1) { + char v = value[0]; + return isalnum(v) && + !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N'); + } + if (!value.empty()) { + std::string value_lower(value); + std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(), + [](char c) { return static_cast(::tolower(c)); }); + return !(value_lower == "false" || value_lower == "no" || + value_lower == "off"); + } + return true; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/commandlineflags.h b/vendor/googlebenchmark/src/commandlineflags.h new file mode 100644 index 000000000..788262897 --- /dev/null +++ b/vendor/googlebenchmark/src/commandlineflags.h @@ -0,0 +1,133 @@ +#ifndef BENCHMARK_COMMANDLINEFLAGS_H_ +#define BENCHMARK_COMMANDLINEFLAGS_H_ + +#include +#include +#include + +#include "benchmark/export.h" + +// Macro for referencing flags. +#define FLAG(name) FLAGS_##name + +// Macros for declaring flags. +#define BM_DECLARE_bool(name) BENCHMARK_EXPORT extern bool FLAG(name) +#define BM_DECLARE_int32(name) BENCHMARK_EXPORT extern int32_t FLAG(name) +#define BM_DECLARE_double(name) BENCHMARK_EXPORT extern double FLAG(name) +#define BM_DECLARE_string(name) BENCHMARK_EXPORT extern std::string FLAG(name) +#define BM_DECLARE_kvpairs(name) \ + BENCHMARK_EXPORT extern std::map FLAG(name) + +// Macros for defining flags. +#define BM_DEFINE_bool(name, default_val) \ + BENCHMARK_EXPORT bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val) +#define BM_DEFINE_int32(name, default_val) \ + BENCHMARK_EXPORT int32_t FLAG(name) = \ + benchmark::Int32FromEnv(#name, default_val) +#define BM_DEFINE_double(name, default_val) \ + BENCHMARK_EXPORT double FLAG(name) = \ + benchmark::DoubleFromEnv(#name, default_val) +#define BM_DEFINE_string(name, default_val) \ + BENCHMARK_EXPORT std::string FLAG(name) = \ + benchmark::StringFromEnv(#name, default_val) +#define BM_DEFINE_kvpairs(name, default_val) \ + BENCHMARK_EXPORT std::map FLAG(name) = \ + benchmark::KvPairsFromEnv(#name, default_val) + +namespace benchmark { + +// Parses a bool from the environment variable corresponding to the given flag. +// +// If the variable exists, returns IsTruthyFlagValue() value; if not, +// returns the given default value. 
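+// Illustrative example (editorial, not part of the upstream header): with
+// BENCHMARK_MY_FLAG=yes set in the environment,
+//   bool v = benchmark::BoolFromEnv("benchmark_my_flag", false);  // v == true
+// whereas an unset variable simply yields the supplied default.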
+BENCHMARK_EXPORT +bool BoolFromEnv(const char* flag, bool default_val); + +// Parses an Int32 from the environment variable corresponding to the given +// flag. +// +// If the variable exists, returns ParseInt32() value; if not, returns +// the given default value. +BENCHMARK_EXPORT +int32_t Int32FromEnv(const char* flag, int32_t default_val); + +// Parses an Double from the environment variable corresponding to the given +// flag. +// +// If the variable exists, returns ParseDouble(); if not, returns +// the given default value. +BENCHMARK_EXPORT +double DoubleFromEnv(const char* flag, double default_val); + +// Parses a string from the environment variable corresponding to the given +// flag. +// +// If variable exists, returns its value; if not, returns +// the given default value. +BENCHMARK_EXPORT +const char* StringFromEnv(const char* flag, const char* default_val); + +// Parses a set of kvpairs from the environment variable corresponding to the +// given flag. +// +// If variable exists, returns its value; if not, returns +// the given default value. +BENCHMARK_EXPORT +std::map KvPairsFromEnv( + const char* flag, std::map default_val); + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true if it passes IsTruthyValue(). +// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +BENCHMARK_EXPORT +bool ParseBoolFlag(const char* str, const char* flag, bool* value); + +// Parses a string for an Int32 flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +BENCHMARK_EXPORT +bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); + +// Parses a string for a Double flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +BENCHMARK_EXPORT +bool ParseDoubleFlag(const char* str, const char* flag, double* value); + +// Parses a string for a string flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +BENCHMARK_EXPORT +bool ParseStringFlag(const char* str, const char* flag, std::string* value); + +// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value" +// +// On success, stores the value of the flag in *value and returns true. On +// failure returns false, though *value may have been mutated. +BENCHMARK_EXPORT +bool ParseKeyValueFlag(const char* str, const char* flag, + std::map* value); + +// Returns true if the string matches the flag. +BENCHMARK_EXPORT +bool IsFlag(const char* str, const char* flag); + +// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or +// some non-alphanumeric character. Also returns false if the value matches +// one of 'no', 'false', 'off' (case-insensitive). As a special case, also +// returns true if value is the empty string. 
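+// Illustrative examples (editorial): "1", "t", "true", "yes" and "on" are
+// truthy; "0", "f", "N", "false", "no" and "off" are not; the empty string
+// counts as true.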
+BENCHMARK_EXPORT +bool IsTruthyFlagValue(const std::string& value); + +} // end namespace benchmark + +#endif // BENCHMARK_COMMANDLINEFLAGS_H_ diff --git a/vendor/googlebenchmark/src/complexity.cc b/vendor/googlebenchmark/src/complexity.cc new file mode 100644 index 000000000..63acd504d --- /dev/null +++ b/vendor/googlebenchmark/src/complexity.cc @@ -0,0 +1,255 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Adapted to be used with google benchmark + +#include "complexity.h" + +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" + +namespace benchmark { + +// Internal function to calculate the different scalability forms +BigOFunc* FittingCurve(BigO complexity) { + switch (complexity) { + case oN: + return [](IterationCount n) -> double { return static_cast(n); }; + case oNSquared: + return [](IterationCount n) -> double { return std::pow(n, 2); }; + case oNCubed: + return [](IterationCount n) -> double { return std::pow(n, 3); }; + case oLogN: + return [](IterationCount n) -> double { + return std::log2(static_cast(n)); + }; + case oNLogN: + return [](IterationCount n) -> double { + return static_cast(n) * std::log2(static_cast(n)); + }; + case o1: + default: + return [](IterationCount) { return 1.0; }; + } +} + +// Function to return an string for the calculated complexity +std::string GetBigOString(BigO complexity) { + switch (complexity) { + case oN: + return "N"; + case oNSquared: + return "N^2"; + case oNCubed: + return "N^3"; + case oLogN: + return "lgN"; + case oNLogN: + return "NlgN"; + case o1: + return "(1)"; + default: + return "f(N)"; + } +} + +// Find the coefficient for the high-order term in the running time, by +// minimizing the sum of squares of relative error, for the fitting curve +// given by the lambda expression. +// - n : Vector containing the size of the benchmark tests. +// - time : Vector containing the times for the benchmark tests. +// - fitting_curve : lambda expression (e.g. [](ComplexityN n) {return n; };). + +// For a deeper explanation on the algorithm logic, please refer to +// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics + +LeastSq MinimalLeastSq(const std::vector& n, + const std::vector& time, + BigOFunc* fitting_curve) { + double sigma_gn_squared = 0.0; + double sigma_time = 0.0; + double sigma_time_gn = 0.0; + + // Calculate least square fitting parameter + for (size_t i = 0; i < n.size(); ++i) { + double gn_i = fitting_curve(n[i]); + sigma_gn_squared += gn_i * gn_i; + sigma_time += time[i]; + sigma_time_gn += time[i] * gn_i; + } + + LeastSq result; + result.complexity = oLambda; + + // Calculate complexity. 
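+ // Editorial note: minimizing sum_i (time_i - coef * g(n_i))^2 over coef
+ // gives the closed form coef = sum_i(time_i * g(n_i)) / sum_i(g(n_i)^2),
+ // which is exactly what the accumulators computed above.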
+ result.coef = sigma_time_gn / sigma_gn_squared; + + // Calculate RMS + double rms = 0.0; + for (size_t i = 0; i < n.size(); ++i) { + double fit = result.coef * fitting_curve(n[i]); + rms += std::pow((time[i] - fit), 2); + } + + // Normalized RMS by the mean of the observed values + double mean = sigma_time / static_cast(n.size()); + result.rms = std::sqrt(rms / static_cast(n.size())) / mean; + + return result; +} + +// Find the coefficient for the high-order term in the running time, by +// minimizing the sum of squares of relative error. +// - n : Vector containing the size of the benchmark tests. +// - time : Vector containing the times for the benchmark tests. +// - complexity : If different than oAuto, the fitting curve will stick to +// this one. If it is oAuto, it will be calculated the best +// fitting curve. +LeastSq MinimalLeastSq(const std::vector& n, + const std::vector& time, const BigO complexity) { + BM_CHECK_EQ(n.size(), time.size()); + BM_CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two + // benchmark runs are given + BM_CHECK_NE(complexity, oNone); + + LeastSq best_fit; + + if (complexity == oAuto) { + std::vector fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed}; + + // Take o1 as default best fitting curve + best_fit = MinimalLeastSq(n, time, FittingCurve(o1)); + best_fit.complexity = o1; + + // Compute all possible fitting curves and stick to the best one + for (const auto& fit : fit_curves) { + LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit)); + if (current_fit.rms < best_fit.rms) { + best_fit = current_fit; + best_fit.complexity = fit; + } + } + } else { + best_fit = MinimalLeastSq(n, time, FittingCurve(complexity)); + best_fit.complexity = complexity; + } + + return best_fit; +} + +std::vector ComputeBigO( + const std::vector& reports) { + typedef BenchmarkReporter::Run Run; + std::vector results; + + if (reports.size() < 2) return results; + + // Accumulators. + std::vector n; + std::vector real_time; + std::vector cpu_time; + + // Populate the accumulators. + for (const Run& run : reports) { + BM_CHECK_GT(run.complexity_n, 0) + << "Did you forget to call SetComplexityN?"; + n.push_back(run.complexity_n); + real_time.push_back(run.real_accumulated_time / + static_cast(run.iterations)); + cpu_time.push_back(run.cpu_accumulated_time / + static_cast(run.iterations)); + } + + LeastSq result_cpu; + LeastSq result_real; + + if (reports[0].complexity == oLambda) { + result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); + result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); + } else { + const BigO* InitialBigO = &reports[0].complexity; + const bool use_real_time_for_initial_big_o = + reports[0].use_real_time_for_initial_big_o; + if (use_real_time_for_initial_big_o) { + result_real = MinimalLeastSq(n, real_time, *InitialBigO); + InitialBigO = &result_real.complexity; + // The Big-O complexity for CPU time must have the same Big-O function! + } + result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO); + InitialBigO = &result_cpu.complexity; + if (!use_real_time_for_initial_big_o) { + result_real = MinimalLeastSq(n, real_time, *InitialBigO); + } + } + + // Drop the 'args' when reporting complexity. + auto run_name = reports[0].run_name; + run_name.args.clear(); + + // Get the data from the accumulator to BenchmarkReporter::Run's. 
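+ // Editorial note: two aggregate rows are produced below: a "BigO" row whose
+ // real/cpu times carry the fitted coefficients, and an "RMS" row that
+ // reports the normalized root-mean-square error as a percentage.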
+ Run big_o; + big_o.run_name = run_name; + big_o.family_index = reports[0].family_index; + big_o.per_family_instance_index = reports[0].per_family_instance_index; + big_o.run_type = BenchmarkReporter::Run::RT_Aggregate; + big_o.repetitions = reports[0].repetitions; + big_o.repetition_index = Run::no_repetition_index; + big_o.threads = reports[0].threads; + big_o.aggregate_name = "BigO"; + big_o.aggregate_unit = StatisticUnit::kTime; + big_o.report_label = reports[0].report_label; + big_o.iterations = 0; + big_o.real_accumulated_time = result_real.coef; + big_o.cpu_accumulated_time = result_cpu.coef; + big_o.report_big_o = true; + big_o.complexity = result_cpu.complexity; + + // All the time results are reported after being multiplied by the + // time unit multiplier. But since RMS is a relative quantity it + // should not be multiplied at all. So, here, we _divide_ it by the + // multiplier so that when it is multiplied later the result is the + // correct one. + double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); + + // Only add label to mean/stddev if it is same for all runs + Run rms; + rms.run_name = run_name; + rms.family_index = reports[0].family_index; + rms.per_family_instance_index = reports[0].per_family_instance_index; + rms.run_type = BenchmarkReporter::Run::RT_Aggregate; + rms.aggregate_name = "RMS"; + rms.aggregate_unit = StatisticUnit::kPercentage; + rms.report_label = big_o.report_label; + rms.iterations = 0; + rms.repetition_index = Run::no_repetition_index; + rms.repetitions = reports[0].repetitions; + rms.threads = reports[0].threads; + rms.real_accumulated_time = result_real.rms / multiplier; + rms.cpu_accumulated_time = result_cpu.rms / multiplier; + rms.report_rms = true; + rms.complexity = result_cpu.complexity; + // don't forget to keep the time unit, or we won't be able to + // recover the correct value. + rms.time_unit = reports[0].time_unit; + + results.push_back(big_o); + results.push_back(rms); + return results; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/complexity.h b/vendor/googlebenchmark/src/complexity.h new file mode 100644 index 000000000..0a0679b48 --- /dev/null +++ b/vendor/googlebenchmark/src/complexity.h @@ -0,0 +1,55 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Adapted to be used with google benchmark + +#ifndef COMPLEXITY_H_ +#define COMPLEXITY_H_ + +#include +#include + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// Return a vector containing the bigO and RMS information for the specified +// list of reports. If 'reports.size() < 2' an empty vector is returned. +std::vector ComputeBigO( + const std::vector& reports); + +// This data structure will contain the result returned by MinimalLeastSq +// - coef : Estimated coefficient for the high-order term as +// interpolated from data. +// - rms : Normalized Root Mean Squared Error. 
+// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability +// form has been provided to MinimalLeastSq this will return +// the same value. In case BigO::oAuto has been selected, this +// parameter will return the best fitting curve detected. + +struct LeastSq { + LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} + + double coef; + double rms; + BigO complexity; +}; + +// Function to return an string for the calculated complexity +std::string GetBigOString(BigO complexity); + +} // end namespace benchmark + +#endif // COMPLEXITY_H_ diff --git a/vendor/googlebenchmark/src/console_reporter.cc b/vendor/googlebenchmark/src/console_reporter.cc new file mode 100644 index 000000000..35c3de2a4 --- /dev/null +++ b/vendor/googlebenchmark/src/console_reporter.cc @@ -0,0 +1,210 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { + +BENCHMARK_EXPORT +bool ConsoleReporter::ReportContext(const Context& context) { + name_field_width_ = context.name_field_width; + printed_header_ = false; + prev_counters_.clear(); + + PrintBasicContext(&GetErrorStream(), context); + +#ifdef BENCHMARK_OS_WINDOWS + if ((output_options_ & OO_Color)) { + auto stdOutBuf = std::cout.rdbuf(); + auto outStreamBuf = GetOutputStream().rdbuf(); + if (stdOutBuf != outStreamBuf) { + GetErrorStream() + << "Color printing is only supported for stdout on windows." 
+ " Disabling color printing\n"; + output_options_ = static_cast(output_options_ & ~OO_Color); + } + } +#endif + + return true; +} + +BENCHMARK_EXPORT +void ConsoleReporter::PrintHeader(const Run& run) { + std::string str = + FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), + "Benchmark", "Time", "CPU", "Iterations"); + if (!run.counters.empty()) { + if (output_options_ & OO_Tabular) { + for (auto const& c : run.counters) { + str += FormatString(" %10s", c.first.c_str()); + } + } else { + str += " UserCounters..."; + } + } + std::string line = std::string(str.length(), '-'); + GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; +} + +BENCHMARK_EXPORT +void ConsoleReporter::ReportRuns(const std::vector& reports) { + for (const auto& run : reports) { + // print the header: + // --- if none was printed yet + bool print_header = !printed_header_; + // --- or if the format is tabular and this run + // has different fields from the prev header + print_header |= (output_options_ & OO_Tabular) && + (!internal::SameNames(run.counters, prev_counters_)); + if (print_header) { + printed_header_ = true; + prev_counters_ = run.counters; + PrintHeader(run); + } + // As an alternative to printing the headers like this, we could sort + // the benchmarks by header and then print. But this would require + // waiting for the full results before printing, or printing twice. + PrintRunData(run); + } +} + +static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, + ...) { + va_list args; + va_start(args, fmt); + out << FormatString(fmt, args); + va_end(args); +} + +static std::string FormatTime(double time) { + // For the time columns of the console printer 13 digits are reserved. One of + // them is a space and max two of them are the time unit (e.g ns). That puts + // us at 10 digits usable for the number. + // Align decimal places... + if (time < 1.0) { + return FormatString("%10.3f", time); + } + if (time < 10.0) { + return FormatString("%10.2f", time); + } + if (time < 100.0) { + return FormatString("%10.1f", time); + } + // Assuming the time is at max 9.9999e+99 and we have 10 digits for the + // number, we get 10-1(.)-1(e)-1(sign)-2(exponent) = 5 digits to print. + if (time > 9999999999 /*max 10 digit number*/) { + return FormatString("%1.4e", time); + } + return FormatString("%10.0f", time); +} + +BENCHMARK_EXPORT +void ConsoleReporter::PrintRunData(const Run& result) { + typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); + auto& Out = GetOutputStream(); + PrinterFn* printer = (output_options_ & OO_Color) + ? static_cast(ColorPrintf) + : IgnoreColorPrint; + auto name_color = + (result.report_big_o || result.report_rms) ? 
COLOR_BLUE : COLOR_GREEN; + printer(Out, name_color, "%-*s ", name_field_width_, + result.benchmark_name().c_str()); + + if (internal::SkippedWithError == result.skipped) { + printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", + result.skip_message.c_str()); + printer(Out, COLOR_DEFAULT, "\n"); + return; + } else if (internal::SkippedWithMessage == result.skipped) { + printer(Out, COLOR_WHITE, "SKIPPED: \'%s\'", result.skip_message.c_str()); + printer(Out, COLOR_DEFAULT, "\n"); + return; + } + + const double real_time = result.GetAdjustedRealTime(); + const double cpu_time = result.GetAdjustedCPUTime(); + const std::string real_time_str = FormatTime(real_time); + const std::string cpu_time_str = FormatTime(cpu_time); + + if (result.report_big_o) { + std::string big_o = GetBigOString(result.complexity); + printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, + big_o.c_str(), cpu_time, big_o.c_str()); + } else if (result.report_rms) { + printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", + cpu_time * 100, "%"); + } else if (result.run_type != Run::RT_Aggregate || + result.aggregate_unit == StatisticUnit::kTime) { + const char* timeLabel = GetTimeUnitString(result.time_unit); + printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), + timeLabel, cpu_time_str.c_str(), timeLabel); + } else { + assert(result.aggregate_unit == StatisticUnit::kPercentage); + printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", + (100. * result.real_accumulated_time), "%", + (100. * result.cpu_accumulated_time), "%"); + } + + if (!result.report_big_o && !result.report_rms) { + printer(Out, COLOR_CYAN, "%10lld", result.iterations); + } + + for (auto& c : result.counters) { + const std::size_t cNameLen = + std::max(std::string::size_type(10), c.first.length()); + std::string s; + const char* unit = ""; + if (result.run_type == Run::RT_Aggregate && + result.aggregate_unit == StatisticUnit::kPercentage) { + s = StrFormat("%.2f", 100. * c.second.value); + unit = "%"; + } else { + s = HumanReadableNumber(c.second.value, c.second.oneK); + if (c.second.flags & Counter::kIsRate) + unit = (c.second.flags & Counter::kInvert) ? "s" : "/s"; + } + if (output_options_ & OO_Tabular) { + printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), + unit); + } else { + printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit); + } + } + + if (!result.report_label.empty()) { + printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); + } + + printer(Out, COLOR_DEFAULT, "\n"); +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/counter.cc b/vendor/googlebenchmark/src/counter.cc new file mode 100644 index 000000000..aa14cd809 --- /dev/null +++ b/vendor/googlebenchmark/src/counter.cc @@ -0,0 +1,80 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
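+// Editorial note: Finish() below applies the counter flags in a fixed order:
+// kIsRate divides by CPU time, kAvgThreads divides by the thread count,
+// kIsIterationInvariant multiplies and kAvgIterations divides by the
+// iteration count, and kInvert is always applied last.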
+ +#include "counter.h" + +namespace benchmark { +namespace internal { + +double Finish(Counter const& c, IterationCount iterations, double cpu_time, + double num_threads) { + double v = c.value; + if (c.flags & Counter::kIsRate) { + v /= cpu_time; + } + if (c.flags & Counter::kAvgThreads) { + v /= num_threads; + } + if (c.flags & Counter::kIsIterationInvariant) { + v *= static_cast(iterations); + } + if (c.flags & Counter::kAvgIterations) { + v /= static_cast(iterations); + } + + if (c.flags & Counter::kInvert) { // Invert is *always* last. + v = 1.0 / v; + } + return v; +} + +void Finish(UserCounters* l, IterationCount iterations, double cpu_time, + double num_threads) { + for (auto& c : *l) { + c.second.value = Finish(c.second, iterations, cpu_time, num_threads); + } +} + +void Increment(UserCounters* l, UserCounters const& r) { + // add counters present in both or just in *l + for (auto& c : *l) { + auto it = r.find(c.first); + if (it != r.end()) { + c.second.value = c.second + it->second; + } + } + // add counters present in r, but not in *l + for (auto const& tc : r) { + auto it = l->find(tc.first); + if (it == l->end()) { + (*l)[tc.first] = tc.second; + } + } +} + +bool SameNames(UserCounters const& l, UserCounters const& r) { + if (&l == &r) return true; + if (l.size() != r.size()) { + return false; + } + for (auto const& c : l) { + if (r.find(c.first) == r.end()) { + return false; + } + } + return true; +} + +} // end namespace internal +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/counter.h b/vendor/googlebenchmark/src/counter.h new file mode 100644 index 000000000..1f5a58e31 --- /dev/null +++ b/vendor/googlebenchmark/src/counter.h @@ -0,0 +1,32 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_COUNTER_H_ +#define BENCHMARK_COUNTER_H_ + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// these counter-related functions are hidden to reduce API surface. +namespace internal { +void Finish(UserCounters* l, IterationCount iterations, double time, + double num_threads); +void Increment(UserCounters* l, UserCounters const& r); +bool SameNames(UserCounters const& l, UserCounters const& r); +} // end namespace internal + +} // end namespace benchmark + +#endif // BENCHMARK_COUNTER_H_ diff --git a/vendor/googlebenchmark/src/csv_reporter.cc b/vendor/googlebenchmark/src/csv_reporter.cc new file mode 100644 index 000000000..4b39e2c52 --- /dev/null +++ b/vendor/googlebenchmark/src/csv_reporter.cc @@ -0,0 +1,169 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "complexity.h" +#include "string_util.h" +#include "timers.h" + +// File format reference: http://edoceo.com/utilitas/csv-file-format. + +namespace benchmark { + +namespace { +std::vector elements = { + "name", "iterations", "real_time", "cpu_time", + "time_unit", "bytes_per_second", "items_per_second", "label", + "error_occurred", "error_message"}; +} // namespace + +std::string CsvEscape(const std::string& s) { + std::string tmp; + tmp.reserve(s.size() + 2); + for (char c : s) { + switch (c) { + case '"': + tmp += "\"\""; + break; + default: + tmp += c; + break; + } + } + return '"' + tmp + '"'; +} + +BENCHMARK_EXPORT +bool CSVReporter::ReportContext(const Context& context) { + PrintBasicContext(&GetErrorStream(), context); + return true; +} + +BENCHMARK_EXPORT +void CSVReporter::ReportRuns(const std::vector& reports) { + std::ostream& Out = GetOutputStream(); + + if (!printed_header_) { + // save the names of all the user counters + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") + continue; + user_counter_names_.insert(cnt.first); + } + } + + // print the header + for (auto B = elements.begin(); B != elements.end();) { + Out << *B++; + if (B != elements.end()) Out << ","; + } + for (auto B = user_counter_names_.begin(); + B != user_counter_names_.end();) { + Out << ",\"" << *B++ << "\""; + } + Out << "\n"; + + printed_header_ = true; + } else { + // check that all the current counters are saved in the name set + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") + continue; + BM_CHECK(user_counter_names_.find(cnt.first) != + user_counter_names_.end()) + << "All counters must be present in each run. 
" + << "Counter named \"" << cnt.first + << "\" was not in a run after being added to the header"; + } + } + } + + // print results for each run + for (const auto& run : reports) { + PrintRunData(run); + } +} + +BENCHMARK_EXPORT +void CSVReporter::PrintRunData(const Run& run) { + std::ostream& Out = GetOutputStream(); + Out << CsvEscape(run.benchmark_name()) << ","; + if (run.skipped) { + Out << std::string(elements.size() - 3, ','); + Out << std::boolalpha << (internal::SkippedWithError == run.skipped) << ","; + Out << CsvEscape(run.skip_message) << "\n"; + return; + } + + // Do not print iteration on bigO and RMS report + if (!run.report_big_o && !run.report_rms) { + Out << run.iterations; + } + Out << ","; + + if (run.run_type != Run::RT_Aggregate || + run.aggregate_unit == StatisticUnit::kTime) { + Out << run.GetAdjustedRealTime() << ","; + Out << run.GetAdjustedCPUTime() << ","; + } else { + assert(run.aggregate_unit == StatisticUnit::kPercentage); + Out << run.real_accumulated_time << ","; + Out << run.cpu_accumulated_time << ","; + } + + // Do not print timeLabel on bigO and RMS report + if (run.report_big_o) { + Out << GetBigOString(run.complexity); + } else if (!run.report_rms && + run.aggregate_unit != StatisticUnit::kPercentage) { + Out << GetTimeUnitString(run.time_unit); + } + Out << ","; + + if (run.counters.find("bytes_per_second") != run.counters.end()) { + Out << run.counters.at("bytes_per_second"); + } + Out << ","; + if (run.counters.find("items_per_second") != run.counters.end()) { + Out << run.counters.at("items_per_second"); + } + Out << ","; + if (!run.report_label.empty()) { + Out << CsvEscape(run.report_label); + } + Out << ",,"; // for error_occurred and error_message + + // Print user counters + for (const auto& ucn : user_counter_names_) { + auto it = run.counters.find(ucn); + if (it == run.counters.end()) { + Out << ","; + } else { + Out << "," << it->second; + } + } + Out << '\n'; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/cycleclock.h b/vendor/googlebenchmark/src/cycleclock.h new file mode 100644 index 000000000..bd62f5d7e --- /dev/null +++ b/vendor/googlebenchmark/src/cycleclock.h @@ -0,0 +1,243 @@ +// ---------------------------------------------------------------------- +// CycleClock +// A CycleClock tells you the current time in Cycles. The "time" +// is actually time since power-on. This is like time() but doesn't +// involve a system call and is much more precise. +// +// NOTE: Not all cpu/platform/kernel combinations guarantee that this +// clock increments at a constant rate or is synchronized across all logical +// cpus in a system. +// +// If you need the above guarantees, please consider using a different +// API. There are efforts to provide an interface which provides a millisecond +// granularity and implemented as a memory read. A memory read is generally +// cheaper than the CycleClock for many architectures. +// +// Also, in some out of order CPU implementations, the CycleClock is not +// serializing. So if you're trying to count at cycles granularity, your +// data might be inaccurate due to out of order instruction execution. 
+// ---------------------------------------------------------------------- + +#ifndef BENCHMARK_CYCLECLOCK_H_ +#define BENCHMARK_CYCLECLOCK_H_ + +#include + +#include "benchmark/benchmark.h" +#include "internal_macros.h" + +#if defined(BENCHMARK_OS_MACOSX) +#include +#endif +// For MSVC, we want to use '_asm rdtsc' when possible (since it works +// with even ancient MSVC compilers), and when not possible the +// __rdtsc intrinsic, declared in . Unfortunately, in some +// environments, and have conflicting +// declarations of some other intrinsics, breaking compilation. +// Therefore, we simply declare __rdtsc ourselves. See also +// http://connect.microsoft.com/VisualStudio/feedback/details/262047 +#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) && \ + !defined(_M_ARM64EC) +extern "C" uint64_t __rdtsc(); +#pragma intrinsic(__rdtsc) +#endif + +#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW) +#include +#include +#endif + +#ifdef BENCHMARK_OS_EMSCRIPTEN +#include +#endif + +namespace benchmark { +// NOTE: only i386 and x86_64 have been well tested. +// PPC, sparc, alpha, and ia64 are based on +// http://peter.kuscsik.com/wordpress/?p=14 +// with modifications by m3b. See also +// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h +namespace cycleclock { +// This should return the number of cycles since power-on. Thread-safe. +inline BENCHMARK_ALWAYS_INLINE int64_t Now() { +#if defined(BENCHMARK_OS_MACOSX) + // this goes at the top because we need ALL Macs, regardless of + // architecture, to return the number of "mach time units" that + // have passed since startup. See sysinfo.cc where + // InitializeSystemInfo() sets the supposed cpu clock frequency of + // macs to the number of mach time units per second, not actual + // CPU clock frequency (which can change in the face of CPU + // frequency scaling). Also note that when the Mac sleeps, this + // counter pauses; it does not continue counting, nor does it + // reset to zero. + return static_cast(mach_absolute_time()); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // this goes above x86-specific code because old versions of Emscripten + // define __x86_64__, although they have nothing to do with it. + return static_cast(emscripten_get_now() * 1e+6); +#elif defined(__i386__) + int64_t ret; + __asm__ volatile("rdtsc" : "=A"(ret)); + return ret; +#elif defined(__x86_64__) || defined(__amd64__) + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return static_cast((high << 32) | low); +#elif defined(__powerpc__) || defined(__ppc__) + // This returns a time-base, which is not always precisely a cycle-count. +#if defined(__powerpc64__) || defined(__ppc64__) + int64_t tb; + asm volatile("mfspr %0, 268" : "=r"(tb)); + return tb; +#else + uint32_t tbl, tbu0, tbu1; + asm volatile( + "mftbu %0\n" + "mftb %1\n" + "mftbu %2" + : "=r"(tbu0), "=r"(tbl), "=r"(tbu1)); + tbl &= -static_cast(tbu0 == tbu1); + // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed) + return (static_cast(tbu1) << 32) | tbl; +#endif +#elif defined(__sparc__) + int64_t tick; + asm(".byte 0x83, 0x41, 0x00, 0x00"); + asm("mov %%g1, %0" : "=r"(tick)); + return tick; +#elif defined(__ia64__) + int64_t itc; + asm("mov %0 = ar.itc" : "=r"(itc)); + return itc; +#elif defined(COMPILER_MSVC) && defined(_M_IX86) + // Older MSVC compilers (like 7.x) don't seem to support the + // __rdtsc intrinsic properly, so I prefer to use _asm instead + // when I know it will work. 
Otherwise, I'll use __rdtsc and hope + // the code is being compiled with a non-ancient compiler. + _asm rdtsc +#elif defined(COMPILER_MSVC) && (defined(_M_ARM64) || defined(_M_ARM64EC)) + // See // https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics + // and https://reviews.llvm.org/D53115 + int64_t virtual_timer_value; + virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT); + return virtual_timer_value; +#elif defined(COMPILER_MSVC) + return __rdtsc(); +#elif defined(BENCHMARK_OS_NACL) + // Native Client validator on x86/x86-64 allows RDTSC instructions, + // and this case is handled above. Native Client validator on ARM + // rejects MRC instructions (used in the ARM-specific sequence below), + // so we handle it here. Portable Native Client compiles to + // architecture-agnostic bytecode, which doesn't provide any + // cycle counter access mnemonics. + + // Native Client does not provide any API to access cycle counter. + // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday + // because is provides nanosecond resolution (which is noticeable at + // least for PNaCl modules running on x86 Mac & Linux). + // Initialize to always return 0 if clock_gettime fails. + struct timespec ts = {0, 0}; + clock_gettime(CLOCK_MONOTONIC, &ts); + return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; +#elif defined(__aarch64__) + // System timer of ARMv8 runs at a different frequency than the CPU's. + // The frequency is fixed, typically in the range 1-50MHz. It can be + // read at CNTFRQ special register. We assume the OS has set up + // the virtual timer properly. + int64_t virtual_timer_value; + asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +#elif defined(__ARM_ARCH) + // V6 is the earliest arch that has a standard cyclecount + // Native Client validator doesn't allow MRC instructions. +#if (__ARM_ARCH >= 6) + uint32_t pmccntr; + uint32_t pmuseren; + uint32_t pmcntenset; + // Read the user mode perf monitor counter access permissions. + asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); + if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); + if (pmcntenset & 0x80000000ul) { // Is it counting? + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); + // The counter is set up to count every 64th cycle + return static_cast(pmccntr) * 64; // Should optimize to << 6 + } + } +#endif + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__mips__) || defined(__m68k__) + // mips apparently only allows rdtsc for superusers, so we fall + // back to gettimeofday. It's possible clock_gettime would be better. + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__loongarch__) || defined(__csky__) + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__s390__) // Covers both s390 and s390x. + // Return the CPU clock. + uint64_t tsc; +#if defined(BENCHMARK_OS_ZOS) + // z/OS HLASM syntax. + asm(" stck %0" : "=m"(tsc) : : "cc"); +#else + // Linux on Z syntax. + asm("stck %0" : "=Q"(tsc) : : "cc"); +#endif + return tsc; +#elif defined(__riscv) // RISC-V + // Use RDTIME (and RDTIMEH on riscv32). + // RDCYCLE is a privileged instruction since Linux 6.6. 
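+ // Editorial note: on riscv32 the 64-bit timer has to be read as two CSRs,
+ // so the sequence below reads hi, lo, hi again; if the two hi reads differ,
+ // a carry happened in between and the low word is masked to zero before
+ // being combined with the later high read.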
+#if __riscv_xlen == 32 + uint32_t cycles_lo, cycles_hi0, cycles_hi1; + // This asm also includes the PowerPC overflow handling strategy, as above. + // Implemented in assembly because Clang insisted on branching. + asm volatile( + "rdtimeh %0\n" + "rdtime %1\n" + "rdtimeh %2\n" + "sub %0, %0, %2\n" + "seqz %0, %0\n" + "sub %0, zero, %0\n" + "and %1, %1, %0\n" + : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1)); + return static_cast((static_cast(cycles_hi1) << 32) | + cycles_lo); +#else + uint64_t cycles; + asm volatile("rdtime %0" : "=r"(cycles)); + return static_cast(cycles); +#endif +#elif defined(__e2k__) || defined(__elbrus__) + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__hexagon__) + uint64_t pcycle; + asm volatile("%0 = C15:14" : "=r"(pcycle)); + return static_cast(pcycle); +#elif defined(__alpha__) + // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit + // integer and thus wraps every ~4s, making using it for tick counts + // unreliable beyond this time range. The real-time clock is low-precision, + // roughtly ~1ms, but it is the only option that can reasonable count + // indefinitely. + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#else + // The soft failover to a generic implementation is automatic only for ARM. + // For other platforms the developer is expected to make an attempt to create + // a fast implementation and use generic version if nothing better is + // available. +#error You need to define CycleTimer for your OS and CPU +#endif +} +} // end namespace cycleclock +} // end namespace benchmark + +#endif // BENCHMARK_CYCLECLOCK_H_ diff --git a/vendor/googlebenchmark/src/internal_macros.h b/vendor/googlebenchmark/src/internal_macros.h new file mode 100644 index 000000000..f4894ba8e --- /dev/null +++ b/vendor/googlebenchmark/src/internal_macros.h @@ -0,0 +1,111 @@ +#ifndef BENCHMARK_INTERNAL_MACROS_H_ +#define BENCHMARK_INTERNAL_MACROS_H_ + +/* Needed to detect STL */ +#include + +// clang-format off + +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if defined(__clang__) + #if !defined(COMPILER_CLANG) + #define COMPILER_CLANG + #endif +#elif defined(_MSC_VER) + #if !defined(COMPILER_MSVC) + #define COMPILER_MSVC + #endif +#elif defined(__GNUC__) + #if !defined(COMPILER_GCC) + #define COMPILER_GCC + #endif +#endif + +#if __has_feature(cxx_attributes) + #define BENCHMARK_NORETURN [[noreturn]] +#elif defined(__GNUC__) + #define BENCHMARK_NORETURN __attribute__((noreturn)) +#elif defined(COMPILER_MSVC) + #define BENCHMARK_NORETURN __declspec(noreturn) +#else + #define BENCHMARK_NORETURN +#endif + +#if defined(__CYGWIN__) + #define BENCHMARK_OS_CYGWIN 1 +#elif defined(_WIN32) + #define BENCHMARK_OS_WINDOWS 1 + // WINAPI_FAMILY_PARTITION is defined in winapifamily.h. + // We include windows.h which implicitly includes winapifamily.h for compatibility. 
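+ // Editorial note: NOMINMAX stops windows.h from defining min()/max() macros
+ // that would otherwise clash with std::min/std::max in the library sources.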
+ #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #if defined(WINAPI_FAMILY_PARTITION) + #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) + #define BENCHMARK_OS_WINDOWS_WIN32 1 + #elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) + #define BENCHMARK_OS_WINDOWS_RT 1 + #endif + #endif + #if defined(__MINGW32__) + #define BENCHMARK_OS_MINGW 1 + #endif +#elif defined(__APPLE__) + #define BENCHMARK_OS_APPLE 1 + #include "TargetConditionals.h" + #if defined(TARGET_OS_MAC) + #define BENCHMARK_OS_MACOSX 1 + #if defined(TARGET_OS_IPHONE) + #define BENCHMARK_OS_IOS 1 + #endif + #endif +#elif defined(__FreeBSD__) + #define BENCHMARK_OS_FREEBSD 1 +#elif defined(__NetBSD__) + #define BENCHMARK_OS_NETBSD 1 +#elif defined(__OpenBSD__) + #define BENCHMARK_OS_OPENBSD 1 +#elif defined(__DragonFly__) + #define BENCHMARK_OS_DRAGONFLY 1 +#elif defined(__linux__) + #define BENCHMARK_OS_LINUX 1 +#elif defined(__native_client__) + #define BENCHMARK_OS_NACL 1 +#elif defined(__EMSCRIPTEN__) + #define BENCHMARK_OS_EMSCRIPTEN 1 +#elif defined(__rtems__) + #define BENCHMARK_OS_RTEMS 1 +#elif defined(__Fuchsia__) +#define BENCHMARK_OS_FUCHSIA 1 +#elif defined (__SVR4) && defined (__sun) +#define BENCHMARK_OS_SOLARIS 1 +#elif defined(__QNX__) +#define BENCHMARK_OS_QNX 1 +#elif defined(__MVS__) +#define BENCHMARK_OS_ZOS 1 +#elif defined(__hexagon__) +#define BENCHMARK_OS_QURT 1 +#endif + +#if defined(__ANDROID__) && defined(__GLIBCXX__) +#define BENCHMARK_STL_ANDROID_GNUSTL 1 +#endif + +#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ + && !defined(__EXCEPTIONS) + #define BENCHMARK_HAS_NO_EXCEPTIONS +#endif + +#if defined(COMPILER_CLANG) || defined(COMPILER_GCC) + #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) +#else + #define BENCHMARK_MAYBE_UNUSED +#endif + +// clang-format on + +#endif // BENCHMARK_INTERNAL_MACROS_H_ diff --git a/vendor/googlebenchmark/src/json_reporter.cc b/vendor/googlebenchmark/src/json_reporter.cc new file mode 100644 index 000000000..b8c8c94c0 --- /dev/null +++ b/vendor/googlebenchmark/src/json_reporter.cc @@ -0,0 +1,327 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
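+// Editorial note: the reporter below emits a single JSON document: a
+// "context" object written by ReportContext(), followed by a "benchmarks"
+// array filled by ReportRuns() and closed by Finalize().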
+ +#include +#include +#include +#include // for setprecision +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "complexity.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { +namespace { + +std::string StrEscape(const std::string& s) { + std::string tmp; + tmp.reserve(s.size()); + for (char c : s) { + switch (c) { + case '\b': + tmp += "\\b"; + break; + case '\f': + tmp += "\\f"; + break; + case '\n': + tmp += "\\n"; + break; + case '\r': + tmp += "\\r"; + break; + case '\t': + tmp += "\\t"; + break; + case '\\': + tmp += "\\\\"; + break; + case '"': + tmp += "\\\""; + break; + default: + tmp += c; + break; + } + } + return tmp; +} + +std::string FormatKV(std::string const& key, std::string const& value) { + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), + StrEscape(value).c_str()); +} + +std::string FormatKV(std::string const& key, const char* value) { + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), + StrEscape(value).c_str()); +} + +std::string FormatKV(std::string const& key, bool value) { + return StrFormat("\"%s\": %s", StrEscape(key).c_str(), + value ? "true" : "false"); +} + +std::string FormatKV(std::string const& key, int64_t value) { + std::stringstream ss; + ss << '"' << StrEscape(key) << "\": " << value; + return ss.str(); +} + +std::string FormatKV(std::string const& key, double value) { + std::stringstream ss; + ss << '"' << StrEscape(key) << "\": "; + + if (std::isnan(value)) + ss << (value < 0 ? "-" : "") << "NaN"; + else if (std::isinf(value)) + ss << (value < 0 ? "-" : "") << "Infinity"; + else { + const auto max_digits10 = + std::numeric_limits::max_digits10; + const auto max_fractional_digits10 = max_digits10 - 1; + ss << std::scientific << std::setprecision(max_fractional_digits10) + << value; + } + return ss.str(); +} + +int64_t RoundDouble(double v) { return std::lround(v); } + +} // end namespace + +bool JSONReporter::ReportContext(const Context& context) { + std::ostream& out = GetOutputStream(); + + out << "{\n"; + std::string inner_indent(2, ' '); + + // Open context block and print context information. + out << inner_indent << "\"context\": {\n"; + std::string indent(4, ' '); + + std::string walltime_value = LocalDateTimeString(); + out << indent << FormatKV("date", walltime_value) << ",\n"; + + out << indent << FormatKV("host_name", context.sys_info.name) << ",\n"; + + if (Context::executable_name) { + out << indent << FormatKV("executable", Context::executable_name) << ",\n"; + } + + CPUInfo const& info = context.cpu_info; + out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) + << ",\n"; + out << indent + << FormatKV("mhz_per_cpu", + RoundDouble(info.cycles_per_second / 1000000.0)) + << ",\n"; + if (CPUInfo::Scaling::UNKNOWN != info.scaling) { + out << indent + << FormatKV("cpu_scaling_enabled", + info.scaling == CPUInfo::Scaling::ENABLED ? 
true : false) + << ",\n"; + } + + out << indent << "\"caches\": [\n"; + indent = std::string(6, ' '); + std::string cache_indent(8, ' '); + for (size_t i = 0; i < info.caches.size(); ++i) { + auto& CI = info.caches[i]; + out << indent << "{\n"; + out << cache_indent << FormatKV("type", CI.type) << ",\n"; + out << cache_indent << FormatKV("level", static_cast(CI.level)) + << ",\n"; + out << cache_indent << FormatKV("size", static_cast(CI.size)) + << ",\n"; + out << cache_indent + << FormatKV("num_sharing", static_cast(CI.num_sharing)) + << "\n"; + out << indent << "}"; + if (i != info.caches.size() - 1) out << ","; + out << "\n"; + } + indent = std::string(4, ' '); + out << indent << "],\n"; + out << indent << "\"load_avg\": ["; + for (auto it = info.load_avg.begin(); it != info.load_avg.end();) { + out << *it++; + if (it != info.load_avg.end()) out << ","; + } + out << "],\n"; + + out << indent << FormatKV("library_version", GetBenchmarkVersion()); + out << ",\n"; + +#if defined(NDEBUG) + const char build_type[] = "release"; +#else + const char build_type[] = "debug"; +#endif + out << indent << FormatKV("library_build_type", build_type); + out << ",\n"; + + // NOTE: our json schema is not strictly tied to the library version! + out << indent << FormatKV("json_schema_version", int64_t(1)); + + std::map* global_context = + internal::GetGlobalContext(); + + if (global_context != nullptr) { + for (const auto& kv : *global_context) { + out << ",\n"; + out << indent << FormatKV(kv.first, kv.second); + } + } + out << "\n"; + + // Close context block and open the list of benchmarks. + out << inner_indent << "},\n"; + out << inner_indent << "\"benchmarks\": [\n"; + return true; +} + +void JSONReporter::ReportRuns(std::vector const& reports) { + if (reports.empty()) { + return; + } + std::string indent(4, ' '); + std::ostream& out = GetOutputStream(); + if (!first_report_) { + out << ",\n"; + } + first_report_ = false; + + for (auto it = reports.begin(); it != reports.end(); ++it) { + out << indent << "{\n"; + PrintRunData(*it); + out << indent << '}'; + auto it_cp = it; + if (++it_cp != reports.end()) { + out << ",\n"; + } + } +} + +void JSONReporter::Finalize() { + // Close the list of benchmarks and the top level object. 
+ GetOutputStream() << "\n ]\n}\n"; +} + +void JSONReporter::PrintRunData(Run const& run) { + std::string indent(6, ' '); + std::ostream& out = GetOutputStream(); + out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; + out << indent << FormatKV("family_index", run.family_index) << ",\n"; + out << indent + << FormatKV("per_family_instance_index", run.per_family_instance_index) + << ",\n"; + out << indent << FormatKV("run_name", run.run_name.str()) << ",\n"; + out << indent << FormatKV("run_type", [&run]() -> const char* { + switch (run.run_type) { + case BenchmarkReporter::Run::RT_Iteration: + return "iteration"; + case BenchmarkReporter::Run::RT_Aggregate: + return "aggregate"; + } + BENCHMARK_UNREACHABLE(); + }()) << ",\n"; + out << indent << FormatKV("repetitions", run.repetitions) << ",\n"; + if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) { + out << indent << FormatKV("repetition_index", run.repetition_index) + << ",\n"; + } + out << indent << FormatKV("threads", run.threads) << ",\n"; + if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) { + out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n"; + out << indent << FormatKV("aggregate_unit", [&run]() -> const char* { + switch (run.aggregate_unit) { + case StatisticUnit::kTime: + return "time"; + case StatisticUnit::kPercentage: + return "percentage"; + } + BENCHMARK_UNREACHABLE(); + }()) << ",\n"; + } + if (internal::SkippedWithError == run.skipped) { + out << indent << FormatKV("error_occurred", true) << ",\n"; + out << indent << FormatKV("error_message", run.skip_message) << ",\n"; + } else if (internal::SkippedWithMessage == run.skipped) { + out << indent << FormatKV("skipped", true) << ",\n"; + out << indent << FormatKV("skip_message", run.skip_message) << ",\n"; + } + if (!run.report_big_o && !run.report_rms) { + out << indent << FormatKV("iterations", run.iterations) << ",\n"; + if (run.run_type != Run::RT_Aggregate || + run.aggregate_unit == StatisticUnit::kTime) { + out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) + << ",\n"; + out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); + } else { + assert(run.aggregate_unit == StatisticUnit::kPercentage); + out << indent << FormatKV("real_time", run.real_accumulated_time) + << ",\n"; + out << indent << FormatKV("cpu_time", run.cpu_accumulated_time); + } + out << ",\n" + << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if (run.report_big_o) { + out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) + << ",\n"; + out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) + << ",\n"; + out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; + out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); + } else if (run.report_rms) { + out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); + } + + for (auto& c : run.counters) { + out << ",\n" << indent << FormatKV(c.first, c.second); + } + + if (run.memory_result) { + const MemoryManager::Result memory_result = *run.memory_result; + out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); + out << ",\n" + << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used); + + auto report_if_present = [&out, &indent](const std::string& label, + int64_t val) { + if (val != MemoryManager::TombstoneValue) + out << ",\n" << indent << FormatKV(label, val); + }; + + report_if_present("total_allocated_bytes", + memory_result.total_allocated_bytes); + 
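As a point of reference, a minimal sketch of how this reporter can be selected from user code; JSONReporter, Initialize and RunSpecifiedBenchmarks are part of the public benchmark.h API, while the benchmark body itself is purely illustrative:

  #include "benchmark/benchmark.h"

  static void BM_Noop(benchmark::State& state) {
    for (auto _ : state) {
      benchmark::DoNotOptimize(state.iterations());
    }
  }
  BENCHMARK(BM_Noop);

  int main(int argc, char** argv) {
    benchmark::Initialize(&argc, argv);
    benchmark::JSONReporter reporter;            // emits the JSON document assembled above
    benchmark::RunSpecifiedBenchmarks(&reporter);
    return 0;
  }

The same output is also available from any benchmark binary via the --benchmark_format=json flag.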
report_if_present("net_heap_growth", memory_result.net_heap_growth); + } + + if (!run.report_label.empty()) { + out << ",\n" << indent << FormatKV("label", run.report_label); + } + out << '\n'; +} + +const int64_t MemoryManager::TombstoneValue = + std::numeric_limits::max(); + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/log.h b/vendor/googlebenchmark/src/log.h new file mode 100644 index 000000000..9a21400b0 --- /dev/null +++ b/vendor/googlebenchmark/src/log.h @@ -0,0 +1,88 @@ +#ifndef BENCHMARK_LOG_H_ +#define BENCHMARK_LOG_H_ + +#include +#include + +// NOTE: this is also defined in benchmark.h but we're trying to avoid a +// dependency. +// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. +#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define BENCHMARK_HAS_CXX11 +#endif + +namespace benchmark { +namespace internal { + +typedef std::basic_ostream&(EndLType)(std::basic_ostream&); + +class LogType { + friend LogType& GetNullLogInstance(); + friend LogType& GetErrorLogInstance(); + + // FIXME: Add locking to output. + template + friend LogType& operator<<(LogType&, Tp const&); + friend LogType& operator<<(LogType&, EndLType*); + + private: + LogType(std::ostream* out) : out_(out) {} + std::ostream* out_; + + // NOTE: we could use BENCHMARK_DISALLOW_COPY_AND_ASSIGN but we shouldn't have + // a dependency on benchmark.h from here. +#ifndef BENCHMARK_HAS_CXX11 + LogType(const LogType&); + LogType& operator=(const LogType&); +#else + LogType(const LogType&) = delete; + LogType& operator=(const LogType&) = delete; +#endif +}; + +template +LogType& operator<<(LogType& log, Tp const& value) { + if (log.out_) { + *log.out_ << value; + } + return log; +} + +inline LogType& operator<<(LogType& log, EndLType* m) { + if (log.out_) { + *log.out_ << m; + } + return log; +} + +inline int& LogLevel() { + static int log_level = 0; + return log_level; +} + +inline LogType& GetNullLogInstance() { + static LogType null_log(static_cast(nullptr)); + return null_log; +} + +inline LogType& GetErrorLogInstance() { + static LogType error_log(&std::clog); + return error_log; +} + +inline LogType& GetLogInstanceForLevel(int level) { + if (level <= LogLevel()) { + return GetErrorLogInstance(); + } + return GetNullLogInstance(); +} + +} // end namespace internal +} // end namespace benchmark + +// clang-format off +#define BM_VLOG(x) \ + (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ + " ") +// clang-format on +#endif diff --git a/vendor/googlebenchmark/src/mutex.h b/vendor/googlebenchmark/src/mutex.h new file mode 100644 index 000000000..bec78d9e5 --- /dev/null +++ b/vendor/googlebenchmark/src/mutex.h @@ -0,0 +1,155 @@ +#ifndef BENCHMARK_MUTEX_H_ +#define BENCHMARK_MUTEX_H_ + +#include +#include + +#include "check.h" + +// Enable thread safety attributes only with clang. +// The attributes can be safely erased when compiling with other compilers. +#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) +#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op +#endif + +#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x)) + +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable) + +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x)) + +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x)) + +#define ACQUIRED_BEFORE(...) 
\ + THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__)) + +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__)) + +#define REQUIRES(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__)) + +#define REQUIRES_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__)) + +#define ACQUIRE(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__)) + +#define ACQUIRE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__)) + +#define RELEASE(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__)) + +#define RELEASE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__)) + +#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__)) + +#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x)) + +#define ASSERT_SHARED_CAPABILITY(x) \ + THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x)) + +#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x)) + +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis) + +namespace benchmark { + +typedef std::condition_variable Condition; + +// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that +// we can annotate them with thread safety attributes and use the +// -Wthread-safety warning with clang. The standard library types cannot be +// used directly because they do not provide the required annotations. +class CAPABILITY("mutex") Mutex { + public: + Mutex() {} + + void lock() ACQUIRE() { mut_.lock(); } + void unlock() RELEASE() { mut_.unlock(); } + std::mutex& native_handle() { return mut_; } + + private: + std::mutex mut_; +}; + +class SCOPED_CAPABILITY MutexLock { + typedef std::unique_lock MutexLockImp; + + public: + MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} + ~MutexLock() RELEASE() {} + MutexLockImp& native_handle() { return ml_; } + + private: + MutexLockImp ml_; +}; + +class Barrier { + public: + Barrier(int num_threads) : running_threads_(num_threads) {} + + // Called by each thread + bool wait() EXCLUDES(lock_) { + bool last_thread = false; + { + MutexLock ml(lock_); + last_thread = createBarrier(ml); + } + if (last_thread) phase_condition_.notify_all(); + return last_thread; + } + + void removeThread() EXCLUDES(lock_) { + MutexLock ml(lock_); + --running_threads_; + if (entered_ != 0) phase_condition_.notify_all(); + } + + private: + Mutex lock_; + Condition phase_condition_; + int running_threads_; + + // State for barrier management + int phase_number_ = 0; + int entered_ = 0; // Number of threads that have entered this barrier + + // Enter the barrier and wait until all other threads have also + // entered the barrier. Returns iff this is the last thread to + // enter the barrier. 
+ bool createBarrier(MutexLock& ml) REQUIRES(lock_) { + BM_CHECK_LT(entered_, running_threads_); + entered_++; + if (entered_ < running_threads_) { + // Wait for all threads to enter + int phase_number_cp = phase_number_; + auto cb = [this, phase_number_cp]() { + return this->phase_number_ > phase_number_cp || + entered_ == running_threads_; // A thread has aborted in error + }; + phase_condition_.wait(ml.native_handle(), cb); + if (phase_number_ > phase_number_cp) return false; + // else (running_threads_ == entered_) and we are the last thread. + } + // Last thread has reached the barrier + phase_number_++; + entered_ = 0; + return true; + } +}; + +} // end namespace benchmark + +#endif // BENCHMARK_MUTEX_H_ diff --git a/vendor/googlebenchmark/src/perf_counters.cc b/vendor/googlebenchmark/src/perf_counters.cc new file mode 100644 index 000000000..fa1cbb0e8 --- /dev/null +++ b/vendor/googlebenchmark/src/perf_counters.cc @@ -0,0 +1,284 @@ +// Copyright 2021 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "perf_counters.h" + +#include +#include +#include + +#if defined HAVE_LIBPFM +#include "perfmon/pfmlib.h" +#include "perfmon/pfmlib_perf_event.h" +#endif + +namespace benchmark { +namespace internal { + +constexpr size_t PerfCounterValues::kMaxCounters; + +#if defined HAVE_LIBPFM + +size_t PerfCounterValues::Read(const std::vector& leaders) { + // Create a pointer for multiple reads + const size_t bufsize = values_.size() * sizeof(values_[0]); + char* ptr = reinterpret_cast(values_.data()); + size_t size = bufsize; + for (int lead : leaders) { + auto read_bytes = ::read(lead, ptr, size); + if (read_bytes >= ssize_t(sizeof(uint64_t))) { + // Actual data bytes are all bytes minus initial padding + std::size_t data_bytes = + static_cast(read_bytes) - sizeof(uint64_t); + // This should be very cheap since it's in hot cache + std::memmove(ptr, ptr + sizeof(uint64_t), data_bytes); + // Increment our counters + ptr += data_bytes; + size -= data_bytes; + } else { + int err = errno; + GetErrorLogInstance() << "Error reading lead " << lead << " errno:" << err + << " " << ::strerror(err) << "\n"; + return 0; + } + } + return (bufsize - size) / sizeof(uint64_t); +} + +const bool PerfCounters::kSupported = true; + +// Initializes libpfm only on the first call. Returns whether that single +// initialization was successful. +bool PerfCounters::Initialize() { + // Function-scope static gets initialized only once on first call. 
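Stepping back to the Barrier class from mutex.h above, a minimal usage sketch; the thread count and the per-phase work are illustrative, and mutex.h is an internal header:

  #include <thread>
  #include <vector>
  #include "mutex.h"  // internal header; sketch only

  void RunPhase(benchmark::Barrier& barrier) {
    // ... per-thread setup ...
    if (barrier.wait()) {
      // The last thread to arrive gets `true` back and can perform
      // once-per-phase work here.
    }
    // Every thread continues from this point once the phase is complete.
  }

  // e.g. launch N std::thread instances that each call RunPhase() on a
  // shared benchmark::Barrier barrier(N);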
+ static const bool success = []() { + return pfm_initialize() == PFM_SUCCESS; + }(); + return success; +} + +bool PerfCounters::IsCounterSupported(const std::string& name) { + Initialize(); + perf_event_attr_t attr; + std::memset(&attr, 0, sizeof(attr)); + pfm_perf_encode_arg_t arg; + std::memset(&arg, 0, sizeof(arg)); + arg.attr = &attr; + const int mode = PFM_PLM3; // user mode only + int ret = pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT_EXT, + &arg); + return (ret == PFM_SUCCESS); +} + +PerfCounters PerfCounters::Create( + const std::vector& counter_names) { + if (!counter_names.empty()) { + Initialize(); + } + + // Valid counters will populate these arrays but we start empty + std::vector valid_names; + std::vector counter_ids; + std::vector leader_ids; + + // Resize to the maximum possible + valid_names.reserve(counter_names.size()); + counter_ids.reserve(counter_names.size()); + + const int kCounterMode = PFM_PLM3; // user mode only + + // Group leads will be assigned on demand. The idea is that once we cannot + // create a counter descriptor, the reason is that this group has maxed out + // so we set the group_id again to -1 and retry - giving the algorithm a + // chance to create a new group leader to hold the next set of counters. + int group_id = -1; + + // Loop through all performance counters + for (size_t i = 0; i < counter_names.size(); ++i) { + // we are about to push into the valid names vector + // check if we did not reach the maximum + if (valid_names.size() == PerfCounterValues::kMaxCounters) { + // Log a message if we maxed out and stop adding + GetErrorLogInstance() + << counter_names.size() << " counters were requested. The maximum is " + << PerfCounterValues::kMaxCounters << " and " << valid_names.size() + << " were already added. All remaining counters will be ignored\n"; + // stop the loop and return what we have already + break; + } + + // Check if this name is empty + const auto& name = counter_names[i]; + if (name.empty()) { + GetErrorLogInstance() + << "A performance counter name was the empty string\n"; + continue; + } + + // Here first means first in group, ie the group leader + const bool is_first = (group_id < 0); + + // This struct will be populated by libpfm from the counter string + // and then fed into the syscall perf_event_open + struct perf_event_attr attr {}; + attr.size = sizeof(attr); + + // This is the input struct to libpfm. + pfm_perf_encode_arg_t arg{}; + arg.attr = &attr; + const int pfm_get = pfm_get_os_event_encoding(name.c_str(), kCounterMode, + PFM_OS_PERF_EVENT, &arg); + if (pfm_get != PFM_SUCCESS) { + GetErrorLogInstance() + << "Unknown performance counter name: " << name << "\n"; + continue; + } + + // We then proceed to populate the remaining fields in our attribute struct + // Note: the man page for perf_event_create suggests inherit = true and + // read_format = PERF_FORMAT_GROUP don't work together, but that's not the + // case. + attr.disabled = is_first; + attr.inherit = true; + attr.pinned = is_first; + attr.exclude_kernel = true; + attr.exclude_user = false; + attr.exclude_hv = true; + + // Read all counters in a group in one read. + attr.read_format = PERF_FORMAT_GROUP; //| PERF_FORMAT_TOTAL_TIME_ENABLED | + // PERF_FORMAT_TOTAL_TIME_RUNNING; + + int id = -1; + while (id < 0) { + static constexpr size_t kNrOfSyscallRetries = 5; + // Retry syscall as it was interrupted often (b/64774091). 
+ for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries; + ++num_retries) { + id = perf_event_open(&attr, 0, -1, group_id, 0); + if (id >= 0 || errno != EINTR) { + break; + } + } + if (id < 0) { + // If the file descriptor is negative we might have reached a limit + // in the current group. Set the group_id to -1 and retry + if (group_id >= 0) { + // Create a new group + group_id = -1; + } else { + // At this point we have already retried to set a new group id and + // failed. We then give up. + break; + } + } + } + + // We failed to get a new file descriptor. We might have reached a hard + // hardware limit that cannot be resolved even with group multiplexing + if (id < 0) { + GetErrorLogInstance() << "***WARNING** Failed to get a file descriptor " + "for performance counter " + << name << ". Ignoring\n"; + + // We give up on this counter but try to keep going + // as the others would be fine + continue; + } + if (group_id < 0) { + // This is a leader, store and assign it to the current file descriptor + leader_ids.push_back(id); + group_id = id; + } + // This is a valid counter, add it to our descriptor's list + counter_ids.push_back(id); + valid_names.push_back(name); + } + + // Loop through all group leaders activating them + // There is another option of starting ALL counters in a process but + // that would be far reaching an intrusion. If the user is using PMCs + // by themselves then this would have a side effect on them. It is + // friendlier to loop through all groups individually. + for (int lead : leader_ids) { + if (ioctl(lead, PERF_EVENT_IOC_ENABLE) != 0) { + // This should never happen but if it does, we give up on the + // entire batch as recovery would be a mess. + GetErrorLogInstance() << "***WARNING*** Failed to start counters. 
" + "Claring out all counters.\n"; + + // Close all peformance counters + for (int id : counter_ids) { + ::close(id); + } + + // Return an empty object so our internal state is still good and + // the process can continue normally without impact + return NoCounters(); + } + } + + return PerfCounters(std::move(valid_names), std::move(counter_ids), + std::move(leader_ids)); +} + +void PerfCounters::CloseCounters() const { + if (counter_ids_.empty()) { + return; + } + for (int lead : leader_ids_) { + ioctl(lead, PERF_EVENT_IOC_DISABLE); + } + for (int fd : counter_ids_) { + close(fd); + } +} +#else // defined HAVE_LIBPFM +size_t PerfCounterValues::Read(const std::vector&) { return 0; } + +const bool PerfCounters::kSupported = false; + +bool PerfCounters::Initialize() { return false; } + +bool PerfCounters::IsCounterSupported(const std::string&) { return false; } + +PerfCounters PerfCounters::Create( + const std::vector& counter_names) { + if (!counter_names.empty()) { + GetErrorLogInstance() << "Performance counters not supported.\n"; + } + return NoCounters(); +} + +void PerfCounters::CloseCounters() const {} +#endif // defined HAVE_LIBPFM + +PerfCountersMeasurement::PerfCountersMeasurement( + const std::vector& counter_names) + : start_values_(counter_names.size()), end_values_(counter_names.size()) { + counters_ = PerfCounters::Create(counter_names); +} + +PerfCounters& PerfCounters::operator=(PerfCounters&& other) noexcept { + if (this != &other) { + CloseCounters(); + + counter_ids_ = std::move(other.counter_ids_); + leader_ids_ = std::move(other.leader_ids_); + counter_names_ = std::move(other.counter_names_); + } + return *this; +} +} // namespace internal +} // namespace benchmark diff --git a/vendor/googlebenchmark/src/perf_counters.h b/vendor/googlebenchmark/src/perf_counters.h new file mode 100644 index 000000000..bf5eb6bc3 --- /dev/null +++ b/vendor/googlebenchmark/src/perf_counters.h @@ -0,0 +1,200 @@ +// Copyright 2021 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_PERF_COUNTERS_H +#define BENCHMARK_PERF_COUNTERS_H + +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "log.h" +#include "mutex.h" + +#ifndef BENCHMARK_OS_WINDOWS +#include +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + +namespace benchmark { +namespace internal { + +// Typically, we can only read a small number of counters. There is also a +// padding preceding counter values, when reading multiple counters with one +// syscall (which is desirable). PerfCounterValues abstracts these details. +// The implementation ensures the storage is inlined, and allows 0-based +// indexing into the counter values. +// The object is used in conjunction with a PerfCounters object, by passing it +// to Snapshot(). 
The Read() method relocates individual reads, discarding +// the initial padding from each group leader in the values buffer such that +// all user accesses through the [] operator are correct. +class BENCHMARK_EXPORT PerfCounterValues { + public: + explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) { + BM_CHECK_LE(nr_counters_, kMaxCounters); + } + + // We are reading correctly now so the values don't need to skip padding + uint64_t operator[](size_t pos) const { return values_[pos]; } + + // Increased the maximum to 32 only since the buffer + // is std::array<> backed + static constexpr size_t kMaxCounters = 32; + + private: + friend class PerfCounters; + // Get the byte buffer in which perf counters can be captured. + // This is used by PerfCounters::Read + std::pair get_data_buffer() { + return {reinterpret_cast(values_.data()), + sizeof(uint64_t) * (kPadding + nr_counters_)}; + } + + // This reading is complex and as the goal of this class is to + // abstract away the intrincacies of the reading process, this is + // a better place for it + size_t Read(const std::vector& leaders); + + // Move the padding to 2 due to the reading algorithm (1st padding plus a + // current read padding) + static constexpr size_t kPadding = 2; + std::array values_; + const size_t nr_counters_; +}; + +// Collect PMU counters. The object, once constructed, is ready to be used by +// calling read(). PMU counter collection is enabled from the time create() is +// called, to obtain the object, until the object's destructor is called. +class BENCHMARK_EXPORT PerfCounters final { + public: + // True iff this platform supports performance counters. + static const bool kSupported; + + // Returns an empty object + static PerfCounters NoCounters() { return PerfCounters(); } + + ~PerfCounters() { CloseCounters(); } + PerfCounters() = default; + PerfCounters(PerfCounters&&) = default; + PerfCounters(const PerfCounters&) = delete; + PerfCounters& operator=(PerfCounters&&) noexcept; + PerfCounters& operator=(const PerfCounters&) = delete; + + // Platform-specific implementations may choose to do some library + // initialization here. + static bool Initialize(); + + // Check if the given counter is supported, if the app wants to + // check before passing + static bool IsCounterSupported(const std::string& name); + + // Return a PerfCounters object ready to read the counters with the names + // specified. The values are user-mode only. The counter name format is + // implementation and OS specific. + // In case of failure, this method will in the worst case return an + // empty object whose state will still be valid. + static PerfCounters Create(const std::vector& counter_names); + + // Take a snapshot of the current value of the counters into the provided + // valid PerfCounterValues storage. 
The values are populated such that: + // names()[i]'s value is (*values)[i] + BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const { +#ifndef BENCHMARK_OS_WINDOWS + assert(values != nullptr); + return values->Read(leader_ids_) == counter_ids_.size(); +#else + (void)values; + return false; +#endif + } + + const std::vector& names() const { return counter_names_; } + size_t num_counters() const { return counter_names_.size(); } + + private: + PerfCounters(const std::vector& counter_names, + std::vector&& counter_ids, std::vector&& leader_ids) + : counter_ids_(std::move(counter_ids)), + leader_ids_(std::move(leader_ids)), + counter_names_(counter_names) {} + + void CloseCounters() const; + + std::vector counter_ids_; + std::vector leader_ids_; + std::vector counter_names_; +}; + +// Typical usage of the above primitives. +class BENCHMARK_EXPORT PerfCountersMeasurement final { + public: + PerfCountersMeasurement(const std::vector& counter_names); + + size_t num_counters() const { return counters_.num_counters(); } + + std::vector names() const { return counters_.names(); } + + BENCHMARK_ALWAYS_INLINE bool Start() { + if (num_counters() == 0) return true; + // Tell the compiler to not move instructions above/below where we take + // the snapshot. + ClobberMemory(); + valid_read_ &= counters_.Snapshot(&start_values_); + ClobberMemory(); + + return valid_read_; + } + + BENCHMARK_ALWAYS_INLINE bool Stop( + std::vector>& measurements) { + if (num_counters() == 0) return true; + // Tell the compiler to not move instructions above/below where we take + // the snapshot. + ClobberMemory(); + valid_read_ &= counters_.Snapshot(&end_values_); + ClobberMemory(); + + for (size_t i = 0; i < counters_.names().size(); ++i) { + double measurement = static_cast(end_values_[i]) - + static_cast(start_values_[i]); + measurements.push_back({counters_.names()[i], measurement}); + } + + return valid_read_; + } + + private: + PerfCounters counters_; + bool valid_read_ = true; + PerfCounterValues start_values_; + PerfCounterValues end_values_; +}; + +} // namespace internal +} // namespace benchmark + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#endif // BENCHMARK_PERF_COUNTERS_H diff --git a/vendor/googlebenchmark/src/re.h b/vendor/googlebenchmark/src/re.h new file mode 100644 index 000000000..9afb869be --- /dev/null +++ b/vendor/googlebenchmark/src/re.h @@ -0,0 +1,158 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_RE_H_ +#define BENCHMARK_RE_H_ + +#include "internal_macros.h" + +// clang-format off + +#if !defined(HAVE_STD_REGEX) && \ + !defined(HAVE_GNU_POSIX_REGEX) && \ + !defined(HAVE_POSIX_REGEX) + // No explicit regex selection; detect based on builtin hints. 
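A rough sketch of the intended call pattern for the PerfCountersMeasurement helper declared above; the event names are libpfm-style and purely illustrative, and whether they resolve depends on the host PMU:

  #include <string>
  #include <utility>
  #include <vector>
  #include "perf_counters.h"  // internal header; sketch only

  std::vector<std::pair<std::string, double>> deltas;
  benchmark::internal::PerfCountersMeasurement pcm({"CYCLES", "INSTRUCTIONS"});

  if (pcm.Start()) {
    // ... code under measurement ...
    if (pcm.Stop(deltas)) {
      // deltas[i] pairs pcm.names()[i] with the end - start delta for that counter
    }
  }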
+ #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) + #define HAVE_POSIX_REGEX 1 + #elif __cplusplus >= 199711L + #define HAVE_STD_REGEX 1 + #endif +#endif + +// Prefer C regex libraries when compiling w/o exceptions so that we can +// correctly report errors. +#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ + defined(HAVE_STD_REGEX) && \ + (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) + #undef HAVE_STD_REGEX +#endif + +#if defined(HAVE_STD_REGEX) + #include +#elif defined(HAVE_GNU_POSIX_REGEX) + #include +#elif defined(HAVE_POSIX_REGEX) + #include +#else +#error No regular expression backend was found! +#endif + +// clang-format on + +#include + +#include "check.h" + +namespace benchmark { + +// A wrapper around the POSIX regular expression API that provides automatic +// cleanup +class Regex { + public: + Regex() : init_(false) {} + + ~Regex(); + + // Compile a regular expression matcher from spec. Returns true on success. + // + // On failure (and if error is not nullptr), error is populated with a human + // readable error message if an error occurs. + bool Init(const std::string& spec, std::string* error); + + // Returns whether str matches the compiled regular expression. + bool Match(const std::string& str); + + private: + bool init_; +// Underlying regular expression object +#if defined(HAVE_STD_REGEX) + std::regex re_; +#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) + regex_t re_; +#else +#error No regular expression backend implementation available +#endif +}; + +#if defined(HAVE_STD_REGEX) + +inline bool Regex::Init(const std::string& spec, std::string* error) { +#ifdef BENCHMARK_HAS_NO_EXCEPTIONS + ((void)error); // suppress unused warning +#else + try { +#endif + re_ = std::regex(spec, std::regex_constants::extended); + init_ = true; +#ifndef BENCHMARK_HAS_NO_EXCEPTIONS +} +catch (const std::regex_error& e) { + if (error) { + *error = e.what(); + } +} +#endif +return init_; +} + +inline Regex::~Regex() {} + +inline bool Regex::Match(const std::string& str) { + if (!init_) { + return false; + } + return std::regex_search(str, re_); +} + +#else +inline bool Regex::Init(const std::string& spec, std::string* error) { + int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); + if (ec != 0) { + if (error) { + size_t needed = regerror(ec, &re_, nullptr, 0); + char* errbuf = new char[needed]; + regerror(ec, &re_, errbuf, needed); + + // regerror returns the number of bytes necessary to null terminate + // the string, so we move that when assigning to error. + BM_CHECK_NE(needed, 0); + error->assign(errbuf, needed - 1); + + delete[] errbuf; + } + + return false; + } + + init_ = true; + return true; +} + +inline Regex::~Regex() { + if (init_) { + regfree(&re_); + } +} + +inline bool Regex::Match(const std::string& str) { + if (!init_) { + return false; + } + return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; +} +#endif + +} // end namespace benchmark + +#endif // BENCHMARK_RE_H_ diff --git a/vendor/googlebenchmark/src/reporter.cc b/vendor/googlebenchmark/src/reporter.cc new file mode 100644 index 000000000..076bc31a2 --- /dev/null +++ b/vendor/googlebenchmark/src/reporter.cc @@ -0,0 +1,118 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { + +BenchmarkReporter::BenchmarkReporter() + : output_stream_(&std::cout), error_stream_(&std::cerr) {} + +BenchmarkReporter::~BenchmarkReporter() {} + +void BenchmarkReporter::PrintBasicContext(std::ostream *out, + Context const &context) { + BM_CHECK(out) << "cannot be null"; + auto &Out = *out; + +#ifndef BENCHMARK_OS_QURT + // Date/time information is not available on QuRT. + // Attempting to get it via this call cause the binary to crash. + Out << LocalDateTimeString() << "\n"; +#endif + + if (context.executable_name) + Out << "Running " << context.executable_name << "\n"; + + const CPUInfo &info = context.cpu_info; + Out << "Run on (" << info.num_cpus << " X " + << (info.cycles_per_second / 1000000.0) << " MHz CPU " + << ((info.num_cpus > 1) ? "s" : "") << ")\n"; + if (info.caches.size() != 0) { + Out << "CPU Caches:\n"; + for (auto &CInfo : info.caches) { + Out << " L" << CInfo.level << " " << CInfo.type << " " + << (CInfo.size / 1024) << " KiB"; + if (CInfo.num_sharing != 0) + Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; + Out << "\n"; + } + } + if (!info.load_avg.empty()) { + Out << "Load Average: "; + for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { + Out << StrFormat("%.2f", *It++); + if (It != info.load_avg.end()) Out << ", "; + } + Out << "\n"; + } + + std::map *global_context = + internal::GetGlobalContext(); + + if (global_context != nullptr) { + for (const auto &kv : *global_context) { + Out << kv.first << ": " << kv.second << "\n"; + } + } + + if (CPUInfo::Scaling::ENABLED == info.scaling) { + Out << "***WARNING*** CPU scaling is enabled, the benchmark " + "real time measurements may be noisy and will incur extra " + "overhead.\n"; + } + +#ifndef NDEBUG + Out << "***WARNING*** Library was built as DEBUG. Timings may be " + "affected.\n"; +#endif +} + +// No initializer because it's already initialized to NULL. 
+const char *BenchmarkReporter::Context::executable_name; + +BenchmarkReporter::Context::Context() + : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} + +std::string BenchmarkReporter::Run::benchmark_name() const { + std::string name = run_name.str(); + if (run_type == RT_Aggregate) { + name += "_" + aggregate_name; + } + return name; +} + +double BenchmarkReporter::Run::GetAdjustedRealTime() const { + double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); + if (iterations != 0) new_time /= static_cast(iterations); + return new_time; +} + +double BenchmarkReporter::Run::GetAdjustedCPUTime() const { + double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); + if (iterations != 0) new_time /= static_cast(iterations); + return new_time; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/statistics.cc b/vendor/googlebenchmark/src/statistics.cc new file mode 100644 index 000000000..16b60261f --- /dev/null +++ b/vendor/googlebenchmark/src/statistics.cc @@ -0,0 +1,214 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// Copyright 2017 Roman Lebedev. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "statistics.h" + +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" + +namespace benchmark { + +auto StatisticsSum = [](const std::vector& v) { + return std::accumulate(v.begin(), v.end(), 0.0); +}; + +double StatisticsMean(const std::vector& v) { + if (v.empty()) return 0.0; + return StatisticsSum(v) * (1.0 / static_cast(v.size())); +} + +double StatisticsMedian(const std::vector& v) { + if (v.size() < 3) return StatisticsMean(v); + std::vector copy(v); + + auto center = copy.begin() + v.size() / 2; + std::nth_element(copy.begin(), center, copy.end()); + + // Did we have an odd number of samples? If yes, then center is the median. + // If not, then we are looking for the average between center and the value + // before. Instead of resorting, we just look for the max value before it, + // which is not necessarily the element immediately preceding `center` Since + // `copy` is only partially sorted by `nth_element`. 
+ if (v.size() % 2 == 1) return *center; + auto center2 = std::max_element(copy.begin(), center); + return (*center + *center2) / 2.0; +} + +// Return the sum of the squares of this sample set +auto SumSquares = [](const std::vector& v) { + return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); +}; + +auto Sqr = [](const double dat) { return dat * dat; }; +auto Sqrt = [](const double dat) { + // Avoid NaN due to imprecision in the calculations + if (dat < 0.0) return 0.0; + return std::sqrt(dat); +}; + +double StatisticsStdDev(const std::vector& v) { + const auto mean = StatisticsMean(v); + if (v.empty()) return mean; + + // Sample standard deviation is undefined for n = 1 + if (v.size() == 1) return 0.0; + + const double avg_squares = + SumSquares(v) * (1.0 / static_cast(v.size())); + return Sqrt(static_cast(v.size()) / + (static_cast(v.size()) - 1.0) * + (avg_squares - Sqr(mean))); +} + +double StatisticsCV(const std::vector& v) { + if (v.size() < 2) return 0.0; + + const auto stddev = StatisticsStdDev(v); + const auto mean = StatisticsMean(v); + + if (std::fpclassify(mean) == FP_ZERO) return 0.0; + + return stddev / mean; +} + +std::vector ComputeStats( + const std::vector& reports) { + typedef BenchmarkReporter::Run Run; + std::vector results; + + auto error_count = std::count_if(reports.begin(), reports.end(), + [](Run const& run) { return run.skipped; }); + + if (reports.size() - static_cast(error_count) < 2) { + // We don't report aggregated data if there was a single run. + return results; + } + + // Accumulators. + std::vector real_accumulated_time_stat; + std::vector cpu_accumulated_time_stat; + + real_accumulated_time_stat.reserve(reports.size()); + cpu_accumulated_time_stat.reserve(reports.size()); + + // All repetitions should be run with the same number of iterations so we + // can take this information from the first benchmark. + const IterationCount run_iterations = reports.front().iterations; + // create stats for user counters + struct CounterStat { + Counter c; + std::vector s; + }; + std::map counter_stats; + for (Run const& r : reports) { + for (auto const& cnt : r.counters) { + auto it = counter_stats.find(cnt.first); + if (it == counter_stats.end()) { + it = counter_stats + .emplace(cnt.first, + CounterStat{cnt.second, std::vector{}}) + .first; + it->second.s.reserve(reports.size()); + } else { + BM_CHECK_EQ(it->second.c.flags, cnt.second.flags); + } + } + } + + // Populate the accumulators. + for (Run const& run : reports) { + BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); + BM_CHECK_EQ(run_iterations, run.iterations); + if (run.skipped) continue; + real_accumulated_time_stat.emplace_back(run.real_accumulated_time); + cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); + // user counters + for (auto const& cnt : run.counters) { + auto it = counter_stats.find(cnt.first); + BM_CHECK_NE(it, counter_stats.end()); + it->second.s.emplace_back(cnt.second); + } + } + + // Only add label if it is same for all runs + std::string report_label = reports[0].report_label; + for (std::size_t i = 1; i < reports.size(); i++) { + if (reports[i].report_label != report_label) { + report_label = ""; + break; + } + } + + const double iteration_rescale_factor = + double(reports.size()) / double(run_iterations); + + for (const auto& Stat : *reports[0].statistics) { + // Get the data from the accumulator to BenchmarkReporter::Run's. 
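For concreteness, a small hand-checked sample run through the statistics helpers above:

  const std::vector<double> v = {1.0, 2.0, 3.0, 4.0};
  benchmark::StatisticsMean(v);    // 2.5
  benchmark::StatisticsMedian(v);  // 2.5     -- even count: mean of the two middle values
  benchmark::StatisticsStdDev(v);  // ~1.2910 -- sample (n-1) standard deviation
  benchmark::StatisticsCV(v);      // ~0.5164 -- stddev divided by mean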
+ Run data; + data.run_name = reports[0].run_name; + data.family_index = reports[0].family_index; + data.per_family_instance_index = reports[0].per_family_instance_index; + data.run_type = BenchmarkReporter::Run::RT_Aggregate; + data.threads = reports[0].threads; + data.repetitions = reports[0].repetitions; + data.repetition_index = Run::no_repetition_index; + data.aggregate_name = Stat.name_; + data.aggregate_unit = Stat.unit_; + data.report_label = report_label; + + // It is incorrect to say that an aggregate is computed over + // run's iterations, because those iterations already got averaged. + // Similarly, if there are N repetitions with 1 iterations each, + // an aggregate will be computed over N measurements, not 1. + // Thus it is best to simply use the count of separate reports. + data.iterations = static_cast(reports.size()); + + data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); + data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); + + if (data.aggregate_unit == StatisticUnit::kTime) { + // We will divide these times by data.iterations when reporting, but the + // data.iterations is not necessarily the scale of these measurements, + // because in each repetition, these timers are sum over all the iters. + // And if we want to say that the stats are over N repetitions and not + // M iterations, we need to multiply these by (N/M). + data.real_accumulated_time *= iteration_rescale_factor; + data.cpu_accumulated_time *= iteration_rescale_factor; + } + + data.time_unit = reports[0].time_unit; + + // user counters + for (auto const& kv : counter_stats) { + // Do *NOT* rescale the custom counters. They are already properly scaled. + const auto uc_stat = Stat.compute_(kv.second.s); + auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, + counter_stats[kv.first].c.oneK); + data.counters[kv.first] = c; + } + + results.push_back(data); + } + + return results; +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/statistics.h b/vendor/googlebenchmark/src/statistics.h new file mode 100644 index 000000000..6e5560e8f --- /dev/null +++ b/vendor/googlebenchmark/src/statistics.h @@ -0,0 +1,44 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// Copyright 2017 Roman Lebedev. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef STATISTICS_H_ +#define STATISTICS_H_ + +#include + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// Return a vector containing the mean, median and standard deviation +// information (and any user-specified info) for the specified list of reports. 
+// If 'reports' contains less than two non-errored runs an empty vector is +// returned +BENCHMARK_EXPORT +std::vector ComputeStats( + const std::vector& reports); + +BENCHMARK_EXPORT +double StatisticsMean(const std::vector& v); +BENCHMARK_EXPORT +double StatisticsMedian(const std::vector& v); +BENCHMARK_EXPORT +double StatisticsStdDev(const std::vector& v); +BENCHMARK_EXPORT +double StatisticsCV(const std::vector& v); + +} // end namespace benchmark + +#endif // STATISTICS_H_ diff --git a/vendor/googlebenchmark/src/string_util.cc b/vendor/googlebenchmark/src/string_util.cc new file mode 100644 index 000000000..9ba63a700 --- /dev/null +++ b/vendor/googlebenchmark/src/string_util.cc @@ -0,0 +1,254 @@ +#include "string_util.h" + +#include +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +#include +#endif +#include +#include +#include +#include +#include + +#include "arraysize.h" +#include "benchmark/benchmark.h" + +namespace benchmark { +namespace { +// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. +const char* const kBigSIUnits[] = {"k", "M", "G", "T", "P", "E", "Z", "Y"}; +// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi. +const char* const kBigIECUnits[] = {"Ki", "Mi", "Gi", "Ti", + "Pi", "Ei", "Zi", "Yi"}; +// milli, micro, nano, pico, femto, atto, zepto, yocto. +const char* const kSmallSIUnits[] = {"m", "u", "n", "p", "f", "a", "z", "y"}; + +// We require that all three arrays have the same size. +static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), + "SI and IEC unit arrays must be the same size"); +static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), + "Small SI and Big SI unit arrays must be the same size"); + +static const int64_t kUnitsSize = arraysize(kBigSIUnits); + +void ToExponentAndMantissa(double val, int precision, double one_k, + std::string* mantissa, int64_t* exponent) { + std::stringstream mantissa_stream; + + if (val < 0) { + mantissa_stream << "-"; + val = -val; + } + + // Adjust threshold so that it never excludes things which can't be rendered + // in 'precision' digits. + const double adjusted_threshold = + std::max(1.0, 1.0 / std::pow(10.0, precision)); + const double big_threshold = (adjusted_threshold * one_k) - 1; + const double small_threshold = adjusted_threshold; + // Values in ]simple_threshold,small_threshold[ will be printed as-is + const double simple_threshold = 0.01; + + if (val > big_threshold) { + // Positive powers + double scaled = val; + for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { + scaled /= one_k; + if (scaled <= big_threshold) { + mantissa_stream << scaled; + *exponent = static_cast(i + 1); + *mantissa = mantissa_stream.str(); + return; + } + } + mantissa_stream << val; + *exponent = 0; + } else if (val < small_threshold) { + // Negative powers + if (val < simple_threshold) { + double scaled = val; + for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { + scaled *= one_k; + if (scaled >= small_threshold) { + mantissa_stream << scaled; + *exponent = -static_cast(i + 1); + *mantissa = mantissa_stream.str(); + return; + } + } + } + mantissa_stream << val; + *exponent = 0; + } else { + mantissa_stream << val; + *exponent = 0; + } + *mantissa = mantissa_stream.str(); +} + +std::string ExponentToPrefix(int64_t exponent, bool iec) { + if (exponent == 0) return ""; + + const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); + if (index >= kUnitsSize) return ""; + + const char* const* array = + (exponent > 0 ? (iec ? 
kBigIECUnits : kBigSIUnits) : kSmallSIUnits); + + return std::string(array[index]); +} + +std::string ToBinaryStringFullySpecified(double value, int precision, + Counter::OneK one_k) { + std::string mantissa; + int64_t exponent; + ToExponentAndMantissa(value, precision, + one_k == Counter::kIs1024 ? 1024.0 : 1000.0, &mantissa, + &exponent); + return mantissa + ExponentToPrefix(exponent, one_k == Counter::kIs1024); +} + +std::string StrFormatImp(const char* msg, va_list args) { + // we might need a second shot at this, so pre-emptivly make a copy + va_list args_cp; + va_copy(args_cp, args); + + // TODO(ericwf): use std::array for first attempt to avoid one memory + // allocation guess what the size might be + std::array local_buff; + + // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation + // in the android-ndk + auto ret = vsnprintf(local_buff.data(), local_buff.size(), msg, args_cp); + + va_end(args_cp); + + // handle empty expansion + if (ret == 0) return std::string{}; + if (static_cast(ret) < local_buff.size()) + return std::string(local_buff.data()); + + // we did not provide a long enough buffer on our first attempt. + // add 1 to size to account for null-byte in size cast to prevent overflow + std::size_t size = static_cast(ret) + 1; + auto buff_ptr = std::unique_ptr(new char[size]); + // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation + // in the android-ndk + vsnprintf(buff_ptr.get(), size, msg, args); + return std::string(buff_ptr.get()); +} + +} // end namespace + +std::string HumanReadableNumber(double n, Counter::OneK one_k) { + return ToBinaryStringFullySpecified(n, 1, one_k); +} + +std::string StrFormat(const char* format, ...) { + va_list args; + va_start(args, format); + std::string tmp = StrFormatImp(format, args); + va_end(args); + return tmp; +} + +std::vector StrSplit(const std::string& str, char delim) { + if (str.empty()) return {}; + std::vector ret; + size_t first = 0; + size_t next = str.find(delim); + for (; next != std::string::npos; + first = next + 1, next = str.find(delim, first)) { + ret.push_back(str.substr(first, next - first)); + } + ret.push_back(str.substr(first)); + return ret; +} + +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. 
+ */ +unsigned long stoul(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const unsigned long result = strtoul(strStart, &strEnd, base); + + const int strtoulErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtoulErrno == ERANGE) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of unsigned long"); + } else if (strEnd == strStart || strtoulErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return result; +} + +int stoi(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const long result = strtol(strStart, &strEnd, base); + + const int strtolErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtolErrno == ERANGE || long(int(result)) != result) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of int"); + } else if (strEnd == strStart || strtolErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return int(result); +} + +double stod(const std::string& str, size_t* pos) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const double result = strtod(strStart, &strEnd); + + /* Restore previous errno */ + const int strtodErrno = errno; + errno = oldErrno; + + /* Check for errors and return */ + if (strtodErrno == ERANGE) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of int"); + } else if (strEnd == strStart || strtodErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return result; +} +#endif + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/string_util.h b/vendor/googlebenchmark/src/string_util.h new file mode 100644 index 000000000..731aa2c04 --- /dev/null +++ b/vendor/googlebenchmark/src/string_util.h @@ -0,0 +1,70 @@ +#ifndef BENCHMARK_STRING_UTIL_H_ +#define BENCHMARK_STRING_UTIL_H_ + +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "benchmark/export.h" +#include "check.h" +#include "internal_macros.h" + +namespace benchmark { + +BENCHMARK_EXPORT +std::string HumanReadableNumber(double n, Counter::OneK one_k); + +BENCHMARK_EXPORT +#if defined(__MINGW32__) +__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2))) +#elif defined(__GNUC__) +__attribute__((format(printf, 1, 2))) +#endif +std::string +StrFormat(const char* format, ...); + +inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { + return out; +} + +template +inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { + out << std::forward(f); + return StrCatImp(out, std::forward(rest)...); +} + +template +inline std::string StrCat(Args&&... 
args) { + std::ostringstream ss; + StrCatImp(ss, std::forward(args)...); + return ss.str(); +} + +BENCHMARK_EXPORT +std::vector StrSplit(const std::string& str, char delim); + +// Disable lint checking for this block since it re-implements C functions. +// NOLINTBEGIN +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. + */ +unsigned long stoul(const std::string& str, size_t* pos = nullptr, + int base = 10); +int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); +double stod(const std::string& str, size_t* pos = nullptr); +#else +using std::stod; // NOLINT(misc-unused-using-decls) +using std::stoi; // NOLINT(misc-unused-using-decls) +using std::stoul; // NOLINT(misc-unused-using-decls) +#endif +// NOLINTEND + +} // end namespace benchmark + +#endif // BENCHMARK_STRING_UTIL_H_ diff --git a/vendor/googlebenchmark/src/sysinfo.cc b/vendor/googlebenchmark/src/sysinfo.cc new file mode 100644 index 000000000..a153b20cf --- /dev/null +++ b/vendor/googlebenchmark/src/sysinfo.cc @@ -0,0 +1,871 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#if !defined(WINVER) || WINVER < 0x0600 +#undef WINVER +#define WINVER 0x0600 +#endif // WINVER handling +#include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA +#include +#include + +#include +#else +#include +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +#include +#endif +#include +#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD +#include +#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ + defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \ + defined BENCHMARK_OS_DRAGONFLY +#define BENCHMARK_HAS_SYSCTL +#include +#endif +#endif +#if defined(BENCHMARK_OS_SOLARIS) +#include +#include +#endif +#if defined(BENCHMARK_OS_QNX) +#include +#endif +#if defined(BENCHMARK_OS_QURT) +#include +#endif +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "cycleclock.h" +#include "internal_macros.h" +#include "log.h" +#include "string_util.h" +#include "timers.h" + +namespace benchmark { +namespace { + +void PrintImp(std::ostream& out) { out << std::endl; } + +template +void PrintImp(std::ostream& out, First&& f, Rest&&... rest) { + out << std::forward(f); + PrintImp(out, std::forward(rest)...); +} + +template +BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... 
args) { + PrintImp(std::cerr, std::forward(args)...); + std::exit(EXIT_FAILURE); +} + +#ifdef BENCHMARK_HAS_SYSCTL + +/// ValueUnion - A type used to correctly alias the byte-for-byte output of +/// `sysctl` with the result type it's to be interpreted as. +struct ValueUnion { + union DataT { + int32_t int32_value; + int64_t int64_value; + // For correct aliasing of union members from bytes. + char bytes[8]; + }; + using DataPtr = std::unique_ptr; + + // The size of the data union member + its trailing array size. + std::size_t size; + DataPtr buff; + + public: + ValueUnion() : size(0), buff(nullptr, &std::free) {} + + explicit ValueUnion(std::size_t buff_size) + : size(sizeof(DataT) + buff_size), + buff(::new (std::malloc(size)) DataT(), &std::free) {} + + ValueUnion(ValueUnion&& other) = default; + + explicit operator bool() const { return bool(buff); } + + char* data() const { return buff->bytes; } + + std::string GetAsString() const { return std::string(data()); } + + int64_t GetAsInteger() const { + if (size == sizeof(buff->int32_value)) + return buff->int32_value; + else if (size == sizeof(buff->int64_value)) + return buff->int64_value; + BENCHMARK_UNREACHABLE(); + } + + template + std::array GetAsArray() { + const int arr_size = sizeof(T) * N; + BM_CHECK_LE(arr_size, size); + std::array arr; + std::memcpy(arr.data(), data(), arr_size); + return arr; + } +}; + +ValueUnion GetSysctlImp(std::string const& name) { +#if defined BENCHMARK_OS_OPENBSD + int mib[2]; + + mib[0] = CTL_HW; + if ((name == "hw.ncpu") || (name == "hw.cpuspeed")) { + ValueUnion buff(sizeof(int)); + + if (name == "hw.ncpu") { + mib[1] = HW_NCPU; + } else { + mib[1] = HW_CPUSPEED; + } + + if (sysctl(mib, 2, buff.data(), &buff.size, nullptr, 0) == -1) { + return ValueUnion(); + } + return buff; + } + return ValueUnion(); +#else + std::size_t cur_buff_size = 0; + if (sysctlbyname(name.c_str(), nullptr, &cur_buff_size, nullptr, 0) == -1) + return ValueUnion(); + + ValueUnion buff(cur_buff_size); + if (sysctlbyname(name.c_str(), buff.data(), &buff.size, nullptr, 0) == 0) + return buff; + return ValueUnion(); +#endif +} + +BENCHMARK_MAYBE_UNUSED +bool GetSysctl(std::string const& name, std::string* out) { + out->clear(); + auto buff = GetSysctlImp(name); + if (!buff) return false; + out->assign(buff.data()); + return true; +} + +template ::value>::type> +bool GetSysctl(std::string const& name, Tp* out) { + *out = 0; + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = static_cast(buff.GetAsInteger()); + return true; +} + +template +bool GetSysctl(std::string const& name, std::array* out) { + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = buff.GetAsArray(); + return true; +} +#endif + +template +bool ReadFromFile(std::string const& fname, ArgT* arg) { + *arg = ArgT(); + std::ifstream f(fname.c_str()); + if (!f.is_open()) return false; + f >> *arg; + return f.good(); +} + +CPUInfo::Scaling CpuScaling(int num_cpus) { + // We don't have a valid CPU count, so don't even bother. + if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN; +#if defined(BENCHMARK_OS_QNX) + return CPUInfo::Scaling::UNKNOWN; +#elif !defined(BENCHMARK_OS_WINDOWS) + // On Linux, the CPUfreq subsystem exposes CPU information as files on the + // local file system. If reading the exported files fails, then we may not be + // running on Linux, so we silently ignore all the read errors. 
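// A minimal standalone sketch of the sysfs check described above (illustrative
// only, not part of the vendored file): read each CPU's scaling_governor and
// treat any governor other than "performance" as "scaling may be enabled".
#include <fstream>
#include <string>
static bool AnyCpuMayScale(int num_cpus) {
  for (int cpu = 0; cpu < num_cpus; ++cpu) {
    std::ifstream f("/sys/devices/system/cpu/cpu" + std::to_string(cpu) +
                    "/cpufreq/scaling_governor");
    std::string governor;
    if ((f >> governor) && governor != "performance") return true;  // scaling likely active
  }
  return false;  // every readable governor reported "performance"
}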
+ std::string res; + for (int cpu = 0; cpu < num_cpus; ++cpu) { + std::string governor_file = + StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); + if (ReadFromFile(governor_file, &res) && res != "performance") + return CPUInfo::Scaling::ENABLED; + } + return CPUInfo::Scaling::DISABLED; +#else + return CPUInfo::Scaling::UNKNOWN; +#endif +} + +int CountSetBitsInCPUMap(std::string val) { + auto CountBits = [](std::string part) { + using CPUMask = std::bitset; + part = "0x" + part; + CPUMask mask(benchmark::stoul(part, nullptr, 16)); + return static_cast(mask.count()); + }; + std::size_t pos; + int total = 0; + while ((pos = val.find(',')) != std::string::npos) { + total += CountBits(val.substr(0, pos)); + val = val.substr(pos + 1); + } + if (!val.empty()) { + total += CountBits(val); + } + return total; +} + +BENCHMARK_MAYBE_UNUSED +std::vector GetCacheSizesFromKVFS() { + std::vector res; + std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; + int idx = 0; + while (true) { + CPUInfo::CacheInfo info; + std::string fpath = StrCat(dir, "index", idx++, "/"); + std::ifstream f(StrCat(fpath, "size").c_str()); + if (!f.is_open()) break; + std::string suffix; + f >> info.size; + if (f.fail()) + PrintErrorAndDie("Failed while reading file '", fpath, "size'"); + if (f.good()) { + f >> suffix; + if (f.bad()) + PrintErrorAndDie( + "Invalid cache size format: failed to read size suffix"); + else if (f && suffix != "K") + PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix); + else if (suffix == "K") + info.size *= 1024; + } + if (!ReadFromFile(StrCat(fpath, "type"), &info.type)) + PrintErrorAndDie("Failed to read from file ", fpath, "type"); + if (!ReadFromFile(StrCat(fpath, "level"), &info.level)) + PrintErrorAndDie("Failed to read from file ", fpath, "level"); + std::string map_str; + if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str)) + PrintErrorAndDie("Failed to read from file ", fpath, "shared_cpu_map"); + info.num_sharing = CountSetBitsInCPUMap(map_str); + res.push_back(info); + } + + return res; +} + +#ifdef BENCHMARK_OS_MACOSX +std::vector GetCacheSizesMacOSX() { + std::vector res; + std::array cache_counts{{0, 0, 0, 0}}; + GetSysctl("hw.cacheconfig", &cache_counts); + + struct { + std::string name; + std::string type; + int level; + int num_sharing; + } cases[] = {{"hw.l1dcachesize", "Data", 1, cache_counts[1]}, + {"hw.l1icachesize", "Instruction", 1, cache_counts[1]}, + {"hw.l2cachesize", "Unified", 2, cache_counts[2]}, + {"hw.l3cachesize", "Unified", 3, cache_counts[3]}}; + for (auto& c : cases) { + int val; + if (!GetSysctl(c.name, &val)) continue; + CPUInfo::CacheInfo info; + info.type = c.type; + info.level = c.level; + info.size = val; + info.num_sharing = c.num_sharing; + res.push_back(std::move(info)); + } + return res; +} +#elif defined(BENCHMARK_OS_WINDOWS) +std::vector GetCacheSizesWindows() { + std::vector res; + DWORD buffer_size = 0; + using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; + using CInfo = CACHE_DESCRIPTOR; + + using UPtr = std::unique_ptr; + GetLogicalProcessorInformation(nullptr, &buffer_size); + UPtr buff(static_cast(std::malloc(buffer_size)), &std::free); + if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) + PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", + GetLastError()); + + PInfo* it = buff.get(); + PInfo* end = buff.get() + (buffer_size / sizeof(PInfo)); + + for (; it != end; ++it) { + if (it->Relationship != RelationCache) continue; + using BitSet = std::bitset; + 
BitSet b(it->ProcessorMask); + // To prevent duplicates, only consider caches where CPU 0 is specified + if (!b.test(0)) continue; + const CInfo& cache = it->Cache; + CPUInfo::CacheInfo C; + C.num_sharing = static_cast(b.count()); + C.level = cache.Level; + C.size = static_cast(cache.Size); + C.type = "Unknown"; + switch (cache.Type) { + case CacheUnified: + C.type = "Unified"; + break; + case CacheInstruction: + C.type = "Instruction"; + break; + case CacheData: + C.type = "Data"; + break; + case CacheTrace: + C.type = "Trace"; + break; + } + res.push_back(C); + } + return res; +} +#elif BENCHMARK_OS_QNX +std::vector GetCacheSizesQNX() { + std::vector res; + struct cacheattr_entry* cache = SYSPAGE_ENTRY(cacheattr); + uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr); + int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize; + for (int i = 0; i < num; ++i) { + CPUInfo::CacheInfo info; + switch (cache->flags) { + case CACHE_FLAG_INSTR: + info.type = "Instruction"; + info.level = 1; + break; + case CACHE_FLAG_DATA: + info.type = "Data"; + info.level = 1; + break; + case CACHE_FLAG_UNIFIED: + info.type = "Unified"; + info.level = 2; + break; + case CACHE_FLAG_SHARED: + info.type = "Shared"; + info.level = 3; + break; + default: + continue; + break; + } + info.size = cache->line_size * cache->num_lines; + info.num_sharing = 0; + res.push_back(std::move(info)); + cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize); + } + return res; +} +#endif + +std::vector GetCacheSizes() { +#ifdef BENCHMARK_OS_MACOSX + return GetCacheSizesMacOSX(); +#elif defined(BENCHMARK_OS_WINDOWS) + return GetCacheSizesWindows(); +#elif defined(BENCHMARK_OS_QNX) + return GetCacheSizesQNX(); +#elif defined(BENCHMARK_OS_QURT) + return std::vector(); +#else + return GetCacheSizesFromKVFS(); +#endif +} + +std::string GetSystemName() { +#if defined(BENCHMARK_OS_WINDOWS) + std::string str; + static constexpr int COUNT = MAX_COMPUTERNAME_LENGTH + 1; + TCHAR hostname[COUNT] = {'\0'}; + DWORD DWCOUNT = COUNT; + if (!GetComputerName(hostname, &DWCOUNT)) return std::string(""); +#ifndef UNICODE + str = std::string(hostname, DWCOUNT); +#else + // `WideCharToMultiByte` returns `0` when conversion fails. + int len = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname, + DWCOUNT, NULL, 0, NULL, NULL); + str.resize(len); + WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname, DWCOUNT, &str[0], + str.size(), NULL, NULL); +#endif + return str; +#elif defined(BENCHMARK_OS_QURT) + std::string str = "Hexagon DSP"; + qurt_arch_version_t arch_version_struct; + if (qurt_sysenv_get_arch_version(&arch_version_struct) == QURT_EOK) { + str += " v"; + str += std::to_string(arch_version_struct.arch_version); + } + return str; +#else +#ifndef HOST_NAME_MAX +#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_NACL) +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_QNX) +#define HOST_NAME_MAX 154 +#elif defined(BENCHMARK_OS_RTEMS) +#define HOST_NAME_MAX 256 +#elif defined(BENCHMARK_OS_SOLARIS) +#define HOST_NAME_MAX MAXHOSTNAMELEN +#elif defined(BENCHMARK_OS_ZOS) +#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX +#else +#pragma message("HOST_NAME_MAX not defined. using 64") +#define HOST_NAME_MAX 64 +#endif +#endif // def HOST_NAME_MAX + char hostname[HOST_NAME_MAX]; + int retVal = gethostname(hostname, HOST_NAME_MAX); + if (retVal != 0) return std::string(""); + return std::string(hostname); +#endif // Catch-all POSIX block. 
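// A minimal standalone sketch of the POSIX branch above (illustrative only):
// gethostname() fills a caller-provided buffer, and 64 bytes is the
// conservative fallback used when the platform does not define HOST_NAME_MAX.
#include <string>
#include <unistd.h>
static std::string HostNameOrEmpty() {
  char buf[64] = {};                 // fallback buffer size assumed here
  if (::gethostname(buf, sizeof(buf)) != 0) return std::string();
  buf[sizeof(buf) - 1] = '\0';       // POSIX does not guarantee termination on truncation
  return std::string(buf);
}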
+} + +int GetNumCPUsImpl() { +#ifdef BENCHMARK_HAS_SYSCTL + int num_cpu = -1; + if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu; + PrintErrorAndDie("Err: ", strerror(errno)); +#elif defined(BENCHMARK_OS_WINDOWS) + SYSTEM_INFO sysinfo; + // Use memset as opposed to = {} to avoid GCC missing initializer false + // positives. + std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); + GetSystemInfo(&sysinfo); + // number of logical processors in the current group + return static_cast(sysinfo.dwNumberOfProcessors); +#elif defined(BENCHMARK_OS_SOLARIS) + // Returns -1 in case of a failure. + long num_cpu = sysconf(_SC_NPROCESSORS_ONLN); + if (num_cpu < 0) { + PrintErrorAndDie("sysconf(_SC_NPROCESSORS_ONLN) failed with error: ", + strerror(errno)); + } + return (int)num_cpu; +#elif defined(BENCHMARK_OS_QNX) + return static_cast(_syspage_ptr->num_cpu); +#elif defined(BENCHMARK_OS_QURT) + qurt_sysenv_max_hthreads_t hardware_threads; + if (qurt_sysenv_get_max_hw_threads(&hardware_threads) != QURT_EOK) { + hardware_threads.max_hthreads = 1; + } + return hardware_threads.max_hthreads; +#else + int num_cpus = 0; + int max_id = -1; + std::ifstream f("/proc/cpuinfo"); + if (!f.is_open()) { + std::cerr << "Failed to open /proc/cpuinfo\n"; + return -1; + } +#if defined(__alpha__) + const std::string Key = "cpus detected"; +#else + const std::string Key = "processor"; +#endif + std::string ln; + while (std::getline(f, ln)) { + if (ln.empty()) continue; + std::size_t split_idx = ln.find(':'); + std::string value; +#if defined(__s390__) + // s390 has another format in /proc/cpuinfo + // it needs to be parsed differently + if (split_idx != std::string::npos) + value = ln.substr(Key.size() + 1, split_idx - Key.size() - 1); +#else + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); +#endif + if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { + num_cpus++; + if (!value.empty()) { + const int cur_id = benchmark::stoi(value); + max_id = std::max(cur_id, max_id); + } + } + } + if (f.bad()) { + PrintErrorAndDie("Failure reading /proc/cpuinfo"); + } + if (!f.eof()) { + PrintErrorAndDie("Failed to read to end of /proc/cpuinfo"); + } + f.close(); + + if ((max_id + 1) != num_cpus) { + fprintf(stderr, + "CPU ID assignments in /proc/cpuinfo seem messed up." + " This is usually caused by a bad BIOS.\n"); + } + return num_cpus; +#endif + BENCHMARK_UNREACHABLE(); +} + +int GetNumCPUs() { + const int num_cpus = GetNumCPUsImpl(); + if (num_cpus < 1) { + std::cerr << "Unable to extract number of CPUs. If your platform uses " + "/proc/cpuinfo, custom support may need to be added.\n"; + } + return num_cpus; +} + +class ThreadAffinityGuard final { + public: + ThreadAffinityGuard() : reset_affinity(SetAffinity()) { + if (!reset_affinity) + std::cerr << "***WARNING*** Failed to set thread affinity. Estimated CPU " + "frequency may be incorrect." 
+ << std::endl; + } + + ~ThreadAffinityGuard() { + if (!reset_affinity) return; + +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + int ret = pthread_setaffinity_np(self, sizeof(previous_affinity), + &previous_affinity); + if (ret == 0) return; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + DWORD_PTR ret = SetThreadAffinityMask(self, previous_affinity); + if (ret != 0) return; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + PrintErrorAndDie("Failed to reset thread affinity"); + } + + ThreadAffinityGuard(ThreadAffinityGuard&&) = delete; + ThreadAffinityGuard(const ThreadAffinityGuard&) = delete; + ThreadAffinityGuard& operator=(ThreadAffinityGuard&&) = delete; + ThreadAffinityGuard& operator=(const ThreadAffinityGuard&) = delete; + + private: + bool SetAffinity() { +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + int ret; + self = pthread_self(); + ret = pthread_getaffinity_np(self, sizeof(previous_affinity), + &previous_affinity); + if (ret != 0) return false; + + cpu_set_t affinity; + memcpy(&affinity, &previous_affinity, sizeof(affinity)); + + bool is_first_cpu = true; + + for (int i = 0; i < CPU_SETSIZE; ++i) + if (CPU_ISSET(i, &affinity)) { + if (is_first_cpu) + is_first_cpu = false; + else + CPU_CLR(i, &affinity); + } + + if (is_first_cpu) return false; + + ret = pthread_setaffinity_np(self, sizeof(affinity), &affinity); + return ret == 0; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + self = GetCurrentThread(); + DWORD_PTR mask = static_cast(1) << GetCurrentProcessorNumber(); + previous_affinity = SetThreadAffinityMask(self, mask); + return previous_affinity != 0; +#else + return false; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + } + +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + pthread_t self; + cpu_set_t previous_affinity; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + HANDLE self; + DWORD_PTR previous_affinity; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + bool reset_affinity; +}; + +double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { + // Currently, scaling is only used on linux path here, + // suppress diagnostics about it being unused on other paths. + (void)scaling; + +#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN + long freq; + + // If the kernel is exporting the tsc frequency use that. There are issues + // where cpuinfo_max_freq cannot be relied on because the BIOS may be + // exporintg an invalid p-state (on x86) or p-states may be used to put the + // processor in a new mode (turbo mode). Essentially, those frequencies + // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as + // well. + if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq) + // If CPU scaling is disabled, use the *current* frequency. + // Note that we specifically don't want to read cpuinfo_cur_freq, + // because it is only readable by root. + || (scaling == CPUInfo::Scaling::DISABLED && + ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", + &freq)) + // Otherwise, if CPU scaling may be in effect, we want to use + // the *maximum* frequency, not whatever CPU speed some random processor + // happens to be using now. + || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", + &freq)) { + // The value is in kHz (as the file name suggests). For example, on a + // 2GHz warpstation, the file contains the value "2000000". 
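// Illustrative sketch of the kHz -> Hz conversion performed just below: the
// sysfs files report kilohertz, so a reading of "2000000" (the 2 GHz example
// in the comment above) becomes 2000000 * 1000.0 = 2.0e9 cycles per second.
#include <fstream>
static bool ReadKhz(const char* path, long* khz) {
  std::ifstream f(path);
  return static_cast<bool>(f >> *khz);  // false if the file is missing or unreadable
}
// e.g. ReadKhz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &khz)
// then hz = static_cast<double>(khz) * 1000.0.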
+ return static_cast(freq) * 1000.0; + } + + const double error_value = -1; + double bogo_clock = error_value; + + std::ifstream f("/proc/cpuinfo"); + if (!f.is_open()) { + std::cerr << "failed to open /proc/cpuinfo\n"; + return error_value; + } + + auto StartsWithKey = [](std::string const& Value, std::string const& Key) { + if (Key.size() > Value.size()) return false; + auto Cmp = [&](char X, char Y) { + return std::tolower(X) == std::tolower(Y); + }; + return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp); + }; + + std::string ln; + while (std::getline(f, ln)) { + if (ln.empty()) continue; + std::size_t split_idx = ln.find(':'); + std::string value; + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); + // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only + // accept positive values. Some environments (virtual machines) report zero, + // which would cause infinite looping in WallTime_Init. + if (StartsWithKey(ln, "cpu MHz")) { + if (!value.empty()) { + double cycles_per_second = benchmark::stod(value) * 1000000.0; + if (cycles_per_second > 0) return cycles_per_second; + } + } else if (StartsWithKey(ln, "bogomips")) { + if (!value.empty()) { + bogo_clock = benchmark::stod(value) * 1000000.0; + if (bogo_clock < 0.0) bogo_clock = error_value; + } + } + } + if (f.bad()) { + std::cerr << "Failure reading /proc/cpuinfo\n"; + return error_value; + } + if (!f.eof()) { + std::cerr << "Failed to read to end of /proc/cpuinfo\n"; + return error_value; + } + f.close(); + // If we found the bogomips clock, but nothing better, we'll use it (but + // we're not happy about it); otherwise, fallback to the rough estimation + // below. + if (bogo_clock >= 0.0) return bogo_clock; + +#elif defined BENCHMARK_HAS_SYSCTL + constexpr auto* freqStr = +#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) + "machdep.tsc_freq"; +#elif defined BENCHMARK_OS_OPENBSD + "hw.cpuspeed"; +#elif defined BENCHMARK_OS_DRAGONFLY + "hw.tsc_frequency"; +#else + "hw.cpufrequency"; +#endif + unsigned long long hz = 0; +#if defined BENCHMARK_OS_OPENBSD + if (GetSysctl(freqStr, &hz)) return static_cast(hz * 1000000); +#else + if (GetSysctl(freqStr, &hz)) return static_cast(hz); +#endif + fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", + freqStr, strerror(errno)); + fprintf(stderr, + "This does not affect benchmark measurements, only the " + "metadata output.\n"); + +#elif defined BENCHMARK_OS_WINDOWS_WIN32 + // In NT, read MHz from the registry. If we fail to do so or we're in win9x + // then make a crude estimate. 
+ DWORD data, data_size = sizeof(data); + if (IsWindowsXPOrGreater() && + SUCCEEDED( + SHGetValueA(HKEY_LOCAL_MACHINE, + "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", + "~MHz", nullptr, &data, &data_size))) + return static_cast(static_cast(data) * + static_cast(1000 * 1000)); // was mhz +#elif defined(BENCHMARK_OS_SOLARIS) + kstat_ctl_t* kc = kstat_open(); + if (!kc) { + std::cerr << "failed to open /dev/kstat\n"; + return -1; + } + kstat_t* ksp = kstat_lookup(kc, const_cast("cpu_info"), -1, + const_cast("cpu_info0")); + if (!ksp) { + std::cerr << "failed to lookup in /dev/kstat\n"; + return -1; + } + if (kstat_read(kc, ksp, NULL) < 0) { + std::cerr << "failed to read from /dev/kstat\n"; + return -1; + } + kstat_named_t* knp = (kstat_named_t*)kstat_data_lookup( + ksp, const_cast("current_clock_Hz")); + if (!knp) { + std::cerr << "failed to lookup data in /dev/kstat\n"; + return -1; + } + if (knp->data_type != KSTAT_DATA_UINT64) { + std::cerr << "current_clock_Hz is of unexpected data type: " + << knp->data_type << "\n"; + return -1; + } + double clock_hz = knp->value.ui64; + kstat_close(kc); + return clock_hz; +#elif defined(BENCHMARK_OS_QNX) + return static_cast( + static_cast(SYSPAGE_ENTRY(cpuinfo)->speed) * + static_cast(1000 * 1000)); +#elif defined(BENCHMARK_OS_QURT) + // QuRT doesn't provide any API to query Hexagon frequency. + return 1000000000; +#endif + // If we've fallen through, attempt to roughly estimate the CPU clock rate. + + // Make sure to use the same cycle counter when starting and stopping the + // cycle timer. We just pin the current thread to a cpu in the previous + // affinity set. + ThreadAffinityGuard affinity_guard; + + static constexpr double estimate_time_s = 1.0; + const double start_time = ChronoClockNow(); + const auto start_ticks = cycleclock::Now(); + + // Impose load instead of calling sleep() to make sure the cycle counter + // works. + using PRNG = std::minstd_rand; + using Result = PRNG::result_type; + PRNG rng(static_cast(start_ticks)); + + Result state = 0; + + do { + static constexpr size_t batch_size = 10000; + rng.discard(batch_size); + state += rng(); + + } while (ChronoClockNow() - start_time < estimate_time_s); + + DoNotOptimize(state); + + const auto end_ticks = cycleclock::Now(); + const double end_time = ChronoClockNow(); + + return static_cast(end_ticks - start_ticks) / (end_time - start_time); + // Reset the affinity of current thread when the lifetime of affinity_guard + // ends. 
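// The rough estimate above boils down to "cycles elapsed / wall-clock seconds
// elapsed", with busy work rather than sleep so the cycle counter keeps
// advancing. A condensed standalone sketch, using std::chrono for the wall
// clock and a hypothetical ReadTicks() in place of cycleclock::Now():
#include <chrono>
#include <cstdint>
int64_t ReadTicks();  // hypothetical stand-in for a platform cycle counter
static double EstimateCyclesPerSecond() {
  using Clock = std::chrono::steady_clock;
  const auto t0 = Clock::now();
  const int64_t c0 = ReadTicks();
  volatile uint64_t sink = 0;
  while (Clock::now() - t0 < std::chrono::seconds(1)) sink = sink + 1;  // impose load, don't sleep
  const double secs = std::chrono::duration<double>(Clock::now() - t0).count();
  return static_cast<double>(ReadTicks() - c0) / secs;
}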
+} + +std::vector GetLoadAvg() { +#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ + defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ + defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \ + !(defined(__ANDROID__) && __ANDROID_API__ < 29) + static constexpr int kMaxSamples = 3; + std::vector res(kMaxSamples, 0.0); + const size_t nelem = static_cast(getloadavg(res.data(), kMaxSamples)); + if (nelem < 1) { + res.clear(); + } else { + res.resize(nelem); + } + return res; +#else + return {}; +#endif +} + +} // end namespace + +const CPUInfo& CPUInfo::Get() { + static const CPUInfo* info = new CPUInfo(); + return *info; +} + +CPUInfo::CPUInfo() + : num_cpus(GetNumCPUs()), + scaling(CpuScaling(num_cpus)), + cycles_per_second(GetCPUCyclesPerSecond(scaling)), + caches(GetCacheSizes()), + load_avg(GetLoadAvg()) {} + +const SystemInfo& SystemInfo::Get() { + static const SystemInfo* info = new SystemInfo(); + return *info; +} + +SystemInfo::SystemInfo() : name(GetSystemName()) {} +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/thread_manager.h b/vendor/googlebenchmark/src/thread_manager.h new file mode 100644 index 000000000..819b3c44d --- /dev/null +++ b/vendor/googlebenchmark/src/thread_manager.h @@ -0,0 +1,63 @@ +#ifndef BENCHMARK_THREAD_MANAGER_H +#define BENCHMARK_THREAD_MANAGER_H + +#include + +#include "benchmark/benchmark.h" +#include "mutex.h" + +namespace benchmark { +namespace internal { + +class ThreadManager { + public: + explicit ThreadManager(int num_threads) + : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} + + Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { + return benchmark_mutex_; + } + + bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { + return start_stop_barrier_.wait(); + } + + void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { + start_stop_barrier_.removeThread(); + if (--alive_threads_ == 0) { + MutexLock lock(end_cond_mutex_); + end_condition_.notify_all(); + } + } + + void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { + MutexLock lock(end_cond_mutex_); + end_condition_.wait(lock.native_handle(), + [this]() { return alive_threads_ == 0; }); + } + + struct Result { + IterationCount iterations = 0; + double real_time_used = 0; + double cpu_time_used = 0; + double manual_time_used = 0; + int64_t complexity_n = 0; + std::string report_label_; + std::string skip_message_; + internal::Skipped skipped_ = internal::NotSkipped; + UserCounters counters; + }; + GUARDED_BY(GetBenchmarkMutex()) Result results; + + private: + mutable Mutex benchmark_mutex_; + std::atomic alive_threads_; + Barrier start_stop_barrier_; + Mutex end_cond_mutex_; + Condition end_condition_; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_MANAGER_H diff --git a/vendor/googlebenchmark/src/thread_timer.h b/vendor/googlebenchmark/src/thread_timer.h new file mode 100644 index 000000000..eb23f5956 --- /dev/null +++ b/vendor/googlebenchmark/src/thread_timer.h @@ -0,0 +1,86 @@ +#ifndef BENCHMARK_THREAD_TIMER_H +#define BENCHMARK_THREAD_TIMER_H + +#include "check.h" +#include "timers.h" + +namespace benchmark { +namespace internal { + +class ThreadTimer { + explicit ThreadTimer(bool measure_process_cpu_time_) + : measure_process_cpu_time(measure_process_cpu_time_) {} + + public: + static ThreadTimer Create() { + return ThreadTimer(/*measure_process_cpu_time_=*/false); + } + static ThreadTimer CreateProcessCpuTime() { + return 
ThreadTimer(/*measure_process_cpu_time_=*/true); + } + + // Called by each thread + void StartTimer() { + running_ = true; + start_real_time_ = ChronoClockNow(); + start_cpu_time_ = ReadCpuTimerOfChoice(); + } + + // Called by each thread + void StopTimer() { + BM_CHECK(running_); + running_ = false; + real_time_used_ += ChronoClockNow() - start_real_time_; + // Floating point error can result in the subtraction producing a negative + // time. Guard against that. + cpu_time_used_ += + std::max(ReadCpuTimerOfChoice() - start_cpu_time_, 0); + } + + // Called by each thread + void SetIterationTime(double seconds) { manual_time_used_ += seconds; } + + bool running() const { return running_; } + + // REQUIRES: timer is not running + double real_time_used() const { + BM_CHECK(!running_); + return real_time_used_; + } + + // REQUIRES: timer is not running + double cpu_time_used() const { + BM_CHECK(!running_); + return cpu_time_used_; + } + + // REQUIRES: timer is not running + double manual_time_used() const { + BM_CHECK(!running_); + return manual_time_used_; + } + + private: + double ReadCpuTimerOfChoice() const { + if (measure_process_cpu_time) return ProcessCPUUsage(); + return ThreadCPUUsage(); + } + + // should the thread, or the process, time be measured? + const bool measure_process_cpu_time; + + bool running_ = false; // Is the timer running + double start_real_time_ = 0; // If running_ + double start_cpu_time_ = 0; // If running_ + + // Accumulated time so far (does not contain current slice if running_) + double real_time_used_ = 0; + double cpu_time_used_ = 0; + // Manually set iteration time. User sets this with SetIterationTime(seconds). + double manual_time_used_ = 0; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_TIMER_H diff --git a/vendor/googlebenchmark/src/timers.cc b/vendor/googlebenchmark/src/timers.cc new file mode 100644 index 000000000..7ba540b88 --- /dev/null +++ b/vendor/googlebenchmark/src/timers.cc @@ -0,0 +1,284 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
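// The CPU-time primitives implemented in this file (ProcessCPUUsage and
// ThreadCPUUsage) are what ThreadTimer's ReadCpuTimerOfChoice() calls. A
// minimal usage sketch of that timer (illustrative only; Workload is a
// hypothetical function under measurement):
#include "thread_timer.h"
void Workload();  // hypothetical
static double ThreadCpuSecondsOf(void (*work)()) {
  benchmark::internal::ThreadTimer timer =
      benchmark::internal::ThreadTimer::Create();  // per-thread CPU time
  timer.StartTimer();
  work();
  timer.StopTimer();
  return timer.cpu_time_used();  // CPU seconds accumulated between Start and Stop
}
// e.g. double secs = ThreadCpuSecondsOf(&Workload);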
+ +#include "timers.h" + +#include "internal_macros.h" + +#ifdef BENCHMARK_OS_WINDOWS +#include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA +#include +#include +#else +#include +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +#include +#endif +#include +#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD +#include +#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \ + defined BENCHMARK_OS_MACOSX +#include +#endif +#if defined(BENCHMARK_OS_MACOSX) +#include +#include +#include +#endif +#if defined(BENCHMARK_OS_QURT) +#include +#endif +#endif + +#ifdef BENCHMARK_OS_EMSCRIPTEN +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "log.h" +#include "string_util.h" + +namespace benchmark { + +// Suppress unused warnings on helper functions. +#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#if defined(__NVCOMPILER) +#pragma diag_suppress declared_but_not_referenced +#endif + +namespace { +#if defined(BENCHMARK_OS_WINDOWS) +double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { + ULARGE_INTEGER kernel; + ULARGE_INTEGER user; + kernel.HighPart = kernel_time.dwHighDateTime; + kernel.LowPart = kernel_time.dwLowDateTime; + user.HighPart = user_time.dwHighDateTime; + user.LowPart = user_time.dwLowDateTime; + return (static_cast(kernel.QuadPart) + + static_cast(user.QuadPart)) * + 1e-7; +} +#elif !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) +double MakeTime(struct rusage const& ru) { + return (static_cast(ru.ru_utime.tv_sec) + + static_cast(ru.ru_utime.tv_usec) * 1e-6 + + static_cast(ru.ru_stime.tv_sec) + + static_cast(ru.ru_stime.tv_usec) * 1e-6); +} +#endif +#if defined(BENCHMARK_OS_MACOSX) +double MakeTime(thread_basic_info_data_t const& info) { + return (static_cast(info.user_time.seconds) + + static_cast(info.user_time.microseconds) * 1e-6 + + static_cast(info.system_time.seconds) + + static_cast(info.system_time.microseconds) * 1e-6); +} +#endif +#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) +double MakeTime(struct timespec const& ts) { + return static_cast(ts.tv_sec) + + (static_cast(ts.tv_nsec) * 1e-9); +} +#endif + +BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { + std::cerr << "ERROR: " << msg << std::endl; + std::exit(EXIT_FAILURE); +} + +} // end namespace + +double ProcessCPUUsage() { +#if defined(BENCHMARK_OS_WINDOWS) + HANDLE proc = GetCurrentProcess(); + FILETIME creation_time; + FILETIME exit_time; + FILETIME kernel_time; + FILETIME user_time; + if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, + &user_time)) + return MakeTime(kernel_time, user_time); + DiagnoseAndExit("GetProccessTimes() failed"); +#elif defined(BENCHMARK_OS_QURT) + // Note that qurt_timer_get_ticks() is no longer documented as of SDK 5.3.0, + // and doesn't appear to work on at least some devices (eg Samsung S22), + // so let's use the actually-documented and apparently-equivalent + // qurt_sysclock_get_hw_ticks() call instead. + return static_cast( + qurt_timer_timetick_to_us(qurt_sysclock_get_hw_ticks())) * + 1.0e-6; +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. + // Use Emscripten-specific API. 
Reported CPU time would be exactly the + // same as total time, but this is ok because there aren't long-latency + // synchronous system calls in Emscripten. + return emscripten_get_now() * 1e-3; +#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. + // See https://github.com/google/benchmark/pull/292 + struct timespec spec; + if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) + return MakeTime(spec); + DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); +#else + struct rusage ru; + if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); + DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed"); +#endif +} + +double ThreadCPUUsage() { +#if defined(BENCHMARK_OS_WINDOWS) + HANDLE this_thread = GetCurrentThread(); + FILETIME creation_time; + FILETIME exit_time; + FILETIME kernel_time; + FILETIME user_time; + GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, + &user_time); + return MakeTime(kernel_time, user_time); +#elif defined(BENCHMARK_OS_QURT) + // Note that qurt_timer_get_ticks() is no longer documented as of SDK 5.3.0, + // and doesn't appear to work on at least some devices (eg Samsung S22), + // so let's use the actually-documented and apparently-equivalent + // qurt_sysclock_get_hw_ticks() call instead. + return static_cast( + qurt_timer_timetick_to_us(qurt_sysclock_get_hw_ticks())) * + 1.0e-6; +#elif defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. + // See https://github.com/google/benchmark/pull/292 + mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; + thread_basic_info_data_t info; + mach_port_t thread = pthread_mach_thread_np(pthread_self()); + if (thread_info(thread, THREAD_BASIC_INFO, + reinterpret_cast(&info), + &count) == KERN_SUCCESS) { + return MakeTime(info); + } + DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // Emscripten doesn't support traditional threads + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_RTEMS) + // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See + // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_ZOS) + // z/OS doesn't support CLOCK_THREAD_CPUTIME_ID. + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_SOLARIS) + struct rusage ru; + if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); + DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); +#elif defined(CLOCK_THREAD_CPUTIME_ID) + struct timespec ts; + if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); + DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); +#else +#error Per-thread timing is not available on your system. +#endif +} + +std::string LocalDateTimeString() { + // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM. + typedef std::chrono::system_clock Clock; + std::time_t now = Clock::to_time_t(Clock::now()); + const std::size_t kTzOffsetLen = 6; + const std::size_t kTimestampLen = 19; + + std::size_t tz_len; + std::size_t timestamp_len; + long int offset_minutes; + char tz_offset_sign = '+'; + // tz_offset is set in one of three ways: + // * strftime with %z - This either returns empty or the ISO 8601 time. The + // maximum length an + // ISO 8601 string can be is 7 (e.g. -03:30, plus trailing zero). 
+ // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to + // 19 for %02li, + // one for :, up to 19 %02li, plus trailing zero). + // * A fixed string of "-00:00". The maximum length is 7 (-00:00, plus + // trailing zero). + // + // Thus, the maximum size this needs to be is 41. + char tz_offset[41]; + // Long enough buffer to avoid format-overflow warnings + char storage[128]; + +#if defined(BENCHMARK_OS_WINDOWS) + std::tm* timeinfo_p = ::localtime(&now); +#else + std::tm timeinfo; + std::tm* timeinfo_p = &timeinfo; + ::localtime_r(&now, &timeinfo); +#endif + + tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p); + + if (tz_len < kTzOffsetLen && tz_len > 1) { + // Timezone offset was written. strftime writes offset as +HHMM or -HHMM, + // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse + // the offset as an integer, then reprint it to a string. + + offset_minutes = ::strtol(tz_offset, NULL, 10); + if (offset_minutes < 0) { + offset_minutes *= -1; + tz_offset_sign = '-'; + } + + tz_len = static_cast( + ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", + tz_offset_sign, offset_minutes / 100, offset_minutes % 100)); + BM_CHECK(tz_len == kTzOffsetLen); + ((void)tz_len); // Prevent unused variable warning in optimized build. + } else { + // Unknown offset. RFC3339 specifies that unknown local offsets should be + // written as UTC time with -00:00 timezone. +#if defined(BENCHMARK_OS_WINDOWS) + // Potential race condition if another thread calls localtime or gmtime. + timeinfo_p = ::gmtime(&now); +#else + ::gmtime_r(&now, &timeinfo); +#endif + + strncpy(tz_offset, "-00:00", kTzOffsetLen + 1); + } + + timestamp_len = + std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", timeinfo_p); + BM_CHECK(timestamp_len == kTimestampLen); + // Prevent unused variable warning in optimized build. + ((void)kTimestampLen); + + std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1); + return std::string(storage); +} + +} // end namespace benchmark diff --git a/vendor/googlebenchmark/src/timers.h b/vendor/googlebenchmark/src/timers.h new file mode 100644 index 000000000..690086b36 --- /dev/null +++ b/vendor/googlebenchmark/src/timers.h @@ -0,0 +1,75 @@ +#ifndef BENCHMARK_TIMERS_H +#define BENCHMARK_TIMERS_H + +#include +#include + +namespace benchmark { + +// Return the CPU usage of the current process +double ProcessCPUUsage(); + +// Return the CPU usage of the children of the current process +double ChildrenCPUUsage(); + +// Return the CPU usage of the current thread +double ThreadCPUUsage(); + +#if defined(BENCHMARK_OS_QURT) + +// std::chrono::now() can return 0 on some Hexagon devices; +// this reads the value of a 56-bit, 19.2MHz hardware counter +// and converts it to seconds. Unlike std::chrono, this doesn't +// return an absolute time, but since ChronoClockNow() is only used +// to compute elapsed time, this shouldn't matter. 
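// The conversion described above is exactly what the period typedef below
// encodes: one tick of a 19.2 MHz counter is 1/19,200,000 of a second, so
// std::chrono can turn raw ticks into fractional seconds. Standalone sketch:
#include <chrono>
#include <cstdint>
#include <ratio>
static double TicksToSeconds(uint64_t ticks) {
  using Tick = std::chrono::duration<uint64_t, std::ratio<1, 19200000>>;
  return std::chrono::duration<double>(Tick(ticks)).count();
}
// e.g. TicksToSeconds(19200000) == 1.0 and TicksToSeconds(9600000) == 0.5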
+struct QuRTClock { + typedef uint64_t rep; + typedef std::ratio<1, 19200000> period; + typedef std::chrono::duration duration; + typedef std::chrono::time_point time_point; + static const bool is_steady = false; + + static time_point now() { + unsigned long long count; + asm volatile(" %0 = c31:30 " : "=r"(count)); + return time_point(static_cast(count)); + } +}; + +#else + +#if defined(HAVE_STEADY_CLOCK) +template +struct ChooseSteadyClock { + typedef std::chrono::high_resolution_clock type; +}; + +template <> +struct ChooseSteadyClock { + typedef std::chrono::steady_clock type; +}; +#endif // HAVE_STEADY_CLOCK + +#endif + +struct ChooseClockType { +#if defined(BENCHMARK_OS_QURT) + typedef QuRTClock type; +#elif defined(HAVE_STEADY_CLOCK) + typedef ChooseSteadyClock<>::type type; +#else + typedef std::chrono::high_resolution_clock type; +#endif +}; + +inline double ChronoClockNow() { + typedef ChooseClockType::type ClockType; + using FpSeconds = std::chrono::duration; + return FpSeconds(ClockType::now().time_since_epoch()).count(); +} + +std::string LocalDateTimeString(); + +} // end namespace benchmark + +#endif // BENCHMARK_TIMERS_H diff --git a/vendor/googletest.mask b/vendor/googletest.mask new file mode 100644 index 000000000..764d4a620 --- /dev/null +++ b/vendor/googletest.mask @@ -0,0 +1,19 @@ +googletest/test +googletest/docs +googletest/samples +googletest/README.md +googlemock/docs +googlemock/test +googlemock/README.md +docs +ci +.clang-format +BUILD.bazel +CONTRIBUTING.md +CONTRIBUTORS +googletest_deps.bzl +README.md +WORKSPACE +MODULE.bazel +WORKSPACE.bzlmod +fake_fuchsia_sdk.bzl diff --git a/vendor/googletest/CMakeLists.txt b/vendor/googletest/CMakeLists.txt new file mode 100644 index 000000000..9e6d6440d --- /dev/null +++ b/vendor/googletest/CMakeLists.txt @@ -0,0 +1,36 @@ +# Note: CMake support is community-based. The maintainers do not use CMake +# internally. + +cmake_minimum_required(VERSION 3.13) + +project(googletest-distribution) +set(GOOGLETEST_VERSION 1.14.0) + +if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) + set(CMAKE_CXX_EXTENSIONS OFF) +endif() + +enable_testing() + +include(CMakeDependentOption) +include(GNUInstallDirs) + +# Note that googlemock target already builds googletest. +option(BUILD_GMOCK "Builds the googlemock subproject" ON) +option(INSTALL_GTEST "Enable installation of googletest. (Projects embedding googletest may want to turn this OFF.)" ON) +option(GTEST_HAS_ABSL "Use Abseil and RE2. Requires Abseil and RE2 to be separately added to the build." OFF) + +if(GTEST_HAS_ABSL) + if(NOT TARGET absl::base) + find_package(absl REQUIRED) + endif() + if(NOT TARGET re2::re2) + find_package(re2 REQUIRED) + endif() +endif() + +if(BUILD_GMOCK) + add_subdirectory( googlemock ) +else() + add_subdirectory( googletest ) +endif() diff --git a/vendor/googletest/LICENSE b/vendor/googletest/LICENSE new file mode 100644 index 000000000..1941a11f8 --- /dev/null +++ b/vendor/googletest/LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/googletest/googlemock/CMakeLists.txt b/vendor/googletest/googlemock/CMakeLists.txt new file mode 100644 index 000000000..99b2411f3 --- /dev/null +++ b/vendor/googletest/googlemock/CMakeLists.txt @@ -0,0 +1,210 @@ +######################################################################## +# Note: CMake support is community-based. The maintainers do not use CMake +# internally. +# +# CMake build script for Google Mock. +# +# To run the tests for Google Mock itself on Linux, use 'make test' or +# ctest. You can select which tests to run using 'ctest -R regex'. +# For more options, run 'ctest --help'. + +option(gmock_build_tests "Build all of Google Mock's own tests." OFF) + +# A directory to find Google Test sources. +if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gtest/CMakeLists.txt") + set(gtest_dir gtest) +else() + set(gtest_dir ../googletest) +endif() + +# Defines pre_project_set_up_hermetic_build() and set_up_hermetic_build(). +include("${gtest_dir}/cmake/hermetic_build.cmake" OPTIONAL) + +if (COMMAND pre_project_set_up_hermetic_build) + # Google Test also calls hermetic setup functions from add_subdirectory, + # although its changes will not affect things at the current scope. + pre_project_set_up_hermetic_build() +endif() + +######################################################################## +# +# Project-wide settings + +# Name of the project. +# +# CMake files in this project can refer to the root source directory +# as ${gmock_SOURCE_DIR} and to the root binary directory as +# ${gmock_BINARY_DIR}. +# Language "C" is required for find_package(Threads). +cmake_minimum_required(VERSION 3.13) +project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) + +if (COMMAND set_up_hermetic_build) + set_up_hermetic_build() +endif() + +# Instructs CMake to process Google Test's CMakeLists.txt and add its +# targets to the current scope. We are placing Google Test's binary +# directory in a subdirectory of our own as VC compilation may break +# if they are the same (the default). +add_subdirectory("${gtest_dir}" "${gmock_BINARY_DIR}/${gtest_dir}") + + +# These commands only run if this is the main project +if(CMAKE_PROJECT_NAME STREQUAL "gmock" OR CMAKE_PROJECT_NAME STREQUAL "googletest-distribution") + # BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to + # make it prominent in the GUI. + option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)." OFF) +else() + mark_as_advanced(gmock_build_tests) +endif() + +# Although Google Test's CMakeLists.txt calls this function, the +# changes there don't affect the current scope. 
Therefore we have to +# call it again here. +config_compiler_and_linker() # from ${gtest_dir}/cmake/internal_utils.cmake + +# Adds Google Mock's and Google Test's header directories to the search path. +# Get Google Test's include dirs from the target, gtest_SOURCE_DIR is broken +# when using fetch-content with the name "GTest". +get_target_property(gtest_include_dirs gtest INCLUDE_DIRECTORIES) +set(gmock_build_include_dirs + "${gmock_SOURCE_DIR}/include" + "${gmock_SOURCE_DIR}" + "${gtest_include_dirs}") +include_directories(${gmock_build_include_dirs}) + +######################################################################## +# +# Defines the gmock & gmock_main libraries. User tests should link +# with one of them. + +# Google Mock libraries. We build them using more strict warnings than what +# are used for other targets, to ensure that Google Mock can be compiled by +# a user aggressive about warnings. +if (MSVC) + cxx_library(gmock + "${cxx_strict}" + "${gtest_dir}/src/gtest-all.cc" + src/gmock-all.cc) + + cxx_library(gmock_main + "${cxx_strict}" + "${gtest_dir}/src/gtest-all.cc" + src/gmock-all.cc + src/gmock_main.cc) +else() + cxx_library(gmock "${cxx_strict}" src/gmock-all.cc) + target_link_libraries(gmock PUBLIC gtest) + set_target_properties(gmock PROPERTIES VERSION ${GOOGLETEST_VERSION}) + cxx_library(gmock_main "${cxx_strict}" src/gmock_main.cc) + target_link_libraries(gmock_main PUBLIC gmock) + set_target_properties(gmock_main PROPERTIES VERSION ${GOOGLETEST_VERSION}) +endif() + +string(REPLACE ";" "$" dirs "${gmock_build_include_dirs}") +target_include_directories(gmock SYSTEM INTERFACE + "$" + "$/${CMAKE_INSTALL_INCLUDEDIR}>") +target_include_directories(gmock_main SYSTEM INTERFACE + "$" + "$/${CMAKE_INSTALL_INCLUDEDIR}>") + +######################################################################## +# +# Install rules. +install_project(gmock gmock_main) + +######################################################################## +# +# Google Mock's own tests. +# +# You can skip this section if you aren't interested in testing +# Google Mock itself. +# +# The tests are not built by default. To build them, set the +# gmock_build_tests option to ON. You can do it by running ccmake +# or specifying the -Dgmock_build_tests=ON flag when running cmake. + +if (gmock_build_tests) + # This must be set in the root directory for the tests to be run by + # 'make test' or ctest. + enable_testing() + + if (MINGW OR CYGWIN) + add_compile_options("-Wa,-mbig-obj") + endif() + + ############################################################ + # C++ tests built with standard compiler flags. + + cxx_test(gmock-actions_test gmock_main) + cxx_test(gmock-cardinalities_test gmock_main) + cxx_test(gmock_ex_test gmock_main) + cxx_test(gmock-function-mocker_test gmock_main) + cxx_test(gmock-internal-utils_test gmock_main) + cxx_test(gmock-matchers-arithmetic_test gmock_main) + cxx_test(gmock-matchers-comparisons_test gmock_main) + cxx_test(gmock-matchers-containers_test gmock_main) + cxx_test(gmock-matchers-misc_test gmock_main) + cxx_test(gmock-more-actions_test gmock_main) + cxx_test(gmock-nice-strict_test gmock_main) + cxx_test(gmock-port_test gmock_main) + cxx_test(gmock-spec-builders_test gmock_main) + cxx_test(gmock_link_test gmock_main test/gmock_link2_test.cc) + cxx_test(gmock_test gmock_main) + + if (DEFINED GTEST_HAS_PTHREAD) + cxx_test(gmock_stress_test gmock) + endif() + + # gmock_all_test is commented to save time building and running tests. + # Uncomment if necessary. 
+ # cxx_test(gmock_all_test gmock_main) + + ############################################################ + # C++ tests built with non-standard compiler flags. + + if (MSVC) + cxx_library(gmock_main_no_exception "${cxx_no_exception}" + "${gtest_dir}/src/gtest-all.cc" src/gmock-all.cc src/gmock_main.cc) + + cxx_library(gmock_main_no_rtti "${cxx_no_rtti}" + "${gtest_dir}/src/gtest-all.cc" src/gmock-all.cc src/gmock_main.cc) + + else() + cxx_library(gmock_main_no_exception "${cxx_no_exception}" src/gmock_main.cc) + target_link_libraries(gmock_main_no_exception PUBLIC gmock) + + cxx_library(gmock_main_no_rtti "${cxx_no_rtti}" src/gmock_main.cc) + target_link_libraries(gmock_main_no_rtti PUBLIC gmock) + endif() + cxx_test_with_flags(gmock-more-actions_no_exception_test "${cxx_no_exception}" + gmock_main_no_exception test/gmock-more-actions_test.cc) + + cxx_test_with_flags(gmock_no_rtti_test "${cxx_no_rtti}" + gmock_main_no_rtti test/gmock-spec-builders_test.cc) + + cxx_shared_library(shared_gmock_main "${cxx_default}" + "${gtest_dir}/src/gtest-all.cc" src/gmock-all.cc src/gmock_main.cc) + + # Tests that a binary can be built with Google Mock as a shared library. On + # some system configurations, it may not possible to run the binary without + # knowing more details about the system configurations. We do not try to run + # this binary. To get a more robust shared library coverage, configure with + # -DBUILD_SHARED_LIBS=ON. + cxx_executable_with_flags(shared_gmock_test_ "${cxx_default}" + shared_gmock_main test/gmock-spec-builders_test.cc) + set_target_properties(shared_gmock_test_ + PROPERTIES + COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + + ############################################################ + # Python tests. + + cxx_executable(gmock_leak_test_ test gmock_main) + py_test(gmock_leak_test) + + cxx_executable(gmock_output_test_ test gmock) + py_test(gmock_output_test) +endif() diff --git a/vendor/googletest/googlemock/cmake/gmock.pc.in b/vendor/googletest/googlemock/cmake/gmock.pc.in new file mode 100644 index 000000000..23c67b5c8 --- /dev/null +++ b/vendor/googletest/googlemock/cmake/gmock.pc.in @@ -0,0 +1,10 @@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ + +Name: gmock +Description: GoogleMock (without main() function) +Version: @PROJECT_VERSION@ +URL: https://github.com/google/googletest +Requires: gtest = @PROJECT_VERSION@ +Libs: -L${libdir} -lgmock @CMAKE_THREAD_LIBS_INIT@ +Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ diff --git a/vendor/googletest/googlemock/cmake/gmock_main.pc.in b/vendor/googletest/googlemock/cmake/gmock_main.pc.in new file mode 100644 index 000000000..66ffea7f4 --- /dev/null +++ b/vendor/googletest/googlemock/cmake/gmock_main.pc.in @@ -0,0 +1,10 @@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ + +Name: gmock_main +Description: GoogleMock (with main() function) +Version: @PROJECT_VERSION@ +URL: https://github.com/google/googletest +Requires: gmock = @PROJECT_VERSION@ +Libs: -L${libdir} -lgmock_main @CMAKE_THREAD_LIBS_INIT@ +Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ diff --git a/vendor/googletest/googlemock/include/gmock/gmock-actions.h b/vendor/googletest/googlemock/include/gmock/gmock-actions.h new file mode 100644 index 000000000..cd1299695 --- /dev/null +++ b/vendor/googletest/googlemock/include/gmock/gmock-actions.h @@ -0,0 +1,2321 @@ +// Copyright 2007, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// The ACTION* family of macros can be used in a namespace scope to +// define custom actions easily. The syntax: +// +// ACTION(name) { statements; } +// +// will define an action with the given name that executes the +// statements. The value returned by the statements will be used as +// the return value of the action. Inside the statements, you can +// refer to the K-th (0-based) argument of the mock function by +// 'argK', and refer to its type by 'argK_type'. For example: +// +// ACTION(IncrementArg1) { +// arg1_type temp = arg1; +// return ++(*temp); +// } +// +// allows you to write +// +// ...WillOnce(IncrementArg1()); +// +// You can also refer to the entire argument tuple and its type by +// 'args' and 'args_type', and refer to the mock function type and its +// return type by 'function_type' and 'return_type'. +// +// Note that you don't need to specify the types of the mock function +// arguments. However rest assured that your code is still type-safe: +// you'll get a compiler error if *arg1 doesn't support the ++ +// operator, or if the type of ++(*arg1) isn't compatible with the +// mock function's return type, for example. +// +// Sometimes you'll want to parameterize the action. For that you can use +// another macro: +// +// ACTION_P(name, param_name) { statements; } +// +// For example: +// +// ACTION_P(Add, n) { return arg0 + n; } +// +// will allow you to write: +// +// ...WillOnce(Add(5)); +// +// Note that you don't need to provide the type of the parameter +// either. If you need to reference the type of a parameter named +// 'foo', you can write 'foo_type'. For example, in the body of +// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type +// of 'n'. +// +// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support +// multi-parameter actions. +// +// For the purpose of typing, you can view +// +// ACTION_Pk(Foo, p1, ..., pk) { ... 
} +// +// as shorthand for +// +// template +// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... } +// +// In particular, you can provide the template type arguments +// explicitly when invoking Foo(), as in Foo(5, false); +// although usually you can rely on the compiler to infer the types +// for you automatically. You can assign the result of expression +// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. +// +// You can also overload actions with different numbers of parameters: +// +// ACTION_P(Plus, a) { ... } +// ACTION_P2(Plus, a, b) { ... } +// +// While it's tempting to always use the ACTION* macros when defining +// a new action, you should also consider implementing ActionInterface +// or using MakePolymorphicAction() instead, especially if you need to +// use the action a lot. While these approaches require more work, +// they give you more control on the types of the mock function +// arguments and the action parameters, which in general leads to +// better compiler error messages that pay off in the long run. They +// also allow overloading actions based on parameter types (as opposed +// to just based on the number of parameters). +// +// CAVEAT: +// +// ACTION*() can only be used in a namespace scope as templates cannot be +// declared inside of a local class. +// Users can, however, define any local functors (e.g. a lambda) that +// can be used as actions. +// +// MORE INFORMATION: +// +// To learn more about using these macros, please search for 'ACTION' on +// https://github.com/google/googletest/blob/main/docs/gmock_cook_book.md + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ + +#ifndef _WIN32_WCE +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/internal/gmock-internal-utils.h" +#include "gmock/internal/gmock-port.h" +#include "gmock/internal/gmock-pp.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100) + +namespace testing { + +// To implement an action Foo, define: +// 1. a class FooAction that implements the ActionInterface interface, and +// 2. a factory function that creates an Action object from a +// const FooAction*. +// +// The two-level delegation design follows that of Matcher, providing +// consistency for extension developers. It also eases ownership +// management as Action objects can now be copied like plain values. + +namespace internal { + +// BuiltInDefaultValueGetter::Get() returns a +// default-constructed T value. BuiltInDefaultValueGetter::Get() crashes with an error. +// +// This primary template is used when kDefaultConstructible is true. +template +struct BuiltInDefaultValueGetter { + static T Get() { return T(); } +}; +template +struct BuiltInDefaultValueGetter { + static T Get() { + Assert(false, __FILE__, __LINE__, + "Default action undefined for the function return type."); +#if defined(__GNUC__) || defined(__clang__) + __builtin_unreachable(); +#elif defined(_MSC_VER) + __assume(0); +#else + return Invalid(); + // The above statement will never be reached, but is required in + // order for this function to compile. +#endif + } +}; + +// BuiltInDefaultValue::Get() returns the "built-in" default value +// for type T, which is NULL when T is a raw pointer type, 0 when T is +// a numeric type, false when T is bool, or "" when T is string or +// std::string. 
In addition, in C++11 and above, it turns a +// default-constructed T value if T is default constructible. For any +// other type T, the built-in default T value is undefined, and the +// function will abort the process. +template +class BuiltInDefaultValue { + public: + // This function returns true if and only if type T has a built-in default + // value. + static bool Exists() { return ::std::is_default_constructible::value; } + + static T Get() { + return BuiltInDefaultValueGetter< + T, ::std::is_default_constructible::value>::Get(); + } +}; + +// This partial specialization says that we use the same built-in +// default value for T and const T. +template +class BuiltInDefaultValue { + public: + static bool Exists() { return BuiltInDefaultValue::Exists(); } + static T Get() { return BuiltInDefaultValue::Get(); } +}; + +// This partial specialization defines the default values for pointer +// types. +template +class BuiltInDefaultValue { + public: + static bool Exists() { return true; } + static T* Get() { return nullptr; } +}; + +// The following specializations define the default values for +// specific types we care about. +#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \ + template <> \ + class BuiltInDefaultValue { \ + public: \ + static bool Exists() { return true; } \ + static type Get() { return value; } \ + } + +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, ""); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0'); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0'); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0'); + +// There's no need for a default action for signed wchar_t, as that +// type is the same as wchar_t for gcc, and invalid for MSVC. +// +// There's also no need for a default action for unsigned wchar_t, as +// that type is the same as unsigned int for gcc, and invalid for +// MSVC. +#if GMOCK_WCHAR_T_IS_NATIVE_ +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U); // NOLINT +#endif + +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long long, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long long, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0); + +#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_ + +// Partial implementations of metaprogramming types from the standard library +// not available in C++11. + +template +struct negation + // NOLINTNEXTLINE + : std::integral_constant {}; + +// Base case: with zero predicates the answer is always true. +template +struct conjunction : std::true_type {}; + +// With a single predicate, the answer is that predicate. +template +struct conjunction : P1 {}; + +// With multiple predicates the answer is the first predicate if that is false, +// and we recurse otherwise. 
+template +struct conjunction + : std::conditional, P1>::type {}; + +template +struct disjunction : std::false_type {}; + +template +struct disjunction : P1 {}; + +template +struct disjunction + // NOLINTNEXTLINE + : std::conditional, P1>::type {}; + +template +using void_t = void; + +// Detects whether an expression of type `From` can be implicitly converted to +// `To` according to [conv]. In C++17, [conv]/3 defines this as follows: +// +// An expression e can be implicitly converted to a type T if and only if +// the declaration T t=e; is well-formed, for some invented temporary +// variable t ([dcl.init]). +// +// [conv]/2 implies we can use function argument passing to detect whether this +// initialization is valid. +// +// Note that this is distinct from is_convertible, which requires this be valid: +// +// To test() { +// return declval(); +// } +// +// In particular, is_convertible doesn't give the correct answer when `To` and +// `From` are the same non-moveable type since `declval` will be an rvalue +// reference, defeating the guaranteed copy elision that would otherwise make +// this function work. +// +// REQUIRES: `From` is not cv void. +template +struct is_implicitly_convertible { + private: + // A function that accepts a parameter of type T. This can be called with type + // U successfully only if U is implicitly convertible to T. + template + static void Accept(T); + + // A function that creates a value of type T. + template + static T Make(); + + // An overload be selected when implicit conversion from T to To is possible. + template (Make()))> + static std::true_type TestImplicitConversion(int); + + // A fallback overload selected in all other cases. + template + static std::false_type TestImplicitConversion(...); + + public: + using type = decltype(TestImplicitConversion(0)); + static constexpr bool value = type::value; +}; + +// Like std::invoke_result_t from C++17, but works only for objects with call +// operators (not e.g. member function pointers, which we don't need specific +// support for in OnceAction because std::function deals with them). +template +using call_result_t = decltype(std::declval()(std::declval()...)); + +template +struct is_callable_r_impl : std::false_type {}; + +// Specialize the struct for those template arguments where call_result_t is +// well-formed. When it's not, the generic template above is chosen, resulting +// in std::false_type. +template +struct is_callable_r_impl>, R, F, Args...> + : std::conditional< + std::is_void::value, // + std::true_type, // + is_implicitly_convertible, R>>::type {}; + +// Like std::is_invocable_r from C++17, but works only for objects with call +// operators. See the note on call_result_t. +template +using is_callable_r = is_callable_r_impl; + +// Like std::as_const from C++17. +template +typename std::add_const::type& as_const(T& t) { + return t; +} + +} // namespace internal + +// Specialized for function types below. +template +class OnceAction; + +// An action that can only be used once. +// +// This is accepted by WillOnce, which doesn't require the underlying action to +// be copy-constructible (only move-constructible), and promises to invoke it as +// an rvalue reference. This allows the action to work with move-only types like +// std::move_only_function in a type-safe manner. +// +// For example: +// +// // Assume we have some API that needs to accept a unique pointer to some +// // non-copyable object Foo. 
+// void AcceptUniquePointer(std::unique_ptr foo); +// +// // We can define an action that provides a Foo to that API. Because It +// // has to give away its unique pointer, it must not be called more than +// // once, so its call operator is &&-qualified. +// struct ProvideFoo { +// std::unique_ptr foo; +// +// void operator()() && { +// AcceptUniquePointer(std::move(Foo)); +// } +// }; +// +// // This action can be used with WillOnce. +// EXPECT_CALL(mock, Call) +// .WillOnce(ProvideFoo{std::make_unique(...)}); +// +// // But a call to WillRepeatedly will fail to compile. This is correct, +// // since the action cannot correctly be used repeatedly. +// EXPECT_CALL(mock, Call) +// .WillRepeatedly(ProvideFoo{std::make_unique(...)}); +// +// A less-contrived example would be an action that returns an arbitrary type, +// whose &&-qualified call operator is capable of dealing with move-only types. +template +class OnceAction final { + private: + // True iff we can use the given callable type (or lvalue reference) directly + // via StdFunctionAdaptor. + template + using IsDirectlyCompatible = internal::conjunction< + // It must be possible to capture the callable in StdFunctionAdaptor. + std::is_constructible::type, Callable>, + // The callable must be compatible with our signature. + internal::is_callable_r::type, + Args...>>; + + // True iff we can use the given callable type via StdFunctionAdaptor once we + // ignore incoming arguments. + template + using IsCompatibleAfterIgnoringArguments = internal::conjunction< + // It must be possible to capture the callable in a lambda. + std::is_constructible::type, Callable>, + // The callable must be invocable with zero arguments, returning something + // convertible to Result. + internal::is_callable_r::type>>; + + public: + // Construct from a callable that is directly compatible with our mocked + // signature: it accepts our function type's arguments and returns something + // convertible to our result type. + template ::type>>, + IsDirectlyCompatible> // + ::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + : function_(StdFunctionAdaptor::type>( + {}, std::forward(callable))) {} + + // As above, but for a callable that ignores the mocked function's arguments. + template ::type>>, + // Exclude callables for which the overload above works. + // We'd rather provide the arguments if possible. + internal::negation>, + IsCompatibleAfterIgnoringArguments>::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + // Call the constructor above with a callable + // that ignores the input arguments. + : OnceAction(IgnoreIncomingArguments::type>{ + std::forward(callable)}) {} + + // We are naturally copyable because we store only an std::function, but + // semantically we should not be copyable. + OnceAction(const OnceAction&) = delete; + OnceAction& operator=(const OnceAction&) = delete; + OnceAction(OnceAction&&) = default; + + // Invoke the underlying action callable with which we were constructed, + // handing it the supplied arguments. + Result Call(Args... args) && { + return function_(std::forward(args)...); + } + + private: + // An adaptor that wraps a callable that is compatible with our signature and + // being invoked as an rvalue reference so that it can be used as an + // StdFunctionAdaptor. This throws away type safety, but that's fine because + // this is only used by WillOnce, which we know calls at most once. 
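// For example (an illustrative sketch, not from the upstream sources), a
// lambda with a move-only capture is accepted by WillOnce, but not by
// WillRepeatedly, precisely because it only has to satisfy OnceAction:
//
//   MockFunction<std::unique_ptr<int>()> mock;
//   auto owned = std::make_unique<int>(7);
//   EXPECT_CALL(mock, Call())
//       .WillOnce([p = std::move(owned)]() mutable { return std::move(p); });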
+ // + // Once we have something like std::move_only_function from C++23, we can do + // away with this. + template + class StdFunctionAdaptor final { + public: + // A tag indicating that the (otherwise universal) constructor is accepting + // the callable itself, instead of e.g. stealing calls for the move + // constructor. + struct CallableTag final {}; + + template + explicit StdFunctionAdaptor(CallableTag, F&& callable) + : callable_(std::make_shared(std::forward(callable))) {} + + // Rather than explicitly returning Result, we return whatever the wrapped + // callable returns. This allows for compatibility with existing uses like + // the following, when the mocked function returns void: + // + // EXPECT_CALL(mock_fn_, Call) + // .WillOnce([&] { + // [...] + // return 0; + // }); + // + // Such a callable can be turned into std::function. If we use an + // explicit return type of Result here then it *doesn't* work with + // std::function, because we'll get a "void function should not return a + // value" error. + // + // We need not worry about incompatible result types because the SFINAE on + // OnceAction already checks this for us. std::is_invocable_r_v itself makes + // the same allowance for void result types. + template + internal::call_result_t operator()( + ArgRefs&&... args) const { + return std::move(*callable_)(std::forward(args)...); + } + + private: + // We must put the callable on the heap so that we are copyable, which + // std::function needs. + std::shared_ptr callable_; + }; + + // An adaptor that makes a callable that accepts zero arguments callable with + // our mocked arguments. + template + struct IgnoreIncomingArguments { + internal::call_result_t operator()(Args&&...) { + return std::move(callable)(); + } + + Callable callable; + }; + + std::function function_; +}; + +// When an unexpected function call is encountered, Google Mock will +// let it return a default value if the user has specified one for its +// return type, or if the return type has a built-in default value; +// otherwise Google Mock won't know what value to return and will have +// to abort the process. +// +// The DefaultValue class allows a user to specify the +// default value for a type T that is both copyable and publicly +// destructible (i.e. anything that can be used as a function return +// type). The usage is: +// +// // Sets the default value for type T to be foo. +// DefaultValue::Set(foo); +template +class DefaultValue { + public: + // Sets the default value for type T; requires T to be + // copy-constructable and have a public destructor. + static void Set(T x) { + delete producer_; + producer_ = new FixedValueProducer(x); + } + + // Provides a factory function to be called to generate the default value. + // This method can be used even if T is only move-constructible, but it is not + // limited to that case. + typedef T (*FactoryFunction)(); + static void SetFactory(FactoryFunction factory) { + delete producer_; + producer_ = new FactoryValueProducer(factory); + } + + // Unsets the default value for type T. + static void Clear() { + delete producer_; + producer_ = nullptr; + } + + // Returns true if and only if the user has set the default value for type T. + static bool IsSet() { return producer_ != nullptr; } + + // Returns true if T has a default return value set by the user or there + // exists a built-in default value. 
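// For example (an illustrative sketch; Config and Config::Load are
// hypothetical user types, not part of Google Mock), a process-wide default
// for a type with no usable built-in default can be installed and later
// removed:
//
//   DefaultValue<Config>::Set(Config::Load("test.cfg"));
//   ...  // unstubbed mock calls returning Config now yield this value
//   DefaultValue<Config>::Clear();
//
// Without a user-set default, the built-in defaults apply where they exist
// (0 for numeric types, "" for strings, false for bool, nullptr for pointers).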
+ static bool Exists() { + return IsSet() || internal::BuiltInDefaultValue::Exists(); + } + + // Returns the default value for type T if the user has set one; + // otherwise returns the built-in default value. Requires that Exists() + // is true, which ensures that the return value is well-defined. + static T Get() { + return producer_ == nullptr ? internal::BuiltInDefaultValue::Get() + : producer_->Produce(); + } + + private: + class ValueProducer { + public: + virtual ~ValueProducer() = default; + virtual T Produce() = 0; + }; + + class FixedValueProducer : public ValueProducer { + public: + explicit FixedValueProducer(T value) : value_(value) {} + T Produce() override { return value_; } + + private: + const T value_; + FixedValueProducer(const FixedValueProducer&) = delete; + FixedValueProducer& operator=(const FixedValueProducer&) = delete; + }; + + class FactoryValueProducer : public ValueProducer { + public: + explicit FactoryValueProducer(FactoryFunction factory) + : factory_(factory) {} + T Produce() override { return factory_(); } + + private: + const FactoryFunction factory_; + FactoryValueProducer(const FactoryValueProducer&) = delete; + FactoryValueProducer& operator=(const FactoryValueProducer&) = delete; + }; + + static ValueProducer* producer_; +}; + +// This partial specialization allows a user to set default values for +// reference types. +template +class DefaultValue { + public: + // Sets the default value for type T&. + static void Set(T& x) { // NOLINT + address_ = &x; + } + + // Unsets the default value for type T&. + static void Clear() { address_ = nullptr; } + + // Returns true if and only if the user has set the default value for type T&. + static bool IsSet() { return address_ != nullptr; } + + // Returns true if T has a default return value set by the user or there + // exists a built-in default value. + static bool Exists() { + return IsSet() || internal::BuiltInDefaultValue::Exists(); + } + + // Returns the default value for type T& if the user has set one; + // otherwise returns the built-in default value if there is one; + // otherwise aborts the process. + static T& Get() { + return address_ == nullptr ? internal::BuiltInDefaultValue::Get() + : *address_; + } + + private: + static T* address_; +}; + +// This specialization allows DefaultValue::Get() to +// compile. +template <> +class DefaultValue { + public: + static bool Exists() { return true; } + static void Get() {} +}; + +// Points to the user-set default value for type T. +template +typename DefaultValue::ValueProducer* DefaultValue::producer_ = nullptr; + +// Points to the user-set default value for type T&. +template +T* DefaultValue::address_ = nullptr; + +// Implement this interface to define an action for function type F. +template +class ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + ActionInterface() = default; + virtual ~ActionInterface() = default; + + // Performs the action. This method is not const, as in general an + // action can have side effects and be stateful. For example, a + // get-the-next-element-from-the-collection action will need to + // remember the current element. 
+ virtual Result Perform(const ArgumentTuple& args) = 0; + + private: + ActionInterface(const ActionInterface&) = delete; + ActionInterface& operator=(const ActionInterface&) = delete; +}; + +template +class Action; + +// An Action is a copyable and IMMUTABLE (except by assignment) +// object that represents an action to be taken when a mock function of type +// R(Args...) is called. The implementation of Action is just a +// std::shared_ptr to const ActionInterface. Don't inherit from Action! You +// can view an object implementing ActionInterface as a concrete action +// (including its current state), and an Action object as a handle to it. +template +class Action { + private: + using F = R(Args...); + + // Adapter class to allow constructing Action from a legacy ActionInterface. + // New code should create Actions from functors instead. + struct ActionAdapter { + // Adapter must be copyable to satisfy std::function requirements. + ::std::shared_ptr> impl_; + + template + typename internal::Function::Result operator()(InArgs&&... args) { + return impl_->Perform( + ::std::forward_as_tuple(::std::forward(args)...)); + } + }; + + template + using IsCompatibleFunctor = std::is_constructible, G>; + + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + // Constructs a null Action. Needed for storing Action objects in + // STL containers. + Action() = default; + + // Construct an Action from a specified callable. + // This cannot take std::function directly, because then Action would not be + // directly constructible from lambda (it would require two conversions). + template < + typename G, + typename = typename std::enable_if, std::is_constructible, + G>>::value>::type> + Action(G&& fun) { // NOLINT + Init(::std::forward(fun), IsCompatibleFunctor()); + } + + // Constructs an Action from its implementation. + explicit Action(ActionInterface* impl) + : fun_(ActionAdapter{::std::shared_ptr>(impl)}) {} + + // This constructor allows us to turn an Action object into an + // Action, as long as F's arguments can be implicitly converted + // to Func's and Func's return type can be implicitly converted to F's. + template + Action(const Action& action) // NOLINT + : fun_(action.fun_) {} + + // Returns true if and only if this is the DoDefault() action. + bool IsDoDefault() const { return fun_ == nullptr; } + + // Performs the action. Note that this method is const even though + // the corresponding method in ActionInterface is not. The reason + // is that a const Action means that it cannot be re-bound to + // another concrete action, not that the concrete action it binds to + // cannot change state. (Think of the difference between a const + // pointer and a pointer to const.) + Result Perform(ArgumentTuple args) const { + if (IsDoDefault()) { + internal::IllegalDoDefault(__FILE__, __LINE__); + } + return internal::Apply(fun_, ::std::move(args)); + } + + // An action can be used as a OnceAction, since it's obviously safe to call it + // once. + operator OnceAction() const { // NOLINT + // Return a OnceAction-compatible callable that calls Perform with the + // arguments it is provided. We could instead just return fun_, but then + // we'd need to handle the IsDoDefault() case separately. + struct OA { + Action action; + + R operator()(Args... 
args) && { + return action.Perform( + std::forward_as_tuple(std::forward(args)...)); + } + }; + + return OA{*this}; + } + + private: + template + friend class Action; + + template + void Init(G&& g, ::std::true_type) { + fun_ = ::std::forward(g); + } + + template + void Init(G&& g, ::std::false_type) { + fun_ = IgnoreArgs::type>{::std::forward(g)}; + } + + template + struct IgnoreArgs { + template + Result operator()(const InArgs&...) const { + return function_impl(); + } + + FunctionImpl function_impl; + }; + + // fun_ is an empty function if and only if this is the DoDefault() action. + ::std::function fun_; +}; + +// The PolymorphicAction class template makes it easy to implement a +// polymorphic action (i.e. an action that can be used in mock +// functions of than one type, e.g. Return()). +// +// To define a polymorphic action, a user first provides a COPYABLE +// implementation class that has a Perform() method template: +// +// class FooAction { +// public: +// template +// Result Perform(const ArgumentTuple& args) const { +// // Processes the arguments and returns a result, using +// // std::get(args) to get the N-th (0-based) argument in the tuple. +// } +// ... +// }; +// +// Then the user creates the polymorphic action using +// MakePolymorphicAction(object) where object has type FooAction. See +// the definition of Return(void) and SetArgumentPointee(value) for +// complete examples. +template +class PolymorphicAction { + public: + explicit PolymorphicAction(const Impl& impl) : impl_(impl) {} + + template + operator Action() const { + return Action(new MonomorphicImpl(impl_)); + } + + private: + template + class MonomorphicImpl : public ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {} + + Result Perform(const ArgumentTuple& args) override { + return impl_.template Perform(args); + } + + private: + Impl impl_; + }; + + Impl impl_; +}; + +// Creates an Action from its implementation and returns it. The +// created Action object owns the implementation. +template +Action MakeAction(ActionInterface* impl) { + return Action(impl); +} + +// Creates a polymorphic action from its implementation. This is +// easier to use than the PolymorphicAction constructor as it +// doesn't require you to explicitly write the template argument, e.g. +// +// MakePolymorphicAction(foo); +// vs +// PolymorphicAction(foo); +template +inline PolymorphicAction MakePolymorphicAction(const Impl& impl) { + return PolymorphicAction(impl); +} + +namespace internal { + +// Helper struct to specialize ReturnAction to execute a move instead of a copy +// on return. Useful for move-only types, but could be used on any type. +template +struct ByMoveWrapper { + explicit ByMoveWrapper(T value) : payload(std::move(value)) {} + T payload; +}; + +// The general implementation of Return(R). Specializations follow below. 
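// An illustrative sketch (MockFunction-based; values are arbitrary) of how
// Return(x) behaves when converted to a one-shot versus a repeatable action:
//
//   MockFunction<std::string()> mock;
//   EXPECT_CALL(mock, Call())
//       .WillOnce(Return(std::string("once")))         // may move from the stored value
//       .WillRepeatedly(Return(std::string("again")));  // copies the stored value per call
//
//   // Move-only results work with WillOnce only, since the stored value can
//   // be consumed at most once:
//   MockFunction<std::unique_ptr<int>()> factory;
//   EXPECT_CALL(factory, Call()).WillOnce(Return(std::make_unique<int>(1)));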
+template +class ReturnAction final { + public: + explicit ReturnAction(R value) : value_(std::move(value)) {} + + template >, // + negation>, // + std::is_convertible, // + std::is_move_constructible>::value>::type> + operator OnceAction() && { // NOLINT + return Impl(std::move(value_)); + } + + template >, // + negation>, // + std::is_convertible, // + std::is_copy_constructible>::value>::type> + operator Action() const { // NOLINT + return Impl(value_); + } + + private: + // Implements the Return(x) action for a mock function that returns type U. + template + class Impl final { + public: + // The constructor used when the return value is allowed to move from the + // input value (i.e. we are converting to OnceAction). + explicit Impl(R&& input_value) + : state_(new State(std::move(input_value))) {} + + // The constructor used when the return value is not allowed to move from + // the input value (i.e. we are converting to Action). + explicit Impl(const R& input_value) : state_(new State(input_value)) {} + + U operator()() && { return std::move(state_->value); } + U operator()() const& { return state_->value; } + + private: + // We put our state on the heap so that the compiler-generated copy/move + // constructors work correctly even when U is a reference-like type. This is + // necessary only because we eagerly create State::value (see the note on + // that symbol for details). If we instead had only the input value as a + // member then the default constructors would work fine. + // + // For example, when R is std::string and U is std::string_view, value is a + // reference to the string backed by input_value. The copy constructor would + // copy both, so that we wind up with a new input_value object (with the + // same contents) and a reference to the *old* input_value object rather + // than the new one. + struct State { + explicit State(const R& input_value_in) + : input_value(input_value_in), + // Make an implicit conversion to Result before initializing the U + // object we store, avoiding calling any explicit constructor of U + // from R. + // + // This simulates the language rules: a function with return type U + // that does `return R()` requires R to be implicitly convertible to + // U, and uses that path for the conversion, even U Result has an + // explicit constructor from R. + value(ImplicitCast_(internal::as_const(input_value))) {} + + // As above, but for the case where we're moving from the ReturnAction + // object because it's being used as a OnceAction. + explicit State(R&& input_value_in) + : input_value(std::move(input_value_in)), + // For the same reason as above we make an implicit conversion to U + // before initializing the value. + // + // Unlike above we provide the input value as an rvalue to the + // implicit conversion because this is a OnceAction: it's fine if it + // wants to consume the input value. + value(ImplicitCast_(std::move(input_value))) {} + + // A copy of the value originally provided by the user. We retain this in + // addition to the value of the mock function's result type below in case + // the latter is a reference-like type. See the std::string_view example + // in the documentation on Return. + R input_value; + + // The value we actually return, as the type returned by the mock function + // itself. 
+ // + // We eagerly initialize this here, rather than lazily doing the implicit + // conversion automatically each time Perform is called, for historical + // reasons: in 2009-11, commit a070cbd91c (Google changelist 13540126) + // made the Action conversion operator eagerly convert the R value to + // U, but without keeping the R alive. This broke the use case discussed + // in the documentation for Return, making reference-like types such as + // std::string_view not safe to use as U where the input type R is a + // value-like type such as std::string. + // + // The example the commit gave was not very clear, nor was the issue + // thread (https://github.com/google/googlemock/issues/86), but it seems + // the worry was about reference-like input types R that flatten to a + // value-like type U when being implicitly converted. An example of this + // is std::vector::reference, which is often a proxy type with an + // reference to the underlying vector: + // + // // Helper method: have the mock function return bools according + // // to the supplied script. + // void SetActions(MockFunction& mock, + // const std::vector& script) { + // for (size_t i = 0; i < script.size(); ++i) { + // EXPECT_CALL(mock, Call(i)).WillOnce(Return(script[i])); + // } + // } + // + // TEST(Foo, Bar) { + // // Set actions using a temporary vector, whose operator[] + // // returns proxy objects that references that will be + // // dangling once the call to SetActions finishes and the + // // vector is destroyed. + // MockFunction mock; + // SetActions(mock, {false, true}); + // + // EXPECT_FALSE(mock.AsStdFunction()(0)); + // EXPECT_TRUE(mock.AsStdFunction()(1)); + // } + // + // This eager conversion helps with a simple case like this, but doesn't + // fully make these types work in general. For example the following still + // uses a dangling reference: + // + // TEST(Foo, Baz) { + // MockFunction()> mock; + // + // // Return the same vector twice, and then the empty vector + // // thereafter. + // auto action = Return(std::initializer_list{ + // "taco", "burrito", + // }); + // + // EXPECT_CALL(mock, Call) + // .WillOnce(action) + // .WillOnce(action) + // .WillRepeatedly(Return(std::vector{})); + // + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), IsEmpty()); + // } + // + U value; + }; + + const std::shared_ptr state_; + }; + + R value_; +}; + +// A specialization of ReturnAction when R is ByMoveWrapper for some T. +// +// This version applies the type system-defeating hack of moving from T even in +// the const call operator, checking at runtime that it isn't called more than +// once, since the user has declared their intent to do so by using ByMove. +template +class ReturnAction> final { + public: + explicit ReturnAction(ByMoveWrapper wrapper) + : state_(new State(std::move(wrapper.payload))) {} + + T operator()() const { + GTEST_CHECK_(!state_->called) + << "A ByMove() action must be performed at most once."; + + state_->called = true; + return std::move(state_->value); + } + + private: + // We store our state on the heap so that we are copyable as required by + // Action, despite the fact that we are stateful and T may not be copyable. + struct State { + explicit State(T&& value_in) : value(std::move(value_in)) {} + + T value; + bool called = false; + }; + + const std::shared_ptr state_; +}; + +// Implements the ReturnNull() action. 
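// A short usage sketch (illustrative, MockFunction-based): ReturnNull() is a
// convenient spelling of "return a null pointer of whatever pointer type the
// mocked function returns".
//
//   MockFunction<int*()> mock;
//   EXPECT_CALL(mock, Call()).WillOnce(ReturnNull());
//   EXPECT_EQ(nullptr, mock.AsStdFunction()());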
+class ReturnNullAction { + public: + // Allows ReturnNull() to be used in any pointer-returning function. In C++11 + // this is enforced by returning nullptr, and in non-C++11 by asserting a + // pointer type on compile time. + template + static Result Perform(const ArgumentTuple&) { + return nullptr; + } +}; + +// Implements the Return() action. +class ReturnVoidAction { + public: + // Allows Return() to be used in any void-returning function. + template + static void Perform(const ArgumentTuple&) { + static_assert(std::is_void::value, "Result should be void."); + } +}; + +// Implements the polymorphic ReturnRef(x) action, which can be used +// in any function that returns a reference to the type of x, +// regardless of the argument types. +template +class ReturnRefAction { + public: + // Constructs a ReturnRefAction object from the reference to be returned. + explicit ReturnRefAction(T& ref) : ref_(ref) {} // NOLINT + + // This template type conversion operator allows ReturnRef(x) to be + // used in ANY function that returns a reference to x's type. + template + operator Action() const { + typedef typename Function::Result Result; + // Asserts that the function return type is a reference. This + // catches the user error of using ReturnRef(x) when Return(x) + // should be used, and generates some helpful error message. + static_assert(std::is_reference::value, + "use Return instead of ReturnRef to return a value"); + return Action(new Impl(ref_)); + } + + private: + // Implements the ReturnRef(x) action for a particular function type F. + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(T& ref) : ref_(ref) {} // NOLINT + + Result Perform(const ArgumentTuple&) override { return ref_; } + + private: + T& ref_; + }; + + T& ref_; +}; + +// Implements the polymorphic ReturnRefOfCopy(x) action, which can be +// used in any function that returns a reference to the type of x, +// regardless of the argument types. +template +class ReturnRefOfCopyAction { + public: + // Constructs a ReturnRefOfCopyAction object from the reference to + // be returned. + explicit ReturnRefOfCopyAction(const T& value) : value_(value) {} // NOLINT + + // This template type conversion operator allows ReturnRefOfCopy(x) to be + // used in ANY function that returns a reference to x's type. + template + operator Action() const { + typedef typename Function::Result Result; + // Asserts that the function return type is a reference. This + // catches the user error of using ReturnRefOfCopy(x) when Return(x) + // should be used, and generates some helpful error message. + static_assert(std::is_reference::value, + "use Return instead of ReturnRefOfCopy to return a value"); + return Action(new Impl(value_)); + } + + private: + // Implements the ReturnRefOfCopy(x) action for a particular function type F. + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const T& value) : value_(value) {} // NOLINT + + Result Perform(const ArgumentTuple&) override { return value_; } + + private: + T value_; + }; + + const T value_; +}; + +// Implements the polymorphic ReturnRoundRobin(v) action, which can be +// used in any function that returns the element_type of v. 
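// A short usage sketch (illustrative, MockFunction-based): the supplied values
// are returned in order and wrap around once exhausted.
//
//   MockFunction<int()> mock;
//   EXPECT_CALL(mock, Call()).WillRepeatedly(ReturnRoundRobin({1, 2, 3}));
//   // Successive calls yield 1, 2, 3, 1, 2, 3, ...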
+template +class ReturnRoundRobinAction { + public: + explicit ReturnRoundRobinAction(std::vector values) { + GTEST_CHECK_(!values.empty()) + << "ReturnRoundRobin requires at least one element."; + state_->values = std::move(values); + } + + template + T operator()(Args&&...) const { + return state_->Next(); + } + + private: + struct State { + T Next() { + T ret_val = values[i++]; + if (i == values.size()) i = 0; + return ret_val; + } + + std::vector values; + size_t i = 0; + }; + std::shared_ptr state_ = std::make_shared(); +}; + +// Implements the polymorphic DoDefault() action. +class DoDefaultAction { + public: + // This template type conversion operator allows DoDefault() to be + // used in any function. + template + operator Action() const { + return Action(); + } // NOLINT +}; + +// Implements the Assign action to set a given pointer referent to a +// particular value. +template +class AssignAction { + public: + AssignAction(T1* ptr, T2 value) : ptr_(ptr), value_(value) {} + + template + void Perform(const ArgumentTuple& /* args */) const { + *ptr_ = value_; + } + + private: + T1* const ptr_; + const T2 value_; +}; + +#ifndef GTEST_OS_WINDOWS_MOBILE + +// Implements the SetErrnoAndReturn action to simulate return from +// various system calls and libc functions. +template +class SetErrnoAndReturnAction { + public: + SetErrnoAndReturnAction(int errno_value, T result) + : errno_(errno_value), result_(result) {} + template + Result Perform(const ArgumentTuple& /* args */) const { + errno = errno_; + return result_; + } + + private: + const int errno_; + const T result_; +}; + +#endif // !GTEST_OS_WINDOWS_MOBILE + +// Implements the SetArgumentPointee(x) action for any function +// whose N-th argument (0-based) is a pointer to x's type. +template +struct SetArgumentPointeeAction { + A value; + + template + void operator()(const Args&... args) const { + *::std::get(std::tie(args...)) = value; + } +}; + +// Implements the Invoke(object_ptr, &Class::Method) action. +template +struct InvokeMethodAction { + Class* const obj_ptr; + const MethodPtr method_ptr; + + template + auto operator()(Args&&... args) const + -> decltype((obj_ptr->*method_ptr)(std::forward(args)...)) { + return (obj_ptr->*method_ptr)(std::forward(args)...); + } +}; + +// Implements the InvokeWithoutArgs(f) action. The template argument +// FunctionImpl is the implementation type of f, which can be either a +// function pointer or a functor. InvokeWithoutArgs(f) can be used as an +// Action as long as f's type is compatible with F. +template +struct InvokeWithoutArgsAction { + FunctionImpl function_impl; + + // Allows InvokeWithoutArgs(f) to be used as any action whose type is + // compatible with f. + template + auto operator()(const Args&...) -> decltype(function_impl()) { + return function_impl(); + } +}; + +// Implements the InvokeWithoutArgs(object_ptr, &Class::Method) action. +template +struct InvokeMethodWithoutArgsAction { + Class* const obj_ptr; + const MethodPtr method_ptr; + + using ReturnType = + decltype((std::declval()->*std::declval())()); + + template + ReturnType operator()(const Args&...) const { + return (obj_ptr->*method_ptr)(); + } +}; + +// Implements the IgnoreResult(action) action. +template +class IgnoreResultAction { + public: + explicit IgnoreResultAction(const A& action) : action_(action) {} + + template + operator Action() const { + // Assert statement belongs here because this is the best place to verify + // conditions on F. It produces the clearest error messages + // in most compilers. 
+ // Impl really belongs in this scope as a local class but can't + // because MSVC produces duplicate symbols in different translation units + // in this case. Until MS fixes that bug we put Impl into the class scope + // and put the typedef both here (for use in assert statement) and + // in the Impl class. But both definitions must be the same. + typedef typename internal::Function::Result Result; + + // Asserts at compile time that F returns void. + static_assert(std::is_void::value, "Result type should be void."); + + return Action(new Impl(action_)); + } + + private: + template + class Impl : public ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const A& action) : action_(action) {} + + void Perform(const ArgumentTuple& args) override { + // Performs the action and ignores its result. + action_.Perform(args); + } + + private: + // Type OriginalFunction is the same as F except that its return + // type is IgnoredValue. + typedef + typename internal::Function::MakeResultIgnoredValue OriginalFunction; + + const Action action_; + }; + + const A action_; +}; + +template +struct WithArgsAction { + InnerAction inner_action; + + // The signature of the function as seen by the inner action, given an out + // action with the given result and argument types. + template + using InnerSignature = + R(typename std::tuple_element>::type...); + + // Rather than a call operator, we must define conversion operators to + // particular action types. This is necessary for embedded actions like + // DoDefault(), which rely on an action conversion operators rather than + // providing a call operator because even with a particular set of arguments + // they don't have a fixed return type. + + template < + typename R, typename... Args, + typename std::enable_if< + std::is_convertible>...)>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + struct OA { + OnceAction> inner_action; + + R operator()(Args&&... args) && { + return std::move(inner_action) + .Call(std::get( + std::forward_as_tuple(std::forward(args)...))...); + } + }; + + return OA{std::move(inner_action)}; + } + + template < + typename R, typename... Args, + typename std::enable_if< + std::is_convertible>...)>>::value, + int>::type = 0> + operator Action() const { // NOLINT + Action> converted(inner_action); + + return [converted](Args&&... args) -> R { + return converted.Perform(std::forward_as_tuple( + std::get(std::forward_as_tuple(std::forward(args)...))...)); + }; + } +}; + +template +class DoAllAction; + +// Base case: only a single action. +template +class DoAllAction { + public: + struct UserConstructorTag {}; + + template + explicit DoAllAction(UserConstructorTag, T&& action) + : final_action_(std::forward(action)) {} + + // Rather than a call operator, we must define conversion operators to + // particular action types. This is necessary for embedded actions like + // DoDefault(), which rely on an action conversion operators rather than + // providing a call operator because even with a particular set of arguments + // they don't have a fixed return type. + + template >::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + return std::move(final_action_); + } + + template < + typename R, typename... 
Args, + typename std::enable_if< + std::is_convertible>::value, + int>::type = 0> + operator Action() const { // NOLINT + return final_action_; + } + + private: + FinalAction final_action_; +}; + +// Recursive case: support N actions by calling the initial action and then +// calling through to the base class containing N-1 actions. +template +class DoAllAction + : private DoAllAction { + private: + using Base = DoAllAction; + + // The type of reference that should be provided to an initial action for a + // mocked function parameter of type T. + // + // There are two quirks here: + // + // * Unlike most forwarding functions, we pass scalars through by value. + // This isn't strictly necessary because an lvalue reference would work + // fine too and be consistent with other non-reference types, but it's + // perhaps less surprising. + // + // For example if the mocked function has signature void(int), then it + // might seem surprising for the user's initial action to need to be + // convertible to Action. This is perhaps less + // surprising for a non-scalar type where there may be a performance + // impact, or it might even be impossible, to pass by value. + // + // * More surprisingly, `const T&` is often not a const reference type. + // By the reference collapsing rules in C++17 [dcl.ref]/6, if T refers to + // U& or U&& for some non-scalar type U, then InitialActionArgType is + // U&. In other words, we may hand over a non-const reference. + // + // So for example, given some non-scalar type Obj we have the following + // mappings: + // + // T InitialActionArgType + // ------- ----------------------- + // Obj const Obj& + // Obj& Obj& + // Obj&& Obj& + // const Obj const Obj& + // const Obj& const Obj& + // const Obj&& const Obj& + // + // In other words, the initial actions get a mutable view of an non-scalar + // argument if and only if the mock function itself accepts a non-const + // reference type. They are never given an rvalue reference to an + // non-scalar type. + // + // This situation makes sense if you imagine use with a matcher that is + // designed to write through a reference. For example, if the caller wants + // to fill in a reference argument and then return a canned value: + // + // EXPECT_CALL(mock, Call) + // .WillOnce(DoAll(SetArgReferee<0>(17), Return(19))); + // + template + using InitialActionArgType = + typename std::conditional::value, T, const T&>::type; + + public: + struct UserConstructorTag {}; + + template + explicit DoAllAction(UserConstructorTag, T&& initial_action, + U&&... other_actions) + : Base({}, std::forward(other_actions)...), + initial_action_(std::forward(initial_action)) {} + + template ...)>>, + std::is_convertible>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + OnceAction...)> initial_action; + OnceAction remaining_actions; + + R operator()(Args... args) && { + std::move(initial_action) + .Call(static_cast>(args)...); + + return std::move(remaining_actions).Call(std::forward(args)...); + } + }; + + return OA{ + std::move(initial_action_), + std::move(static_cast(*this)), + }; + } + + template < + typename R, typename... Args, + typename std::enable_if< + conjunction< + // Both the initial action and the rest must support conversion to + // Action. 
+ std::is_convertible...)>>, + std::is_convertible>>::value, + int>::type = 0> + operator Action() const { // NOLINT + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + Action...)> initial_action; + Action remaining_actions; + + R operator()(Args... args) const { + initial_action.Perform(std::forward_as_tuple( + static_cast>(args)...)); + + return remaining_actions.Perform( + std::forward_as_tuple(std::forward(args)...)); + } + }; + + return OA{ + initial_action_, + static_cast(*this), + }; + } + + private: + InitialAction initial_action_; +}; + +template +struct ReturnNewAction { + T* operator()() const { + return internal::Apply( + [](const Params&... unpacked_params) { + return new T(unpacked_params...); + }, + params); + } + std::tuple params; +}; + +template +struct ReturnArgAction { + template ::type> + auto operator()(Args&&... args) const -> decltype(std::get( + std::forward_as_tuple(std::forward(args)...))) { + return std::get(std::forward_as_tuple(std::forward(args)...)); + } +}; + +template +struct SaveArgAction { + Ptr pointer; + + template + void operator()(const Args&... args) const { + *pointer = std::get(std::tie(args...)); + } +}; + +template +struct SaveArgPointeeAction { + Ptr pointer; + + template + void operator()(const Args&... args) const { + *pointer = *std::get(std::tie(args...)); + } +}; + +template +struct SetArgRefereeAction { + T value; + + template + void operator()(Args&&... args) const { + using argk_type = + typename ::std::tuple_element>::type; + static_assert(std::is_lvalue_reference::value, + "Argument must be a reference type."); + std::get(std::tie(args...)) = value; + } +}; + +template +struct SetArrayArgumentAction { + I1 first; + I2 last; + + template + void operator()(const Args&... args) const { + auto value = std::get(std::tie(args...)); + for (auto it = first; it != last; ++it, (void)++value) { + *value = *it; + } + } +}; + +template +struct DeleteArgAction { + template + void operator()(const Args&... args) const { + delete std::get(std::tie(args...)); + } +}; + +template +struct ReturnPointeeAction { + Ptr pointer; + template + auto operator()(const Args&...) const -> decltype(*pointer) { + return *pointer; + } +}; + +#if GTEST_HAS_EXCEPTIONS +template +struct ThrowAction { + T exception; + // We use a conversion operator to adapt to any return type. + template + operator Action() const { // NOLINT + T copy = exception; + return [copy](Args...) -> R { throw copy; }; + } +}; +struct RethrowAction { + std::exception_ptr exception; + template + operator Action() const { // NOLINT + return [ex = exception](Args...) -> R { std::rethrow_exception(ex); }; + } +}; +#endif // GTEST_HAS_EXCEPTIONS + +} // namespace internal + +// An Unused object can be implicitly constructed from ANY value. +// This is handy when defining actions that ignore some or all of the +// mock function arguments. For example, given +// +// MOCK_METHOD3(Foo, double(const string& label, double x, double y)); +// MOCK_METHOD3(Bar, double(int index, double x, double y)); +// +// instead of +// +// double DistanceToOriginWithLabel(const string& label, double x, double y) { +// return sqrt(x*x + y*y); +// } +// double DistanceToOriginWithIndex(int index, double x, double y) { +// return sqrt(x*x + y*y); +// } +// ... 
+// EXPECT_CALL(mock, Foo("abc", _, _)) +// .WillOnce(Invoke(DistanceToOriginWithLabel)); +// EXPECT_CALL(mock, Bar(5, _, _)) +// .WillOnce(Invoke(DistanceToOriginWithIndex)); +// +// you could write +// +// // We can declare any uninteresting argument as Unused. +// double DistanceToOrigin(Unused, double x, double y) { +// return sqrt(x*x + y*y); +// } +// ... +// EXPECT_CALL(mock, Foo("abc", _, _)).WillOnce(Invoke(DistanceToOrigin)); +// EXPECT_CALL(mock, Bar(5, _, _)).WillOnce(Invoke(DistanceToOrigin)); +typedef internal::IgnoredValue Unused; + +// Creates an action that does actions a1, a2, ..., sequentially in +// each invocation. All but the last action will have a readonly view of the +// arguments. +template +internal::DoAllAction::type...> DoAll( + Action&&... action) { + return internal::DoAllAction::type...>( + {}, std::forward(action)...); +} + +// WithArg(an_action) creates an action that passes the k-th +// (0-based) argument of the mock function to an_action and performs +// it. It adapts an action accepting one argument to one that accepts +// multiple arguments. For convenience, we also provide +// WithArgs(an_action) (defined below) as a synonym. +template +internal::WithArgsAction::type, k> WithArg( + InnerAction&& action) { + return {std::forward(action)}; +} + +// WithArgs(an_action) creates an action that passes +// the selected arguments of the mock function to an_action and +// performs it. It serves as an adaptor between actions with +// different argument lists. +template +internal::WithArgsAction::type, k, ks...> +WithArgs(InnerAction&& action) { + return {std::forward(action)}; +} + +// WithoutArgs(inner_action) can be used in a mock function with a +// non-empty argument list to perform inner_action, which takes no +// argument. In other words, it adapts an action accepting no +// argument to one that accepts (and ignores) arguments. +template +internal::WithArgsAction::type> WithoutArgs( + InnerAction&& action) { + return {std::forward(action)}; +} + +// Creates an action that returns a value. +// +// The returned type can be used with a mock function returning a non-void, +// non-reference type U as follows: +// +// * If R is convertible to U and U is move-constructible, then the action can +// be used with WillOnce. +// +// * If const R& is convertible to U and U is copy-constructible, then the +// action can be used with both WillOnce and WillRepeatedly. +// +// The mock expectation contains the R value from which the U return value is +// constructed (a move/copy of the argument to Return). This means that the R +// value will survive at least until the mock object's expectations are cleared +// or the mock object is destroyed, meaning that U can safely be a +// reference-like type such as std::string_view: +// +// // The mock function returns a view of a copy of the string fed to +// // Return. The view is valid even after the action is performed. +// MockFunction mock; +// EXPECT_CALL(mock, Call).WillOnce(Return(std::string("taco"))); +// const std::string_view result = mock.AsStdFunction()(); +// EXPECT_EQ("taco", result); +// +template +internal::ReturnAction Return(R value) { + return internal::ReturnAction(std::move(value)); +} + +// Creates an action that returns NULL. +inline PolymorphicAction ReturnNull() { + return MakePolymorphicAction(internal::ReturnNullAction()); +} + +// Creates an action that returns from a void function. 
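// A short usage sketch (illustrative): in a void-returning mock, Return() is
// typically the final action of a DoAll() chain, after side-effecting actions
// such as SaveArg:
//
//   int captured = 0;
//   MockFunction<void(int)> mock;
//   EXPECT_CALL(mock, Call(_))
//       .WillOnce(DoAll(SaveArg<0>(&captured), Return()));
//   mock.AsStdFunction()(42);  // captured == 42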
+inline PolymorphicAction Return() { + return MakePolymorphicAction(internal::ReturnVoidAction()); +} + +// Creates an action that returns the reference to a variable. +template +inline internal::ReturnRefAction ReturnRef(R& x) { // NOLINT + return internal::ReturnRefAction(x); +} + +// Prevent using ReturnRef on reference to temporary. +template +internal::ReturnRefAction ReturnRef(R&&) = delete; + +// Creates an action that returns the reference to a copy of the +// argument. The copy is created when the action is constructed and +// lives as long as the action. +template +inline internal::ReturnRefOfCopyAction ReturnRefOfCopy(const R& x) { + return internal::ReturnRefOfCopyAction(x); +} + +// DEPRECATED: use Return(x) directly with WillOnce. +// +// Modifies the parent action (a Return() action) to perform a move of the +// argument instead of a copy. +// Return(ByMove()) actions can only be executed once and will assert this +// invariant. +template +internal::ByMoveWrapper ByMove(R x) { + return internal::ByMoveWrapper(std::move(x)); +} + +// Creates an action that returns an element of `vals`. Calling this action will +// repeatedly return the next value from `vals` until it reaches the end and +// will restart from the beginning. +template +internal::ReturnRoundRobinAction ReturnRoundRobin(std::vector vals) { + return internal::ReturnRoundRobinAction(std::move(vals)); +} + +// Creates an action that returns an element of `vals`. Calling this action will +// repeatedly return the next value from `vals` until it reaches the end and +// will restart from the beginning. +template +internal::ReturnRoundRobinAction ReturnRoundRobin( + std::initializer_list vals) { + return internal::ReturnRoundRobinAction(std::vector(vals)); +} + +// Creates an action that does the default action for the give mock function. +inline internal::DoDefaultAction DoDefault() { + return internal::DoDefaultAction(); +} + +// Creates an action that sets the variable pointed by the N-th +// (0-based) function argument to 'value'. +template +internal::SetArgumentPointeeAction SetArgPointee(T value) { + return {std::move(value)}; +} + +// The following version is DEPRECATED. +template +internal::SetArgumentPointeeAction SetArgumentPointee(T value) { + return {std::move(value)}; +} + +// Creates an action that sets a pointer referent to a given value. +template +PolymorphicAction> Assign(T1* ptr, T2 val) { + return MakePolymorphicAction(internal::AssignAction(ptr, val)); +} + +#ifndef GTEST_OS_WINDOWS_MOBILE + +// Creates an action that sets errno and returns the appropriate error. +template +PolymorphicAction> SetErrnoAndReturn( + int errval, T result) { + return MakePolymorphicAction( + internal::SetErrnoAndReturnAction(errval, result)); +} + +#endif // !GTEST_OS_WINDOWS_MOBILE + +// Various overloads for Invoke(). + +// Legacy function. +// Actions can now be implicitly constructed from callables. No need to create +// wrapper objects. +// This function exists for backwards compatibility. +template +typename std::decay::type Invoke(FunctionImpl&& function_impl) { + return std::forward(function_impl); +} + +// Creates an action that invokes the given method on the given object +// with the mock function's arguments. +template +internal::InvokeMethodAction Invoke(Class* obj_ptr, + MethodPtr method_ptr) { + return {obj_ptr, method_ptr}; +} + +// Creates an action that invokes 'function_impl' with no argument. 
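// A short usage sketch (illustrative; SignalDone is a hypothetical free
// function): the mocked call's own arguments are ignored and the callable is
// invoked with no arguments.
//
//   void SignalDone();  // takes no arguments, returns void
//
//   MockFunction<void(int, double)> mock;
//   EXPECT_CALL(mock, Call(_, _)).WillOnce(InvokeWithoutArgs(SignalDone));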
+template +internal::InvokeWithoutArgsAction::type> +InvokeWithoutArgs(FunctionImpl function_impl) { + return {std::move(function_impl)}; +} + +// Creates an action that invokes the given method on the given object +// with no argument. +template +internal::InvokeMethodWithoutArgsAction InvokeWithoutArgs( + Class* obj_ptr, MethodPtr method_ptr) { + return {obj_ptr, method_ptr}; +} + +// Creates an action that performs an_action and throws away its +// result. In other words, it changes the return type of an_action to +// void. an_action MUST NOT return void, or the code won't compile. +template +inline internal::IgnoreResultAction IgnoreResult(const A& an_action) { + return internal::IgnoreResultAction(an_action); +} + +// Creates a reference wrapper for the given L-value. If necessary, +// you can explicitly specify the type of the reference. For example, +// suppose 'derived' is an object of type Derived, ByRef(derived) +// would wrap a Derived&. If you want to wrap a const Base& instead, +// where Base is a base class of Derived, just write: +// +// ByRef(derived) +// +// N.B. ByRef is redundant with std::ref, std::cref and std::reference_wrapper. +// However, it may still be used for consistency with ByMove(). +template +inline ::std::reference_wrapper ByRef(T& l_value) { // NOLINT + return ::std::reference_wrapper(l_value); +} + +// The ReturnNew(a1, a2, ..., a_k) action returns a pointer to a new +// instance of type T, constructed on the heap with constructor arguments +// a1, a2, ..., and a_k. The caller assumes ownership of the returned value. +template +internal::ReturnNewAction::type...> ReturnNew( + Params&&... params) { + return {std::forward_as_tuple(std::forward(params)...)}; +} + +// Action ReturnArg() returns the k-th argument of the mock function. +template +internal::ReturnArgAction ReturnArg() { + return {}; +} + +// Action SaveArg(pointer) saves the k-th (0-based) argument of the +// mock function to *pointer. +template +internal::SaveArgAction SaveArg(Ptr pointer) { + return {pointer}; +} + +// Action SaveArgPointee(pointer) saves the value pointed to +// by the k-th (0-based) argument of the mock function to *pointer. +template +internal::SaveArgPointeeAction SaveArgPointee(Ptr pointer) { + return {pointer}; +} + +// Action SetArgReferee(value) assigns 'value' to the variable +// referenced by the k-th (0-based) argument of the mock function. +template +internal::SetArgRefereeAction::type> SetArgReferee( + T&& value) { + return {std::forward(value)}; +} + +// Action SetArrayArgument(first, last) copies the elements in +// source range [first, last) to the array pointed to by the k-th +// (0-based) argument, which can be either a pointer or an +// iterator. The action does not take ownership of the elements in the +// source range. +template +internal::SetArrayArgumentAction SetArrayArgument(I1 first, + I2 last) { + return {first, last}; +} + +// Action DeleteArg() deletes the k-th (0-based) argument of the mock +// function. +template +internal::DeleteArgAction DeleteArg() { + return {}; +} + +// This action returns the value pointed to by 'pointer'. +template +internal::ReturnPointeeAction ReturnPointee(Ptr pointer) { + return {pointer}; +} + +#if GTEST_HAS_EXCEPTIONS +// Action Throw(exception) can be used in a mock function of any type +// to throw the given exception. Any copyable value can be thrown, +// except for std::exception_ptr, which is likely a mistake if +// thrown directly. 
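// A short usage sketch (illustrative): any copyable exception object may be
// thrown; here a std::runtime_error is used.
//
//   MockFunction<int(const std::string&)> mock;
//   EXPECT_CALL(mock, Call("missing.txt"))
//       .WillOnce(Throw(std::runtime_error("no such file")));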
+template +typename std::enable_if< + !std::is_base_of::type>::value, + internal::ThrowAction::type>>::type +Throw(T&& exception) { + return {std::forward(exception)}; +} +// Action Rethrow(exception_ptr) can be used in a mock function of any type +// to rethrow any exception_ptr. Note that the same object is thrown each time. +inline internal::RethrowAction Rethrow(std::exception_ptr exception) { + return {std::move(exception)}; +} +#endif // GTEST_HAS_EXCEPTIONS + +namespace internal { + +// A macro from the ACTION* family (defined later in gmock-generated-actions.h) +// defines an action that can be used in a mock function. Typically, +// these actions only care about a subset of the arguments of the mock +// function. For example, if such an action only uses the second +// argument, it can be used in any mock function that takes >= 2 +// arguments where the type of the second argument is compatible. +// +// Therefore, the action implementation must be prepared to take more +// arguments than it needs. The ExcessiveArg type is used to +// represent those excessive arguments. In order to keep the compiler +// error messages tractable, we define it in the testing namespace +// instead of testing::internal. However, this is an INTERNAL TYPE +// and subject to change without notice, so a user MUST NOT USE THIS +// TYPE DIRECTLY. +struct ExcessiveArg {}; + +// Builds an implementation of an Action<> for some particular signature, using +// a class defined by an ACTION* macro. +template +struct ActionImpl; + +template +struct ImplBase { + struct Holder { + // Allows each copy of the Action<> to get to the Impl. + explicit operator const Impl&() const { return *ptr; } + std::shared_ptr ptr; + }; + using type = typename std::conditional::value, + Impl, Holder>::type; +}; + +template +struct ActionImpl : ImplBase::type { + using Base = typename ImplBase::type; + using function_type = R(Args...); + using args_type = std::tuple; + + ActionImpl() = default; // Only defined if appropriate for Base. + explicit ActionImpl(std::shared_ptr impl) : Base{std::move(impl)} {} + + R operator()(Args&&... arg) const { + static constexpr size_t kMaxArgs = + sizeof...(Args) <= 10 ? sizeof...(Args) : 10; + return Apply(std::make_index_sequence{}, + std::make_index_sequence<10 - kMaxArgs>{}, + args_type{std::forward(arg)...}); + } + + template + R Apply(std::index_sequence, std::index_sequence, + const args_type& args) const { + // Impl need not be specific to the signature of action being implemented; + // only the implementing function body needs to have all of the specific + // types instantiated. Up to 10 of the args that are provided by the + // args_type get passed, followed by a dummy of unspecified type for the + // remainder up to 10 explicit args. + static constexpr ExcessiveArg kExcessArg{}; + return static_cast(*this) + .template gmock_PerformImpl< + /*function_type=*/function_type, /*return_type=*/R, + /*args_type=*/args_type, + /*argN_type=*/ + typename std::tuple_element::type...>( + /*args=*/args, std::get(args)..., + ((void)excess_id, kExcessArg)...); + } +}; + +// Stores a default-constructed Impl as part of the Action<>'s +// std::function<>. The Impl should be trivial to copy. +template +::testing::Action MakeAction() { + return ::testing::Action(ActionImpl()); +} + +// Stores just the one given instance of Impl. 
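Illustrative aside, not part of the patch: Throw() in use, plus a custom ACTION (the macro family referenced above) that reads only arg1 and therefore works with any mock function of two or more compatible arguments, which is exactly the excess-argument tolerance described here. Calc and MockCalc are invented for the sketch:

#include <stdexcept>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;
using ::testing::Throw;

// Uses only the second argument (arg1); excess arguments are ignored.
ACTION(ReturnSecondArg) { return arg1; }

class Calc {  // hypothetical interface, for illustration only
 public:
  virtual ~Calc() = default;
  virtual int Combine(int a, int b, int c) = 0;
};

class MockCalc : public Calc {
 public:
  MOCK_METHOD(int, Combine, (int, int, int), (override));
};

TEST(ActionsSketch, ThrowAndCustomAction) {
  MockCalc mock;
  EXPECT_CALL(mock, Combine(_, _, _))
      .WillOnce(ReturnSecondArg())
      .WillOnce(Throw(std::runtime_error("boom")));

  EXPECT_EQ(mock.Combine(1, 2, 3), 2);
  EXPECT_THROW(mock.Combine(4, 5, 6), std::runtime_error);
}
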
+template +::testing::Action MakeAction(std::shared_ptr impl) { + return ::testing::Action(ActionImpl(std::move(impl))); +} + +#define GMOCK_INTERNAL_ARG_UNUSED(i, data, el) \ + , GTEST_INTERNAL_ATTRIBUTE_MAYBE_UNUSED const arg##i##_type& arg##i +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_ \ + GTEST_INTERNAL_ATTRIBUTE_MAYBE_UNUSED const args_type& args GMOCK_PP_REPEAT( \ + GMOCK_INTERNAL_ARG_UNUSED, , 10) + +#define GMOCK_INTERNAL_ARG(i, data, el) , const arg##i##_type& arg##i +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_ \ + const args_type& args GMOCK_PP_REPEAT(GMOCK_INTERNAL_ARG, , 10) + +#define GMOCK_INTERNAL_TEMPLATE_ARG(i, data, el) , typename arg##i##_type +#define GMOCK_ACTION_TEMPLATE_ARGS_NAMES_ \ + GMOCK_PP_TAIL(GMOCK_PP_REPEAT(GMOCK_INTERNAL_TEMPLATE_ARG, , 10)) + +#define GMOCK_INTERNAL_TYPENAME_PARAM(i, data, param) , typename param##_type +#define GMOCK_ACTION_TYPENAME_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPENAME_PARAM, , params)) + +#define GMOCK_INTERNAL_TYPE_PARAM(i, data, param) , param##_type +#define GMOCK_ACTION_TYPE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_PARAM, , params)) + +#define GMOCK_INTERNAL_TYPE_GVALUE_PARAM(i, data, param) \ + , param##_type gmock_p##i +#define GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_GVALUE_PARAM, , params)) + +#define GMOCK_INTERNAL_GVALUE_PARAM(i, data, param) \ + , std::forward(gmock_p##i) +#define GMOCK_ACTION_GVALUE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GVALUE_PARAM, , params)) + +#define GMOCK_INTERNAL_INIT_PARAM(i, data, param) \ + , param(::std::forward(gmock_p##i)) +#define GMOCK_ACTION_INIT_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_INIT_PARAM, , params)) + +#define GMOCK_INTERNAL_FIELD_PARAM(i, data, param) param##_type param; +#define GMOCK_ACTION_FIELD_PARAMS_(params) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_FIELD_PARAM, , params) + +#define GMOCK_INTERNAL_ACTION(name, full_name, params) \ + template \ + class full_name { \ + public: \ + explicit full_name(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : impl_(std::make_shared( \ + GMOCK_ACTION_GVALUE_PARAMS_(params))) {} \ + full_name(const full_name&) = default; \ + full_name(full_name&&) noexcept = default; \ + template \ + operator ::testing::Action() const { \ + return ::testing::internal::MakeAction(impl_); \ + } \ + \ + private: \ + class gmock_Impl { \ + public: \ + explicit gmock_Impl(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : GMOCK_ACTION_INIT_PARAMS_(params) {} \ + template \ + return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ + GMOCK_ACTION_FIELD_PARAMS_(params) \ + }; \ + std::shared_ptr impl_; \ + }; \ + template \ + inline full_name name( \ + GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) GTEST_MUST_USE_RESULT_; \ + template \ + inline full_name name( \ + GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) { \ + return full_name( \ + GMOCK_ACTION_GVALUE_PARAMS_(params)); \ + } \ + template \ + template \ + return_type \ + full_name::gmock_Impl::gmock_PerformImpl( \ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +} // namespace internal + +// Similar to GMOCK_INTERNAL_ACTION, but no bound parameters are stored. 
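One more aside, not part of the patch: the parameter-storing machinery above is what the ACTION_P family (defined immediately after this aside) expands to. A sketch with an invented Adder interface and MockAdder class:

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;

// ACTION_P stores one bound parameter; here the action adds that
// parameter to the mock function's first argument.
ACTION_P(AddOffset, offset) { return arg0 + offset; }

class Adder {  // hypothetical interface, for illustration only
 public:
  virtual ~Adder() = default;
  virtual int Apply(int value) = 0;
};

class MockAdder : public Adder {
 public:
  MOCK_METHOD(int, Apply, (int), (override));
};

TEST(ActionsSketch, ParameterizedAction) {
  MockAdder mock;
  EXPECT_CALL(mock, Apply(_)).WillRepeatedly(AddOffset(10));
  EXPECT_EQ(mock.Apply(5), 15);
}
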
+#define ACTION(name) \ + class name##Action { \ + public: \ + explicit name##Action() noexcept {} \ + name##Action(const name##Action&) noexcept {} \ + template \ + operator ::testing::Action() const { \ + return ::testing::internal::MakeAction(); \ + } \ + \ + private: \ + class gmock_Impl { \ + public: \ + template \ + return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ + }; \ + }; \ + inline name##Action name() GTEST_MUST_USE_RESULT_; \ + inline name##Action name() { return name##Action(); } \ + template \ + return_type name##Action::gmock_Impl::gmock_PerformImpl( \ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP, (__VA_ARGS__)) + +#define ACTION_P2(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP2, (__VA_ARGS__)) + +#define ACTION_P3(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP3, (__VA_ARGS__)) + +#define ACTION_P4(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP4, (__VA_ARGS__)) + +#define ACTION_P5(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP5, (__VA_ARGS__)) + +#define ACTION_P6(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP6, (__VA_ARGS__)) + +#define ACTION_P7(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP7, (__VA_ARGS__)) + +#define ACTION_P8(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP8, (__VA_ARGS__)) + +#define ACTION_P9(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP9, (__VA_ARGS__)) + +#define ACTION_P10(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP10, (__VA_ARGS__)) + +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4100 + +#endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ diff --git a/vendor/googletest/googlemock/include/gmock/gmock-cardinalities.h b/vendor/googletest/googlemock/include/gmock/gmock-cardinalities.h new file mode 100644 index 000000000..533e604f3 --- /dev/null +++ b/vendor/googletest/googlemock/include/gmock/gmock-cardinalities.h @@ -0,0 +1,159 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used cardinalities. More +// cardinalities can be defined by the user implementing the +// CardinalityInterface interface if necessary. + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ + +#include + +#include +#include // NOLINT + +#include "gmock/internal/gmock-port.h" +#include "gtest/gtest.h" + +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ +/* class A needs to have dll-interface to be used by clients of class B */) + +namespace testing { + +// To implement a cardinality Foo, define: +// 1. a class FooCardinality that implements the +// CardinalityInterface interface, and +// 2. a factory function that creates a Cardinality object from a +// const FooCardinality*. +// +// The two-level delegation design follows that of Matcher, providing +// consistency for extension developers. It also eases ownership +// management as Cardinality objects can now be copied like plain values. + +// The implementation of a cardinality. +class CardinalityInterface { + public: + virtual ~CardinalityInterface() = default; + + // Conservative estimate on the lower/upper bound of the number of + // calls allowed. + virtual int ConservativeLowerBound() const { return 0; } + virtual int ConservativeUpperBound() const { return INT_MAX; } + + // Returns true if and only if call_count calls will satisfy this + // cardinality. + virtual bool IsSatisfiedByCallCount(int call_count) const = 0; + + // Returns true if and only if call_count calls will saturate this + // cardinality. + virtual bool IsSaturatedByCallCount(int call_count) const = 0; + + // Describes self to an ostream. + virtual void DescribeTo(::std::ostream* os) const = 0; +}; + +// A Cardinality is a copyable and IMMUTABLE (except by assignment) +// object that specifies how many times a mock function is expected to +// be called. The implementation of Cardinality is just a std::shared_ptr +// to const CardinalityInterface. Don't inherit from Cardinality! +class GTEST_API_ Cardinality { + public: + // Constructs a null cardinality. Needed for storing Cardinality + // objects in STL containers. + Cardinality() = default; + + // Constructs a Cardinality from its implementation. + explicit Cardinality(const CardinalityInterface* impl) : impl_(impl) {} + + // Conservative estimate on the lower/upper bound of the number of + // calls allowed. + int ConservativeLowerBound() const { return impl_->ConservativeLowerBound(); } + int ConservativeUpperBound() const { return impl_->ConservativeUpperBound(); } + + // Returns true if and only if call_count calls will satisfy this + // cardinality. 
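Illustrative aside, not part of the vendored header: a user-defined cardinality via CardinalityInterface and MakeCardinality, as the comment above suggests. The EvenNumber() helper and the mock named in the usage comment are invented for the sketch:

#include <ostream>

#include "gmock/gmock.h"

using ::testing::AtLeast;
using ::testing::Between;
using ::testing::Cardinality;
using ::testing::CardinalityInterface;
using ::testing::MakeCardinality;

// Satisfied by any even call count; never saturates.
class EvenNumberCardinality : public CardinalityInterface {
 public:
  bool IsSatisfiedByCallCount(int call_count) const override {
    return (call_count % 2) == 0;
  }
  bool IsSaturatedByCallCount(int /*call_count*/) const override {
    return false;
  }
  void DescribeTo(::std::ostream* os) const override {
    *os << "called an even number of times";
  }
};

Cardinality EvenNumber() { return MakeCardinality(new EvenNumberCardinality); }

// Usage, assuming some mock object 'mock' with a method Bar():
//   EXPECT_CALL(mock, Bar()).Times(EvenNumber());
//   EXPECT_CALL(mock, Bar()).Times(Between(2, 4));
//   EXPECT_CALL(mock, Bar()).Times(AtLeast(1));
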
+ bool IsSatisfiedByCallCount(int call_count) const { + return impl_->IsSatisfiedByCallCount(call_count); + } + + // Returns true if and only if call_count calls will saturate this + // cardinality. + bool IsSaturatedByCallCount(int call_count) const { + return impl_->IsSaturatedByCallCount(call_count); + } + + // Returns true if and only if call_count calls will over-saturate this + // cardinality, i.e. exceed the maximum number of allowed calls. + bool IsOverSaturatedByCallCount(int call_count) const { + return impl_->IsSaturatedByCallCount(call_count) && + !impl_->IsSatisfiedByCallCount(call_count); + } + + // Describes self to an ostream + void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); } + + // Describes the given actual call count to an ostream. + static void DescribeActualCallCountTo(int actual_call_count, + ::std::ostream* os); + + private: + std::shared_ptr impl_; +}; + +// Creates a cardinality that allows at least n calls. +GTEST_API_ Cardinality AtLeast(int n); + +// Creates a cardinality that allows at most n calls. +GTEST_API_ Cardinality AtMost(int n); + +// Creates a cardinality that allows any number of calls. +GTEST_API_ Cardinality AnyNumber(); + +// Creates a cardinality that allows between min and max calls. +GTEST_API_ Cardinality Between(int min, int max); + +// Creates a cardinality that allows exactly n calls. +GTEST_API_ Cardinality Exactly(int n); + +// Creates a cardinality from its implementation. +inline Cardinality MakeCardinality(const CardinalityInterface* c) { + return Cardinality(c); +} + +} // namespace testing + +GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 + +#endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ diff --git a/vendor/googletest/googlemock/include/gmock/gmock-function-mocker.h b/vendor/googletest/googlemock/include/gmock/gmock-function-mocker.h new file mode 100644 index 000000000..d2cb13cd8 --- /dev/null +++ b/vendor/googletest/googlemock/include/gmock/gmock-function-mocker.h @@ -0,0 +1,519 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements MOCK_METHOD. + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_FUNCTION_MOCKER_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_FUNCTION_MOCKER_H_ + +#include +#include // IWYU pragma: keep +#include // IWYU pragma: keep + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-internal-utils.h" +#include "gmock/internal/gmock-pp.h" + +namespace testing { +namespace internal { +template +using identity_t = T; + +template +struct ThisRefAdjuster { + template + using AdjustT = typename std::conditional< + std::is_const::type>::value, + typename std::conditional::value, + const T&, const T&&>::type, + typename std::conditional::value, T&, + T&&>::type>::type; + + template + static AdjustT Adjust(const MockType& mock) { + return static_cast>(const_cast(mock)); + } +}; + +constexpr bool PrefixOf(const char* a, const char* b) { + return *a == 0 || (*a == *b && internal::PrefixOf(a + 1, b + 1)); +} + +template +constexpr bool StartsWith(const char (&prefix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(prefix, str); +} + +template +constexpr bool EndsWith(const char (&suffix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(suffix, str + M - N); +} + +template +constexpr bool Equals(const char (&a)[N], const char (&b)[M]) { + return N == M && internal::PrefixOf(a, b); +} + +template +constexpr bool ValidateSpec(const char (&spec)[N]) { + return internal::Equals("const", spec) || + internal::Equals("override", spec) || + internal::Equals("final", spec) || + internal::Equals("noexcept", spec) || + (internal::StartsWith("noexcept(", spec) && + internal::EndsWith(")", spec)) || + internal::Equals("ref(&)", spec) || + internal::Equals("ref(&&)", spec) || + (internal::StartsWith("Calltype(", spec) && + internal::EndsWith(")", spec)); +} + +} // namespace internal + +// The style guide prohibits "using" statements in a namespace scope +// inside a header file. However, the FunctionMocker class template +// is meant to be defined in the ::testing namespace. The following +// line is just a trick for working around a bug in MSVC 8.0, which +// cannot handle it if we define FunctionMocker in ::testing. +using internal::FunctionMocker; +} // namespace testing + +#define MOCK_METHOD(...) \ + GMOCK_INTERNAL_WARNING_PUSH() \ + GMOCK_INTERNAL_WARNING_CLANG(ignored, "-Wunused-member-function") \ + GMOCK_PP_VARIADIC_CALL(GMOCK_INTERNAL_MOCK_METHOD_ARG_, __VA_ARGS__) \ + GMOCK_INTERNAL_WARNING_POP() + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_1(...) \ + GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_2(...) 
\ + GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_3(_Ret, _MethodName, _Args) \ + GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, ()) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \ + GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ + GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \ + GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ + GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ + GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \ + GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \ + GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_REF_SPEC(_Spec), \ + (GMOCK_INTERNAL_SIGNATURE(_Ret, _Args))) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_5(...) \ + GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_6(...) \ + GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_7(...) \ + GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__) + +#define GMOCK_INTERNAL_WRONG_ARITY(...) \ + static_assert( \ + false, \ + "MOCK_METHOD must be called with 3 or 4 arguments. _Ret, " \ + "_MethodName, _Args and optionally _Spec. _Args and _Spec must be " \ + "enclosed in parentheses. If _Ret is a type with unprotected commas, " \ + "it must also be enclosed in parentheses.") + +#define GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Tuple) \ + static_assert( \ + GMOCK_PP_IS_ENCLOSED_PARENS(_Tuple), \ + GMOCK_PP_STRINGIZE(_Tuple) " should be enclosed in parentheses.") + +#define GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE(_N, ...) \ + static_assert( \ + std::is_function<__VA_ARGS__>::value, \ + "Signature must be a function type, maybe return type contains " \ + "unprotected comma."); \ + static_assert( \ + ::testing::tuple_size::ArgumentTuple>::value == _N, \ + "This method does not take " GMOCK_PP_STRINGIZE( \ + _N) " arguments. 
Parenthesize all types with unprotected commas.") + +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT, ~, _Spec) + +#define GMOCK_INTERNAL_MOCK_METHOD_IMPL(_N, _MethodName, _Constness, \ + _Override, _Final, _NoexceptSpec, \ + _CallType, _RefSpec, _Signature) \ + typename ::testing::internal::Function::Result \ + GMOCK_INTERNAL_EXPAND(_CallType) \ + _MethodName(GMOCK_PP_REPEAT(GMOCK_INTERNAL_PARAMETER, _Signature, _N)) \ + GMOCK_PP_IF(_Constness, const, ) \ + _RefSpec _NoexceptSpec GMOCK_PP_IF(_Override, override, ) \ + GMOCK_PP_IF(_Final, final, ) { \ + GMOCK_MOCKER_(_N, _Constness, _MethodName) \ + .SetOwnerAndName(this, #_MethodName); \ + return GMOCK_MOCKER_(_N, _Constness, _MethodName) \ + .Invoke(GMOCK_PP_REPEAT(GMOCK_INTERNAL_FORWARD_ARG, _Signature, _N)); \ + } \ + ::testing::MockSpec gmock_##_MethodName( \ + GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_PARAMETER, _Signature, _N)) \ + GMOCK_PP_IF(_Constness, const, ) _RefSpec { \ + GMOCK_MOCKER_(_N, _Constness, _MethodName).RegisterOwner(this); \ + return GMOCK_MOCKER_(_N, _Constness, _MethodName) \ + .With(GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_ARGUMENT, , _N)); \ + } \ + ::testing::MockSpec gmock_##_MethodName( \ + const ::testing::internal::WithoutMatchers&, \ + GMOCK_PP_IF(_Constness, const, )::testing::internal::Function< \ + GMOCK_PP_REMOVE_PARENS(_Signature)>*) const _RefSpec _NoexceptSpec { \ + return ::testing::internal::ThisRefAdjuster::Adjust(*this) \ + .gmock_##_MethodName(GMOCK_PP_REPEAT( \ + GMOCK_INTERNAL_A_MATCHER_ARGUMENT, _Signature, _N)); \ + } \ + mutable ::testing::FunctionMocker \ + GMOCK_MOCKER_(_N, _Constness, _MethodName) + +#define GMOCK_INTERNAL_EXPAND(...) __VA_ARGS__ + +// Valid modifiers. +#define GMOCK_INTERNAL_HAS_CONST(_Tuple) \ + GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_CONST, ~, _Tuple)) + +#define GMOCK_INTERNAL_HAS_OVERRIDE(_Tuple) \ + GMOCK_PP_HAS_COMMA( \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_OVERRIDE, ~, _Tuple)) + +#define GMOCK_INTERNAL_HAS_FINAL(_Tuple) \ + GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_FINAL, ~, _Tuple)) + +#define GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT, ~, _Tuple) + +#define GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)), \ + _elem, ) + +#define GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE, ~, _Tuple) + +#define GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + +#define GMOCK_INTERNAL_GET_REF_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_REF_SPEC_IF_REF, ~, _Tuple) + +#define GMOCK_INTERNAL_REF_SPEC_IF_REF(_i, _, _elem) \ + GMOCK_PP_IF(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + +#ifdef GMOCK_INTERNAL_STRICT_SPEC_ASSERT +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + ::testing::internal::ValidateSpec(GMOCK_PP_STRINGIZE(_elem)), \ + "Token \'" GMOCK_PP_STRINGIZE( \ + _elem) "\' cannot be recognized as a valid specification " \ + "modifier. 
Is a ',' missing?"); +#else +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem))) == 1, \ + GMOCK_PP_STRINGIZE( \ + _elem) " cannot be recognized as a valid specification modifier."); +#endif // GMOCK_INTERNAL_STRICT_SPEC_ASSERT + +// Modifiers implementation. +#define GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CONST_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_CONST_I_const , + +#define GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_OVERRIDE_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_OVERRIDE_I_override , + +#define GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_FINAL_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_FINAL_I_final , + +#define GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_NOEXCEPT_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_NOEXCEPT_I_noexcept , + +#define GMOCK_INTERNAL_DETECT_REF(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_REF_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_REF_I_ref , + +#define GMOCK_INTERNAL_UNPACK_ref(x) x + +#define GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CALLTYPE_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_CALLTYPE_I_Calltype , + +#define GMOCK_INTERNAL_UNPACK_Calltype(...) __VA_ARGS__ + +// Note: The use of `identity_t` here allows _Ret to represent return types that +// would normally need to be specified in a different way. For example, a method +// returning a function pointer must be written as +// +// fn_ptr_return_t (*method(method_args_t...))(fn_ptr_args_t...) +// +// But we only support placing the return type at the beginning. To handle this, +// we wrap all calls in identity_t, so that a declaration will be expanded to +// +// identity_t method(method_args_t...) +// +// This allows us to work around the syntactic oddities of function/method +// types. +#define GMOCK_INTERNAL_SIGNATURE(_Ret, _Args) \ + ::testing::internal::identity_t( \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_TYPE, _, _Args)) + +#define GMOCK_INTERNAL_GET_TYPE(_i, _, _elem) \ + GMOCK_PP_COMMA_IF(_i) \ + GMOCK_PP_IF(GMOCK_PP_IS_BEGIN_PARENS(_elem), GMOCK_PP_REMOVE_PARENS, \ + GMOCK_PP_IDENTITY) \ + (_elem) + +#define GMOCK_INTERNAL_PARAMETER(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + GMOCK_INTERNAL_ARG_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \ + gmock_a##_i + +#define GMOCK_INTERNAL_FORWARD_ARG(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + ::std::forward(gmock_a##_i) + +#define GMOCK_INTERNAL_MATCHER_PARAMETER(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + GMOCK_INTERNAL_MATCHER_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \ + gmock_a##_i + +#define GMOCK_INTERNAL_MATCHER_ARGUMENT(_i, _1, _2) \ + GMOCK_PP_COMMA_IF(_i) \ + gmock_a##_i + +#define GMOCK_INTERNAL_A_MATCHER_ARGUMENT(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + ::testing::A() + +#define GMOCK_INTERNAL_ARG_O(_i, ...) \ + typename ::testing::internal::Function<__VA_ARGS__>::template Arg<_i>::type + +#define GMOCK_INTERNAL_MATCHER_O(_i, ...) 
\ + const ::testing::Matcher::template Arg<_i>::type>& + +#define MOCK_METHOD0(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 0, __VA_ARGS__) +#define MOCK_METHOD1(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 1, __VA_ARGS__) +#define MOCK_METHOD2(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 2, __VA_ARGS__) +#define MOCK_METHOD3(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 3, __VA_ARGS__) +#define MOCK_METHOD4(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 4, __VA_ARGS__) +#define MOCK_METHOD5(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 5, __VA_ARGS__) +#define MOCK_METHOD6(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 6, __VA_ARGS__) +#define MOCK_METHOD7(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 7, __VA_ARGS__) +#define MOCK_METHOD8(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 8, __VA_ARGS__) +#define MOCK_METHOD9(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 9, __VA_ARGS__) +#define MOCK_METHOD10(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, , m, 10, __VA_ARGS__) + +#define MOCK_CONST_METHOD0(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 0, __VA_ARGS__) +#define MOCK_CONST_METHOD1(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 1, __VA_ARGS__) +#define MOCK_CONST_METHOD2(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 2, __VA_ARGS__) +#define MOCK_CONST_METHOD3(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 3, __VA_ARGS__) +#define MOCK_CONST_METHOD4(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 4, __VA_ARGS__) +#define MOCK_CONST_METHOD5(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 5, __VA_ARGS__) +#define MOCK_CONST_METHOD6(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 6, __VA_ARGS__) +#define MOCK_CONST_METHOD7(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 7, __VA_ARGS__) +#define MOCK_CONST_METHOD8(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 8, __VA_ARGS__) +#define MOCK_CONST_METHOD9(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 9, __VA_ARGS__) +#define MOCK_CONST_METHOD10(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 10, __VA_ARGS__) + +#define MOCK_METHOD0_T(m, ...) MOCK_METHOD0(m, __VA_ARGS__) +#define MOCK_METHOD1_T(m, ...) MOCK_METHOD1(m, __VA_ARGS__) +#define MOCK_METHOD2_T(m, ...) MOCK_METHOD2(m, __VA_ARGS__) +#define MOCK_METHOD3_T(m, ...) MOCK_METHOD3(m, __VA_ARGS__) +#define MOCK_METHOD4_T(m, ...) MOCK_METHOD4(m, __VA_ARGS__) +#define MOCK_METHOD5_T(m, ...) MOCK_METHOD5(m, __VA_ARGS__) +#define MOCK_METHOD6_T(m, ...) MOCK_METHOD6(m, __VA_ARGS__) +#define MOCK_METHOD7_T(m, ...) MOCK_METHOD7(m, __VA_ARGS__) +#define MOCK_METHOD8_T(m, ...) MOCK_METHOD8(m, __VA_ARGS__) +#define MOCK_METHOD9_T(m, ...) MOCK_METHOD9(m, __VA_ARGS__) +#define MOCK_METHOD10_T(m, ...) MOCK_METHOD10(m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T(m, ...) MOCK_CONST_METHOD0(m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T(m, ...) MOCK_CONST_METHOD1(m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T(m, ...) MOCK_CONST_METHOD2(m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T(m, ...) MOCK_CONST_METHOD3(m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T(m, ...) MOCK_CONST_METHOD4(m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T(m, ...) MOCK_CONST_METHOD5(m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T(m, ...) MOCK_CONST_METHOD6(m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T(m, ...) MOCK_CONST_METHOD7(m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T(m, ...) MOCK_CONST_METHOD8(m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T(m, ...) MOCK_CONST_METHOD9(m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T(m, ...) MOCK_CONST_METHOD10(m, __VA_ARGS__) + +#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) 
\ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 0, __VA_ARGS__) +#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 1, __VA_ARGS__) +#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 2, __VA_ARGS__) +#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 3, __VA_ARGS__) +#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 4, __VA_ARGS__) +#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 5, __VA_ARGS__) +#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 6, __VA_ARGS__) +#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 7, __VA_ARGS__) +#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 8, __VA_ARGS__) +#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 9, __VA_ARGS__) +#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 10, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 0, __VA_ARGS__) +#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 1, __VA_ARGS__) +#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 2, __VA_ARGS__) +#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 3, __VA_ARGS__) +#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 4, __VA_ARGS__) +#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 5, __VA_ARGS__) +#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 6, __VA_ARGS__) +#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 7, __VA_ARGS__) +#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 8, __VA_ARGS__) +#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 9, __VA_ARGS__) +#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 10, __VA_ARGS__) + +#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) 
\ + MOCK_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHODN(constness, ct, Method, args_num, ...) \ + GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ + args_num, ::testing::internal::identity_t<__VA_ARGS__>); \ + GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ + args_num, Method, GMOCK_PP_NARG0(constness), 0, 0, , ct, , \ + (::testing::internal::identity_t<__VA_ARGS__>)) + +#define GMOCK_MOCKER_(arity, constness, Method) \ + GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) + +#endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_FUNCTION_MOCKER_H_ diff --git a/vendor/googletest/googlemock/include/gmock/gmock-matchers.h b/vendor/googletest/googlemock/include/gmock/gmock-matchers.h new file mode 100644 index 000000000..063ee6ca2 --- /dev/null +++ b/vendor/googletest/googlemock/include/gmock/gmock-matchers.h @@ -0,0 +1,5624 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// The MATCHER* family of macros can be used in a namespace scope to +// define custom matchers easily. +// +// Basic Usage +// =========== +// +// The syntax +// +// MATCHER(name, description_string) { statements; } +// +// defines a matcher with the given name that executes the statements, +// which must return a bool to indicate if the match succeeds. Inside +// the statements, you can refer to the value being matched by 'arg', +// and refer to its type by 'arg_type'. +// +// The description string documents what the matcher does, and is used +// to generate the failure message when the match fails. Since a +// MATCHER() is usually defined in a header file shared by multiple +// C++ source files, we require the description to be a C-string +// literal to avoid possible side effects. It can be empty, in which +// case we'll use the sequence of words in the matcher name as the +// description. +// +// For example: +// +// MATCHER(IsEven, "") { return (arg % 2) == 0; } +// +// allows you to write +// +// // Expects mock_foo.Bar(n) to be called where n is even. +// EXPECT_CALL(mock_foo, Bar(IsEven())); +// +// or, +// +// // Verifies that the value of some_expression is even. +// EXPECT_THAT(some_expression, IsEven()); +// +// If the above assertion fails, it will print something like: +// +// Value of: some_expression +// Expected: is even +// Actual: 7 +// +// where the description "is even" is automatically calculated from the +// matcher name IsEven. +// +// Argument Type +// ============= +// +// Note that the type of the value being matched (arg_type) is +// determined by the context in which you use the matcher and is +// supplied to you by the compiler, so you don't need to worry about +// declaring it (nor can you). This allows the matcher to be +// polymorphic. For example, IsEven() can be used to match any type +// where the value of "(arg % 2) == 0" can be implicitly converted to +// a bool. In the "Bar(IsEven())" example above, if method Bar() +// takes an int, 'arg_type' will be int; if it takes an unsigned long, +// 'arg_type' will be unsigned long; and so on. +// +// Parameterizing Matchers +// ======================= +// +// Sometimes you'll want to parameterize the matcher. For that you +// can use another macro: +// +// MATCHER_P(name, param_name, description_string) { statements; } +// +// For example: +// +// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } +// +// will allow you to write: +// +// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); +// +// which may lead to this message (assuming n is 10): +// +// Value of: Blah("a") +// Expected: has absolute value 10 +// Actual: -9 +// +// Note that both the matcher description and its parameter are +// printed, making the message human-friendly. +// +// In the matcher definition body, you can write 'foo_type' to +// reference the type of a parameter named 'foo'. 
For example, in the +// body of MATCHER_P(HasAbsoluteValue, value) above, you can write +// 'value_type' to refer to the type of 'value'. +// +// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P$n to +// support multi-parameter matchers. +// +// Describing Parameterized Matchers +// ================================= +// +// The last argument to MATCHER*() is a string-typed expression. The +// expression can reference all of the matcher's parameters and a +// special bool-typed variable named 'negation'. When 'negation' is +// false, the expression should evaluate to the matcher's description; +// otherwise it should evaluate to the description of the negation of +// the matcher. For example, +// +// using testing::PrintToString; +// +// MATCHER_P2(InClosedRange, low, hi, +// std::string(negation ? "is not" : "is") + " in range [" + +// PrintToString(low) + ", " + PrintToString(hi) + "]") { +// return low <= arg && arg <= hi; +// } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: is in range [4, 6] +// ... +// Expected: is not in range [2, 4] +// +// If you specify "" as the description, the failure message will +// contain the sequence of words in the matcher name followed by the +// parameter values printed as a tuple. For example, +// +// MATCHER_P2(InClosedRange, low, hi, "") { ... } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: in closed range (4, 6) +// ... +// Expected: not (in closed range (2, 4)) +// +// Types of Matcher Parameters +// =========================== +// +// For the purpose of typing, you can view +// +// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } +// +// as shorthand for +// +// template +// FooMatcherPk +// Foo(p1_type p1, ..., pk_type pk) { ... } +// +// When you write Foo(v1, ..., vk), the compiler infers the types of +// the parameters v1, ..., and vk for you. If you are not happy with +// the result of the type inference, you can specify the types by +// explicitly instantiating the template, as in Foo(5, +// false). As said earlier, you don't get to (or need to) specify +// 'arg_type' as that's determined by the context in which the matcher +// is used. You can assign the result of expression Foo(p1, ..., pk) +// to a variable of type FooMatcherPk. This +// can be useful when composing matchers. +// +// While you can instantiate a matcher template with reference types, +// passing the parameters by pointer usually makes your code more +// readable. If, however, you still want to pass a parameter by +// reference, be aware that in the failure message generated by the +// matcher you will see the value of the referenced object but not its +// address. +// +// Explaining Match Results +// ======================== +// +// Sometimes the matcher description alone isn't enough to explain why +// the match has failed or succeeded. For example, when expecting a +// long string, it can be very helpful to also print the diff between +// the expected string and the actual one. 
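Illustrative aside, not part of the patch: a compilable sketch of a parameterized matcher whose description string handles 'negation' as described above. IsDivisibleBy is invented for the example:

#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Not;
using ::testing::PrintToString;

// The description switches wording based on 'negation' and embeds the
// bound parameter via PrintToString.
MATCHER_P(IsDivisibleBy, n,
          std::string(negation ? "isn't" : "is") + " divisible by " +
              PrintToString(n)) {
  return (arg % n) == 0;
}

TEST(MatcherSketch, ParameterizedMatcher) {
  EXPECT_THAT(12, IsDivisibleBy(3));
  EXPECT_THAT(13, Not(IsDivisibleBy(3)));
}
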
To achieve that, you can +// optionally stream additional information to a special variable +// named result_listener, whose type is a pointer to class +// MatchResultListener: +// +// MATCHER_P(EqualsLongString, str, "") { +// if (arg == str) return true; +// +// *result_listener << "the difference: " +/// << DiffStrings(str, arg); +// return false; +// } +// +// Overloading Matchers +// ==================== +// +// You can overload matchers with different numbers of parameters: +// +// MATCHER_P(Blah, a, description_string1) { ... } +// MATCHER_P2(Blah, a, b, description_string2) { ... } +// +// Caveats +// ======= +// +// When defining a new matcher, you should also consider implementing +// MatcherInterface or using MakePolymorphicMatcher(). These +// approaches require more work than the MATCHER* macros, but also +// give you more control on the types of the value being matched and +// the matcher parameters, which may leads to better compiler error +// messages when the matcher is used wrong. They also allow +// overloading matchers based on parameter types (as opposed to just +// based on the number of parameters). +// +// MATCHER*() can only be used in a namespace scope as templates cannot be +// declared inside of a local class. +// +// More Information +// ================ +// +// To learn more about using these macros, please search for 'MATCHER' +// on +// https://github.com/google/googletest/blob/main/docs/gmock_cook_book.md +// +// This file also implements some commonly used argument matchers. More +// matchers can be defined by the user implementing the +// MatcherInterface interface if necessary. +// +// See googletest/include/gtest/gtest-matchers.h for the definition of class +// Matcher, class MatcherInterface, and others. + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // NOLINT +#include +#include +#include +#include +#include + +#include "gmock/internal/gmock-internal-utils.h" +#include "gmock/internal/gmock-port.h" +#include "gmock/internal/gmock-pp.h" +#include "gtest/gtest.h" + +// MSVC warning C5046 is new as of VS2017 version 15.8. +#if defined(_MSC_VER) && _MSC_VER >= 1915 +#define GMOCK_MAYBE_5046_ 5046 +#else +#define GMOCK_MAYBE_5046_ +#endif + +GTEST_DISABLE_MSC_WARNINGS_PUSH_( + 4251 GMOCK_MAYBE_5046_ /* class A needs to have dll-interface to be used by + clients of class B */ + /* Symbol involving type with internal linkage not defined */) + +namespace testing { + +// To implement a matcher Foo for type T, define: +// 1. a class FooMatcherImpl that implements the +// MatcherInterface interface, and +// 2. a factory function that creates a Matcher object from a +// FooMatcherImpl*. +// +// The two-level delegation design makes it possible to allow a user +// to write "v" instead of "Eq(v)" where a Matcher is expected, which +// is impossible if we pass matchers by pointers. It also eases +// ownership management as Matcher objects can now be copied like +// plain values. + +// A match result listener that stores the explanation in a string. +class StringMatchResultListener : public MatchResultListener { + public: + StringMatchResultListener() : MatchResultListener(&ss_) {} + + // Returns the explanation accumulated so far. 
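A further aside, not part of the patch: the StringMatchResultListener class being defined here pairs with testing::ExplainMatchResult when you want to inspect a matcher's explanation directly, for example when testing your own matchers:

#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Contains;
using ::testing::ExplainMatchResult;
using ::testing::StringMatchResultListener;

TEST(MatcherSketch, CapturesExplanation) {
  StringMatchResultListener listener;
  const std::vector<int> values = {1, 2, 3};
  // ExplainMatchResult runs the matcher against the value and streams
  // any explanation into the listener.
  const bool matched = ExplainMatchResult(Contains(3), values, &listener);
  EXPECT_TRUE(matched) << listener.str();
  // For Contains(), listener.str() typically reports which element
  // matched, e.g. "whose element #2 matches".
}
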
+ std::string str() const { return ss_.str(); } + + // Clears the explanation accumulated so far. + void Clear() { ss_.str(""); } + + private: + ::std::stringstream ss_; + + StringMatchResultListener(const StringMatchResultListener&) = delete; + StringMatchResultListener& operator=(const StringMatchResultListener&) = + delete; +}; + +// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION +// and MUST NOT BE USED IN USER CODE!!! +namespace internal { + +// The MatcherCastImpl class template is a helper for implementing +// MatcherCast(). We need this helper in order to partially +// specialize the implementation of MatcherCast() (C++ allows +// class/struct templates to be partially specialized, but not +// function templates.). + +// This general version is used when MatcherCast()'s argument is a +// polymorphic matcher (i.e. something that can be converted to a +// Matcher but is not one yet; for example, Eq(value)) or a value (for +// example, "hello"). +template +class MatcherCastImpl { + public: + static Matcher Cast(const M& polymorphic_matcher_or_value) { + // M can be a polymorphic matcher, in which case we want to use + // its conversion operator to create Matcher. Or it can be a value + // that should be passed to the Matcher's constructor. + // + // We can't call Matcher(polymorphic_matcher_or_value) when M is a + // polymorphic matcher because it'll be ambiguous if T has an implicit + // constructor from M (this usually happens when T has an implicit + // constructor from any type). + // + // It won't work to unconditionally implicit_cast + // polymorphic_matcher_or_value to Matcher because it won't trigger + // a user-defined conversion from M to T if one exists (assuming M is + // a value). + return CastImpl(polymorphic_matcher_or_value, + std::is_convertible>{}, + std::is_convertible{}); + } + + private: + template + static Matcher CastImpl(const M& polymorphic_matcher_or_value, + std::true_type /* convertible_to_matcher */, + std::integral_constant) { + // M is implicitly convertible to Matcher, which means that either + // M is a polymorphic matcher or Matcher has an implicit constructor + // from M. In both cases using the implicit conversion will produce a + // matcher. + // + // Even if T has an implicit constructor from M, it won't be called because + // creating Matcher would require a chain of two user-defined conversions + // (first to create T from M and then to create Matcher from T). + return polymorphic_matcher_or_value; + } + + // M can't be implicitly converted to Matcher, so M isn't a polymorphic + // matcher. It's a value of a type implicitly convertible to T. Use direct + // initialization to create a matcher. + static Matcher CastImpl(const M& value, + std::false_type /* convertible_to_matcher */, + std::true_type /* convertible_to_T */) { + return Matcher(ImplicitCast_(value)); + } + + // M can't be implicitly converted to either Matcher or T. Attempt to use + // polymorphic matcher Eq(value) in this case. + // + // Note that we first attempt to perform an implicit cast on the value and + // only fall back to the polymorphic Eq() matcher afterwards because the + // latter calls bool operator==(const Lhs& lhs, const Rhs& rhs) in the end + // which might be undefined even when Rhs is implicitly convertible to Lhs + // (e.g. std::pair vs. std::pair). + // + // We don't define this method inline as we need the declaration of Eq(). 
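Illustrative aside, not part of the patch: the MatcherCastImpl helper described above backs the public MatcherCast<T>() function, which converts both polymorphic matchers and plain values into a monomorphic Matcher<T>. A small usage sketch:

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Ge;
using ::testing::Matcher;
using ::testing::MatcherCast;

TEST(MatcherSketch, MatcherCastUsage) {
  // A polymorphic matcher (Ge(5)) and a plain value (7) both become
  // Matcher<int> through MatcherCast.
  Matcher<int> from_poly = MatcherCast<int>(Ge(5));
  Matcher<int> from_value = MatcherCast<int>(7);
  EXPECT_TRUE(from_poly.Matches(10));
  EXPECT_TRUE(from_value.Matches(7));
}
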
+ static Matcher CastImpl(const M& value, + std::false_type /* convertible_to_matcher */, + std::false_type /* convertible_to_T */); +}; + +// This more specialized version is used when MatcherCast()'s argument +// is already a Matcher. This only compiles when type T can be +// statically converted to type U. +template +class MatcherCastImpl> { + public: + static Matcher Cast(const Matcher& source_matcher) { + return Matcher(new Impl(source_matcher)); + } + + private: + class Impl : public MatcherInterface { + public: + explicit Impl(const Matcher& source_matcher) + : source_matcher_(source_matcher) {} + + // We delegate the matching logic to the source matcher. + bool MatchAndExplain(T x, MatchResultListener* listener) const override { + using FromType = typename std::remove_cv::type>::type>::type; + using ToType = typename std::remove_cv::type>::type>::type; + // Do not allow implicitly converting base*/& to derived*/&. + static_assert( + // Do not trigger if only one of them is a pointer. That implies a + // regular conversion and not a down_cast. + (std::is_pointer::type>::value != + std::is_pointer::type>::value) || + std::is_same::value || + !std::is_base_of::value, + "Can't implicitly convert from to "); + + // Do the cast to `U` explicitly if necessary. + // Otherwise, let implicit conversions do the trick. + using CastType = + typename std::conditional::value, + T&, U>::type; + + return source_matcher_.MatchAndExplain(static_cast(x), + listener); + } + + void DescribeTo(::std::ostream* os) const override { + source_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + source_matcher_.DescribeNegationTo(os); + } + + private: + const Matcher source_matcher_; + }; +}; + +// This even more specialized version is used for efficiently casting +// a matcher to its own type. +template +class MatcherCastImpl> { + public: + static Matcher Cast(const Matcher& matcher) { return matcher; } +}; + +// Template specialization for parameterless Matcher. +template +class MatcherBaseImpl { + public: + MatcherBaseImpl() = default; + + template + operator ::testing::Matcher() const { // NOLINT(runtime/explicit) + return ::testing::Matcher(new + typename Derived::template gmock_Impl()); + } +}; + +// Template specialization for Matcher with parameters. +template