Diffstat (limited to 'libcxx/utils/google-benchmark/test')
21 files changed, 1292 insertions, 128 deletions
diff --git a/libcxx/utils/google-benchmark/test/CMakeLists.txt b/libcxx/utils/google-benchmark/test/CMakeLists.txt index 14ba7a6e2da..efce3ba524a 100644 --- a/libcxx/utils/google-benchmark/test/CMakeLists.txt +++ b/libcxx/utils/google-benchmark/test/CMakeLists.txt @@ -1,6 +1,7 @@ # Enable the tests find_package(Threads REQUIRED) +include(CheckCXXCompilerFlag) # NOTE: Some tests use `<cassert>` to perform the test. Therefore we must # strip -DNDEBUG from the default CMake flags in DEBUG mode. @@ -27,7 +28,7 @@ if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) endif() -add_library(output_test_helper STATIC output_test_helper.cc) +add_library(output_test_helper STATIC output_test_helper.cc output_test.h) macro(compile_benchmark_test name) add_executable(${name} "${name}.cc") @@ -41,7 +42,6 @@ macro(compile_output_test name) ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_output_test) - # Demonstration executable compile_benchmark_test(benchmark_test) add_test(benchmark benchmark_test --benchmark_min_time=0.01) @@ -75,6 +75,11 @@ compile_benchmark_test(skip_with_error_test) add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01) compile_benchmark_test(donotoptimize_test) +# Some of the issues with DoNotOptimize only occur when optimization is enabled +check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) +if (BENCHMARK_HAS_O3_FLAG) + set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") +endif() add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01) compile_benchmark_test(fixture_test) @@ -92,15 +97,31 @@ add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01) compile_output_test(reporter_output_test) add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01) +compile_output_test(templated_fixture_test) +add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01) + +compile_output_test(user_counters_test) +add_test(user_counters_test user_counters_test --benchmark_min_time=0.01) + +compile_output_test(user_counters_tabular_test) +add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) + check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) if (BENCHMARK_HAS_CXX03_FLAG) - set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}") - string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}") - compile_benchmark_test(cxx03_test) set_target_properties(cxx03_test - PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}") + PROPERTIES + COMPILE_FLAGS "-std=c++03") + # libstdc++ provides different definitions within <map> between dialects. When + # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation + # causing the test to fail to compile. To prevent this we explicitly disable + # the warning. 
+ check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) + if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) + set_target_properties(cxx03_test + PROPERTIES + LINK_FLAGS "-Wno-odr") + endif() add_test(cxx03 cxx03_test --benchmark_min_time=0.01) endif() @@ -113,6 +134,29 @@ endif() compile_output_test(complexity_test) add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) +############################################################################### +# GoogleTest Unit Tests +############################################################################### + +if (BENCHMARK_ENABLE_GTEST_TESTS) + macro(compile_gtest name) + add_executable(${name} "${name}.cc") + if (TARGET googletest) + add_dependencies(${name} googletest) + endif() + target_link_libraries(${name} benchmark + "${GTEST_BOTH_LIBRARIES}" ${CMAKE_THREAD_LIBS_INIT}) + endmacro(compile_gtest) + + macro(add_gtest name) + compile_gtest(${name}) + add_test(${name} ${name}) + endmacro() + + add_gtest(statistics_test) +endif(BENCHMARK_ENABLE_GTEST_TESTS) + + # Add the coverage command(s) if(CMAKE_BUILD_TYPE) string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) diff --git a/libcxx/utils/google-benchmark/test/basic_test.cc b/libcxx/utils/google-benchmark/test/basic_test.cc index 22de007cb6d..3348781ce72 100644 --- a/libcxx/utils/google-benchmark/test/basic_test.cc +++ b/libcxx/utils/google-benchmark/test/basic_test.cc @@ -1,10 +1,10 @@ -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) void BM_empty(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } } @@ -12,7 +12,7 @@ BENCHMARK(BM_empty); BENCHMARK(BM_empty)->ThreadPerCpu(); void BM_spin_empty(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int x = 0; x < state.range(0); ++x) { benchmark::DoNotOptimize(x); } @@ -25,7 +25,7 @@ void BM_spin_pause_before(benchmark::State& state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before); BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); void BM_spin_pause_during(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { state.PauseTiming(); for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); @@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during); BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); void BM_pause_during(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { state.PauseTiming(); state.ResumeTiming(); } @@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime(); BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); void BM_spin_pause_after(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -77,7 +77,7 @@ void BM_spin_pause_before_and_after(benchmark::State& state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -90,10 +90,29 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); 
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); void BM_empty_stop_start(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_empty_stop_start); BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); -BENCHMARK_MAIN() + +void BM_KeepRunning(benchmark::State& state) { + size_t iter_count = 0; + while (state.KeepRunning()) { + ++iter_count; + } + assert(iter_count == state.max_iterations); +} +BENCHMARK(BM_KeepRunning); + +void BM_RangedFor(benchmark::State& state) { + size_t iter_count = 0; + for (auto _ : state) { + ++iter_count; + } + assert(iter_count == state.max_iterations); +} +BENCHMARK(BM_RangedFor); + +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/benchmark_test.cc b/libcxx/utils/google-benchmark/test/benchmark_test.cc index 57731331e6d..78802c8da60 100644 --- a/libcxx/utils/google-benchmark/test/benchmark_test.cc +++ b/libcxx/utils/google-benchmark/test/benchmark_test.cc @@ -42,7 +42,7 @@ double CalculatePi(int depth) { std::set<int> ConstructRandomSet(int size) { std::set<int> s; - for (int i = 0; i < size; ++i) s.insert(i); + for (int i = 0; i < size; ++i) s.insert(s.end(), i); return s; } @@ -53,7 +53,7 @@ std::vector<int>* test_vector = nullptr; static void BM_Factorial(benchmark::State& state) { int fac_42 = 0; - while (state.KeepRunning()) fac_42 = Factorial(8); + for (auto _ : state) fac_42 = Factorial(8); // Prevent compiler optimizations std::stringstream ss; ss << fac_42; @@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime(); static void BM_CalculatePiRange(benchmark::State& state) { double pi = 0.0; - while (state.KeepRunning()) pi = CalculatePi(state.range(0)); + for (auto _ : state) pi = CalculatePi(state.range(0)); std::stringstream ss; ss << pi; state.SetLabel(ss.str()); @@ -73,7 +73,7 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); static void BM_CalculatePi(benchmark::State& state) { static const int depth = 1024; - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(CalculatePi(depth)); } } @@ -82,22 +82,26 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32); BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); static void BM_SetInsert(benchmark::State& state) { - while (state.KeepRunning()) { + std::set<int> data; + for (auto _ : state) { state.PauseTiming(); - std::set<int> data = ConstructRandomSet(state.range(0)); + data = ConstructRandomSet(state.range(0)); state.ResumeTiming(); for (int j = 0; j < state.range(1); ++j) data.insert(rand()); } state.SetItemsProcessed(state.iterations() * state.range(1)); state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); } -BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}}); + +// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower, +// non-timed part of each iteration will make the benchmark take forever. +BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}}); template <typename Container, typename ValueType = typename Container::value_type> static void BM_Sequential(benchmark::State& state) { ValueType v = 42; - while (state.KeepRunning()) { + for (auto _ : state) { Container c; for (int i = state.range(0); --i;) c.push_back(v); } @@ -109,14 +113,14 @@ BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int) ->Range(1 << 0, 1 << 10); BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10); // Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond. 
-#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512); #endif static void BM_StringCompare(benchmark::State& state) { std::string s1(state.range(0), '-'); std::string s2(state.range(0), '-'); - while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2)); + for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); } BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); @@ -126,7 +130,7 @@ static void BM_SetupTeardown(benchmark::State& state) { test_vector = new std::vector<int>(); } int i = 0; - while (state.KeepRunning()) { + for (auto _ : state) { std::lock_guard<std::mutex> l(test_vector_mu); if (i % 2 == 0) test_vector->push_back(i); @@ -142,7 +146,7 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu(); static void BM_LongTest(benchmark::State& state) { double tracker = 0.0; - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) benchmark::DoNotOptimize(tracker += i); } @@ -159,7 +163,7 @@ static void BM_ParallelMemset(benchmark::State& state) { test_vector = new std::vector<int>(size); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = from; i < to; i++) { // No need to lock test_vector_mu as ranges // do not overlap between threads. @@ -179,7 +183,7 @@ static void BM_ManualTiming(benchmark::State& state) { std::chrono::duration<double, std::micro> sleep_duration{ static_cast<double>(microseconds)}; - while (state.KeepRunning()) { + for (auto _ : state) { auto start = std::chrono::high_resolution_clock::now(); // Simulate some useful workload with a sleep std::this_thread::sleep_for( @@ -197,11 +201,11 @@ static void BM_ManualTiming(benchmark::State& state) { BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime(); BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime(); -#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 template <class... Args> void BM_with_args(benchmark::State& state, Args&&...) 
{ - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44); @@ -213,24 +217,7 @@ void BM_non_template_args(benchmark::State& state, int, double) { } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); -static void BM_UserCounter(benchmark::State& state) { - static const int depth = 1024; - while (state.KeepRunning()) { - benchmark::DoNotOptimize(CalculatePi(depth)); - } - state.counters["Foo"] = 1; - state.counters["Bar"] = 2; - state.counters["Baz"] = 3; - state.counters["Bat"] = 5; -#ifdef BENCHMARK_HAS_CXX11 - state.counters.insert({{"Foo", 2}, {"Bar", 3}, {"Baz", 5}, {"Bat", 6}}); -#endif -} -BENCHMARK(BM_UserCounter)->Threads(8); -BENCHMARK(BM_UserCounter)->ThreadRange(1, 32); -BENCHMARK(BM_UserCounter)->ThreadPerCpu(); - -#endif // __cplusplus >= 201103L +#endif // BENCHMARK_HAS_CXX11 static void BM_DenseThreadRanges(benchmark::State& st) { switch (st.range(0)) { @@ -254,4 +241,4 @@ BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2); BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/complexity_test.cc b/libcxx/utils/google-benchmark/test/complexity_test.cc index 14e03b06eb1..89dfa580e6b 100644 --- a/libcxx/utils/google-benchmark/test/complexity_test.cc +++ b/libcxx/utils/google-benchmark/test/complexity_test.cc @@ -25,8 +25,8 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name, {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. {"^%rms_name %rms %rms[ ]*$", MR_Next}}); AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, - {"\"cpu_coefficient\": [0-9]+,$", MR_Next}, - {"\"real_coefficient\": [0-9]{1,5},$", MR_Next}, + {"\"cpu_coefficient\": %float,$", MR_Next}, + {"\"real_coefficient\": %float,$", MR_Next}, {"\"big_o\": \"%bigo\",$", MR_Next}, {"\"time_unit\": \"ns\"$", MR_Next}, {"}", MR_Next}, @@ -46,7 +46,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name, // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < 1024; ++i) { benchmark::DoNotOptimize(&i); } @@ -94,7 +94,7 @@ void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range(0)); const int item_not_in_vector = state.range(0) * 2; // Test worst case scenario (item not in vector) - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); } state.SetComplexityN(state.range(0)); @@ -129,7 +129,7 @@ ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n); static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range(0)); - while (state.KeepRunning()) { + for (auto _ : state) { std::sort(v.begin(), v.end()); } state.SetComplexityN(state.range(0)); @@ -141,7 +141,7 @@ BENCHMARK(BM_Complexity_O_N_log_N) BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int n) { return n * std::log2(n); }); + ->Complexity([](int n) { return n * log2(n); }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) diff --git a/libcxx/utils/google-benchmark/test/cxx03_test.cc b/libcxx/utils/google-benchmark/test/cxx03_test.cc index 
a79d964e17b..baa9ed9262b 100644 --- a/libcxx/utils/google-benchmark/test/cxx03_test.cc +++ b/libcxx/utils/google-benchmark/test/cxx03_test.cc @@ -8,6 +8,10 @@ #error C++11 or greater detected. Should be C++03. #endif +#ifdef BENCHMARK_HAS_CXX11 +#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined. +#endif + void BM_empty(benchmark::State& state) { while (state.KeepRunning()) { volatile std::size_t x = state.iterations(); @@ -39,10 +43,21 @@ void BM_template1(benchmark::State& state) { BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE1(BM_template1, int); +template <class T> +struct BM_Fixture : public ::benchmark::Fixture { +}; + +BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { + BM_empty(state); +} +BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { + BM_empty(state); +} + void BM_counters(benchmark::State& state) { BM_empty(state); state.counters["Foo"] = 2; } BENCHMARK(BM_counters); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/diagnostics_test.cc b/libcxx/utils/google-benchmark/test/diagnostics_test.cc index 1046730b0fc..dd64a336553 100644 --- a/libcxx/utils/google-benchmark/test/diagnostics_test.cc +++ b/libcxx/utils/google-benchmark/test/diagnostics_test.cc @@ -11,7 +11,7 @@ #include <stdexcept> #include "../src/check.h" -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #if defined(__GNUC__) && !defined(__EXCEPTIONS) #define TEST_HAS_NO_EXCEPTIONS @@ -47,7 +47,7 @@ void BM_diagnostic_test(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } @@ -57,6 +57,22 @@ void BM_diagnostic_test(benchmark::State& state) { } BENCHMARK(BM_diagnostic_test); + +void BM_diagnostic_test_keep_running(benchmark::State& state) { + static bool called_once = false; + + if (called_once == false) try_invalid_pause_resume(state); + + while(state.KeepRunning()) { + benchmark::DoNotOptimize(state.iterations()); + } + + if (called_once == false) try_invalid_pause_resume(state); + + called_once = true; +} +BENCHMARK(BM_diagnostic_test_keep_running); + int main(int argc, char* argv[]) { benchmark::internal::GetAbortHandler() = &TestHandler; benchmark::Initialize(&argc, argv); diff --git a/libcxx/utils/google-benchmark/test/donotoptimize_test.cc b/libcxx/utils/google-benchmark/test/donotoptimize_test.cc index b21187aadc2..a705654a269 100644 --- a/libcxx/utils/google-benchmark/test/donotoptimize_test.cc +++ b/libcxx/utils/google-benchmark/test/donotoptimize_test.cc @@ -9,6 +9,22 @@ std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); std::uint64_t double_up(const std::uint64_t x) { return x * 2; } } +// Using DoNotOptimize on types like BitRef seem to cause a lot of problems +// with the inline assembly on both GCC and Clang. 
+struct BitRef { + int index; + unsigned char &byte; + +public: + static BitRef Make() { + static unsigned char arr[2] = {}; + BitRef b(1, arr[0]); + return b; + } +private: + BitRef(int i, unsigned char& b) : index(i), byte(b) {} +}; + int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types @@ -29,5 +45,8 @@ int main(int, char*[]) { benchmark::DoNotOptimize(double_up(x)); - return 0; + // These tests are to e + benchmark::DoNotOptimize(BitRef::Make()); + BitRef lval = BitRef::Make(); + benchmark::DoNotOptimize(lval); } diff --git a/libcxx/utils/google-benchmark/test/filter_test.cc b/libcxx/utils/google-benchmark/test/filter_test.cc index 3a205295f09..0e27065c155 100644 --- a/libcxx/utils/google-benchmark/test/filter_test.cc +++ b/libcxx/utils/google-benchmark/test/filter_test.cc @@ -36,31 +36,31 @@ class TestReporter : public benchmark::ConsoleReporter { } // end namespace static void NoPrefix(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(NoPrefix); static void BM_Foo(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_Foo); static void BM_Bar(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_Bar); static void BM_FooBar(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_FooBar); static void BM_FooBa(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_FooBa); diff --git a/libcxx/utils/google-benchmark/test/fixture_test.cc b/libcxx/utils/google-benchmark/test/fixture_test.cc index bbc2f957902..1462b10f02f 100644 --- a/libcxx/utils/google-benchmark/test/fixture_test.cc +++ b/libcxx/utils/google-benchmark/test/fixture_test.cc @@ -28,7 +28,7 @@ class MyFixture : public ::benchmark::Fixture { BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { assert(data.get() != nullptr); assert(*data == 42); - while (st.KeepRunning()) { + for (auto _ : st) { } } @@ -37,7 +37,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { assert(data.get() != nullptr); assert(*data == 42); } - while (st.KeepRunning()) { + for (auto _ : st) { assert(data.get() != nullptr); assert(*data == 42); } @@ -46,4 +46,4 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/map_test.cc b/libcxx/utils/google-benchmark/test/map_test.cc index 83457c9981c..311d2d22b80 100644 --- a/libcxx/utils/google-benchmark/test/map_test.cc +++ b/libcxx/utils/google-benchmark/test/map_test.cc @@ -18,9 +18,10 @@ std::map<int, int> ConstructRandomMap(int size) { // Basic version. 
static void BM_MapLookup(benchmark::State& state) { const int size = state.range(0); - while (state.KeepRunning()) { + std::map<int, int> m; + for (auto _ : state) { state.PauseTiming(); - std::map<int, int> m = ConstructRandomMap(size); + m = ConstructRandomMap(size); state.ResumeTiming(); for (int i = 0; i < size; ++i) { benchmark::DoNotOptimize(m.find(rand() % size)); @@ -44,7 +45,7 @@ class MapFixture : public ::benchmark::Fixture { BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { const int size = state.range(0); - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < size; ++i) { benchmark::DoNotOptimize(m.find(rand() % size)); } @@ -53,4 +54,4 @@ BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { } BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc b/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc index 8e67b3b2a99..0a82382f3ca 100644 --- a/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc +++ b/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc @@ -43,7 +43,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { }; BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { int product = state.range(0) * state.range(1) * state.range(2); for (int x = 0; x < product; x++) { benchmark::DoNotOptimize(x); @@ -60,15 +60,15 @@ void BM_CheckDefaultArgument(benchmark::State& state) { // Test that the 'range()' without an argument is the same as 'range(0)'. assert(state.range() == state.range(0)); assert(state.range() != state.range(1)); - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); static void BM_MultipleRanges(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } } BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/options_test.cc b/libcxx/utils/google-benchmark/test/options_test.cc index bbbed288398..fdec69174ee 100644 --- a/libcxx/utils/google-benchmark/test/options_test.cc +++ b/libcxx/utils/google-benchmark/test/options_test.cc @@ -1,4 +1,4 @@ -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #include <chrono> #include <thread> @@ -8,13 +8,13 @@ #include <cassert> void BM_basic(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } void BM_basic_slow(benchmark::State& state) { std::chrono::milliseconds sleep_duration(state.range(0)); - while (state.KeepRunning()) { + for (auto _ : state) { std::this_thread::sleep_for( std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration)); } @@ -44,7 +44,7 @@ void CustomArgs(benchmark::internal::Benchmark* b) { BENCHMARK(BM_basic)->Apply(CustomArgs); -void BM_explicit_iteration_count(benchmark::State& st) { +void BM_explicit_iteration_count(benchmark::State& state) { // Test that benchmarks specified with an explicit iteration count are // only run once. static bool invoked_before = false; @@ -52,14 +52,14 @@ void BM_explicit_iteration_count(benchmark::State& st) { invoked_before = true; // Test that the requested iteration count is respected. 
- assert(st.max_iterations == 42); + assert(state.max_iterations == 42); size_t actual_iterations = 0; - while (st.KeepRunning()) + for (auto _ : state) ++actual_iterations; - assert(st.iterations() == st.max_iterations); - assert(st.iterations() == 42); + assert(state.iterations() == state.max_iterations); + assert(state.iterations() == 42); } BENCHMARK(BM_explicit_iteration_count)->Iterations(42); -BENCHMARK_MAIN() +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/output_test.h b/libcxx/utils/google-benchmark/test/output_test.h index 57d4397ad5d..897a13866ba 100644 --- a/libcxx/utils/google-benchmark/test/output_test.h +++ b/libcxx/utils/google-benchmark/test/output_test.h @@ -7,6 +7,8 @@ #include <string> #include <utility> #include <vector> +#include <functional> +#include <sstream> #include "../src/re.h" #include "benchmark/benchmark.h" @@ -59,6 +61,134 @@ int SetSubstitutions( void RunOutputTests(int argc, char* argv[]); // ========================================================================= // +// ------------------------- Results checking ------------------------------ // +// ========================================================================= // + +// Call this macro to register a benchmark for checking its results. This +// should be all that's needed. It subscribes a function to check the (CSV) +// results of a benchmark. This is done only after verifying that the output +// strings are really as expected. +// bm_name_pattern: a name or a regex pattern which will be matched against +// all the benchmark names. Matching benchmarks +// will be the subject of a call to checker_function +// checker_function: should be of type ResultsCheckFn (see below) +#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \ + size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) + +struct Results; +typedef std::function< void(Results const&) > ResultsCheckFn; + +size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); + +// Class holding the results of a benchmark. +// It is passed in calls to checker functions. +struct Results { + + // the benchmark name + std::string name; + // the benchmark fields + std::map< std::string, std::string > values; + + Results(const std::string& n) : name(n) {} + + int NumThreads() const; + + typedef enum { kCpuTime, kRealTime } BenchmarkTime; + + // get cpu_time or real_time in seconds + double GetTime(BenchmarkTime which) const; + + // get the real_time duration of the benchmark in seconds. + // it is better to use fuzzy float checks for this, as the float + // ASCII formatting is lossy. + double DurationRealTime() const { + return GetAs< double >("iterations") * GetTime(kRealTime); + } + // get the cpu_time duration of the benchmark in seconds + double DurationCPUTime() const { + return GetAs< double >("iterations") * GetTime(kCpuTime); + } + + // get the string for a result by name, or nullptr if the name + // is not found + const std::string* Get(const char* entry_name) const { + auto it = values.find(entry_name); + if(it == values.end()) return nullptr; + return &it->second; + } + + // get a result by name, parsed as a specific type. + // NOTE: for counters, use GetCounterAs instead. + template <class T> + T GetAs(const char* entry_name) const; + + // counters are written as doubles, so they have to be read first + // as a double, and only then converted to the asked type. 
+ template <class T> + T GetCounterAs(const char* entry_name) const { + double dval = GetAs< double >(entry_name); + T tval = static_cast< T >(dval); + return tval; + } +}; + +template <class T> +T Results::GetAs(const char* entry_name) const { + auto *sv = Get(entry_name); + CHECK(sv != nullptr && !sv->empty()); + std::stringstream ss; + ss << *sv; + T out; + ss >> out; + CHECK(!ss.fail()); + return out; +} + +//---------------------------------- +// Macros to help in result checking. Do not use them with arguments causing +// side-effects. + +#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ + CONCAT(CHECK_, relationship) \ + (entry.getfn< var_type >(var_name), (value)) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "expected (" << #var_type << ")" << (var_name) \ + << "=" << (entry).getfn< var_type >(var_name) \ + << " to be " #relationship " to " << (value) << "\n" + +// check with tolerance. eps_factor is the tolerance window, which is +// interpreted relative to value (eg, 0.1 means 10% of value). +#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ + CONCAT(CHECK_FLOAT_, relationship) \ + (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "expected (" << #var_type << ")" << (var_name) \ + << "=" << (entry).getfn< var_type >(var_name) \ + << " to be " #relationship " to " << (value) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "with tolerance of " << (eps_factor) * (value) \ + << " (" << (eps_factor)*100. << "%), " \ + << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \ + << " (" << (((entry).getfn< var_type >(var_name) - (value)) \ + / \ + ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \ + << "%)" + +#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ + _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) + +#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ + _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) + +#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ + _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) + +#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ + _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) + +// ========================================================================= // // --------------------------- Misc Utilities ------------------------------ // // ========================================================================= // diff --git a/libcxx/utils/google-benchmark/test/output_test_helper.cc b/libcxx/utils/google-benchmark/test/output_test_helper.cc index 54c028a67ba..24746f6d27f 100644 --- a/libcxx/utils/google-benchmark/test/output_test_helper.cc +++ b/libcxx/utils/google-benchmark/test/output_test_helper.cc @@ -2,10 +2,12 @@ #include <map> #include <memory> #include <sstream> +#include <cstring> #include "../src/check.h" // NOTE: check.h is for internal use only! 
#include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" +#include "../src/benchmark_api_internal.h" // ========================================================================= // // ------------------------------ Internals -------------------------------- // @@ -34,17 +36,25 @@ SubMap& GetSubstitutions() { static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, + // human-readable float + {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, {"%time", "[ ]*[0-9]{1,5} ns"}, {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"}, {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"}, + {"%csv_header", + "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," + "items_per_second,label,error_occurred,error_message"}, {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"}, {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, {"%csv_bytes_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, {"%csv_items_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"}, + {"%csv_bytes_items_report", + "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + + "," + safe_dec_re + ",,,"}, {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, {"%csv_label_report_end", ",,"}}; return map; @@ -140,8 +150,179 @@ class TestReporter : public benchmark::BenchmarkReporter { std::vector<benchmark::BenchmarkReporter *> reporters_; }; } + +} // end namespace internal + +// ========================================================================= // +// -------------------------- Results checking ----------------------------- // +// ========================================================================= // + +namespace internal { + +// Utility class to manage subscribers for checking benchmark results. +// It works by parsing the CSV output to read the results. 
+class ResultsChecker { + public: + + struct PatternAndFn : public TestCase { // reusing TestCase for its regexes + PatternAndFn(const std::string& rx, ResultsCheckFn fn_) + : TestCase(rx), fn(fn_) {} + ResultsCheckFn fn; + }; + + std::vector< PatternAndFn > check_patterns; + std::vector< Results > results; + std::vector< std::string > field_names; + + void Add(const std::string& entry_pattern, ResultsCheckFn fn); + + void CheckResults(std::stringstream& output); + + private: + + void SetHeader_(const std::string& csv_header); + void SetValues_(const std::string& entry_csv_line); + + std::vector< std::string > SplitCsv_(const std::string& line); + +}; + +// store the static ResultsChecker in a function to prevent initialization +// order problems +ResultsChecker& GetResultsChecker() { + static ResultsChecker rc; + return rc; +} + +// add a results checker for a benchmark +void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { + check_patterns.emplace_back(entry_pattern, fn); +} + +// check the results of all subscribed benchmarks +void ResultsChecker::CheckResults(std::stringstream& output) { + // first reset the stream to the start + { + auto start = std::ios::streampos(0); + // clear before calling tellg() + output.clear(); + // seek to zero only when needed + if(output.tellg() > start) output.seekg(start); + // and just in case + output.clear(); + } + // now go over every line and publish it to the ResultsChecker + std::string line; + bool on_first = true; + while (output.eof() == false) { + CHECK(output.good()); + std::getline(output, line); + if (on_first) { + SetHeader_(line); // this is important + on_first = false; + continue; + } + SetValues_(line); + } + // finally we can call the subscribed check functions + for(const auto& p : check_patterns) { + VLOG(2) << "--------------------------------\n"; + VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; + for(const auto& r : results) { + if(!p.regex->Match(r.name)) { + VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; + continue; + } else { + VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; + } + VLOG(1) << "Checking results of " << r.name << ": ... 
\n"; + p.fn(r); + VLOG(1) << "Checking results of " << r.name << ": OK.\n"; + } + } +} + +// prepare for the names in this header +void ResultsChecker::SetHeader_(const std::string& csv_header) { + field_names = SplitCsv_(csv_header); +} + +// set the values for a benchmark +void ResultsChecker::SetValues_(const std::string& entry_csv_line) { + if(entry_csv_line.empty()) return; // some lines are empty + CHECK(!field_names.empty()); + auto vals = SplitCsv_(entry_csv_line); + CHECK_EQ(vals.size(), field_names.size()); + results.emplace_back(vals[0]); // vals[0] is the benchmark name + auto &entry = results.back(); + for (size_t i = 1, e = vals.size(); i < e; ++i) { + entry.values[field_names[i]] = vals[i]; + } +} + +// a quick'n'dirty csv splitter (eliminating quotes) +std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) { + std::vector< std::string > out; + if(line.empty()) return out; + if(!field_names.empty()) out.reserve(field_names.size()); + size_t prev = 0, pos = line.find_first_of(','), curr = pos; + while(pos != line.npos) { + CHECK(curr > 0); + if(line[prev] == '"') ++prev; + if(line[curr-1] == '"') --curr; + out.push_back(line.substr(prev, curr-prev)); + prev = pos + 1; + pos = line.find_first_of(',', pos + 1); + curr = pos; + } + curr = line.size(); + if(line[prev] == '"') ++prev; + if(line[curr-1] == '"') --curr; + out.push_back(line.substr(prev, curr-prev)); + return out; +} + } // end namespace internal +size_t AddChecker(const char* bm_name, ResultsCheckFn fn) +{ + auto &rc = internal::GetResultsChecker(); + rc.Add(bm_name, fn); + return rc.results.size(); +} + +int Results::NumThreads() const { + auto pos = name.find("/threads:"); + if(pos == name.npos) return 1; + auto end = name.find('/', pos + 9); + std::stringstream ss; + ss << name.substr(pos + 9, end); + int num = 1; + ss >> num; + CHECK(!ss.fail()); + return num; +} + +double Results::GetTime(BenchmarkTime which) const { + CHECK(which == kCpuTime || which == kRealTime); + const char *which_str = which == kCpuTime ? "cpu_time" : "real_time"; + double val = GetAs< double >(which_str); + auto unit = Get("time_unit"); + CHECK(unit); + if(*unit == "ns") { + return val * 1.e-9; + } else if(*unit == "us") { + return val * 1.e-6; + } else if(*unit == "ms") { + return val * 1.e-3; + } else if(*unit == "s") { + return val; + } else { + CHECK(1 == 0) << "unknown time unit: " << *unit; + return 0; + } +} + // ========================================================================= // // -------------------------- Public API Definitions------------------------ // // ========================================================================= // @@ -186,7 +367,8 @@ int SetSubstitutions( void RunOutputTests(int argc, char* argv[]) { using internal::GetTestCaseList; benchmark::Initialize(&argc, argv); - benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None); + auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true); + benchmark::ConsoleReporter CR(options); benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; struct ReporterTest { @@ -231,4 +413,11 @@ void RunOutputTests(int argc, char* argv[]) { std::cout << "\n"; } + + // now that we know the output is as expected, we can dispatch + // the checks to subscribees. 
+ auto &csv = TestCases[2]; + // would use == but gcc spits a warning + CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + internal::GetResultsChecker().CheckResults(csv.out_stream); } diff --git a/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/libcxx/utils/google-benchmark/test/register_benchmark_test.cc index e9f8ea530c1..8ab2c299393 100644 --- a/libcxx/utils/google-benchmark/test/register_benchmark_test.cc +++ b/libcxx/utils/google-benchmark/test/register_benchmark_test.cc @@ -61,7 +61,7 @@ typedef benchmark::internal::Benchmark* ReturnVal; // Test RegisterBenchmark with no additional arguments //----------------------------------------------------------------------------// void BM_function(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_function); @@ -77,7 +77,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK void BM_extra_args(benchmark::State& st, const char* label) { - while (st.KeepRunning()) { + for (auto _ : st) { } st.SetLabel(label); } @@ -99,7 +99,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); struct CustomFixture { void operator()(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } } }; @@ -114,23 +114,23 @@ void TestRegistrationAtRuntime() { #endif #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK { - int x = 42; + const char* x = "42"; auto capturing_lam = [=](benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } - st.SetLabel(std::to_string(x)); + st.SetLabel(x); }; benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam); - AddCases({{"lambda_benchmark", "42"}}); + AddCases({{"lambda_benchmark", x}}); } #endif } -int main(int argc, char* argv[]) { +// Test that all benchmarks, registered at either during static init or runtime, +// are run and the results are passed to the reported. +void RunTestOne() { TestRegistrationAtRuntime(); - benchmark::Initialize(&argc, argv); - TestReporter test_reporter; benchmark::RunSpecifiedBenchmarks(&test_reporter); @@ -143,6 +143,40 @@ int main(int argc, char* argv[]) { ++EB; } assert(EB == ExpectedResults.end()); +} - return 0; +// Test that ClearRegisteredBenchmarks() clears all previously registered +// benchmarks. +// Also test that new benchmarks can be registered and ran afterwards. 
+void RunTestTwo() { + assert(ExpectedResults.size() != 0 && + "must have at least one registered benchmark"); + ExpectedResults.clear(); + benchmark::ClearRegisteredBenchmarks(); + + TestReporter test_reporter; + size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter); + assert(num_ran == 0); + assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end()); + + TestRegistrationAtRuntime(); + num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter); + assert(num_ran == ExpectedResults.size()); + + typedef benchmark::BenchmarkReporter::Run Run; + auto EB = ExpectedResults.begin(); + + for (Run const& run : test_reporter.all_runs_) { + assert(EB != ExpectedResults.end()); + EB->CheckRun(run); + ++EB; + } + assert(EB == ExpectedResults.end()); +} + +int main(int argc, char* argv[]) { + benchmark::Initialize(&argc, argv); + + RunTestOne(); + RunTestTwo(); } diff --git a/libcxx/utils/google-benchmark/test/reporter_output_test.cc b/libcxx/utils/google-benchmark/test/reporter_output_test.cc index cb52aec0c08..1620b31396f 100644 --- a/libcxx/utils/google-benchmark/test/reporter_output_test.cc +++ b/libcxx/utils/google-benchmark/test/reporter_output_test.cc @@ -13,16 +13,49 @@ ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {"^[-]+$", MR_Next}}); -ADD_CASES(TC_CSVOut, - {{"name,iterations,real_time,cpu_time,time_unit,bytes_per_second," - "items_per_second,label,error_occurred,error_message"}}); +static int AddContextCases() { + AddCases(TC_ConsoleErr, + { + {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default}, + {"Run on \\(%int X %float MHz CPU s\\)", MR_Next}, + }); + AddCases(TC_JSONOut, {{"^\\{", MR_Default}, + {"\"context\":", MR_Next}, + {"\"date\": \"", MR_Next}, + {"\"num_cpus\": %int,$", MR_Next}, + {"\"mhz_per_cpu\": %float,$", MR_Next}, + {"\"cpu_scaling_enabled\": ", MR_Next}, + {"\"caches\": \\[$", MR_Next}}); + auto const& Caches = benchmark::CPUInfo::Get().caches; + if (!Caches.empty()) { + AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}}); + } + for (size_t I = 0; I < Caches.size(); ++I) { + std::string num_caches_str = + Caches[I].num_sharing != 0 ? 
" \\(x%int\\)$" : "$"; + AddCases( + TC_ConsoleErr, + {{"L%int (Data|Instruction|Unified) %intK" + num_caches_str, MR_Next}}); + AddCases(TC_JSONOut, {{"\\{$", MR_Next}, + {"\"type\": \"", MR_Next}, + {"\"level\": %int,$", MR_Next}, + {"\"size\": %int,$", MR_Next}, + {"\"num_sharing\": %int$", MR_Next}, + {"}[,]{0,1}$", MR_Next}}); + } + + AddCases(TC_JSONOut, {{"],$"}}); + return 0; +} +int dummy_register = AddContextCases(); +ADD_CASES(TC_CSVOut, {{"%csv_header"}}); // ========================================================================= // // ------------------------ Testing Basic Output --------------------------- // // ========================================================================= // void BM_basic(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_basic); @@ -30,8 +63,8 @@ BENCHMARK(BM_basic); ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\"$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); @@ -41,20 +74,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); // ========================================================================= // void BM_bytes_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetBytesProcessed(1); } BENCHMARK(BM_bytes_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_bytes_per_second %console_report +%floatB/s$"}}); + {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %int$", MR_Next}, + {"\"bytes_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); @@ -63,20 +96,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); // ========================================================================= // void BM_items_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetItemsProcessed(1); } BENCHMARK(BM_items_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_items_per_second %console_report +%float items/s$"}}); + {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"items_per_second\": %int$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); @@ -85,7 +118,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); // ========================================================================= // void BM_label(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetLabel("some label"); } @@ -94,8 +127,8 @@ 
BENCHMARK(BM_label); ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"label\": \"some label\"$", MR_Next}, {"}", MR_Next}}); @@ -108,7 +141,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some " void BM_error(benchmark::State& state) { state.SkipWithError("message"); - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_error); @@ -125,7 +158,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}}); // ========================================================================= // void BM_no_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_no_arg_name)->Arg(3); @@ -138,7 +171,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); // ========================================================================= // void BM_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); @@ -151,7 +184,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); // ========================================================================= // void BM_arg_names(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); @@ -165,7 +198,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetComplexityN(state.range(0)); } @@ -181,30 +214,74 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, // Test that non-aggregate data is printed by default void BM_Repeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } +// need two repetitions min to be able to output any aggregate output +BENCHMARK(BM_Repeat)->Repetitions(2); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2_mean %console_report$"}, + {"^BM_Repeat/repeats:2_median %console_report$"}, + {"^BM_Repeat/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); +// but for two repetitions, mean and median is the same, so let's repeat.. 
BENCHMARK(BM_Repeat)->Repetitions(3); ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3_mean %console_report$"}, + {"^BM_Repeat/repeats:3_median %console_report$"}, {"^BM_Repeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_median\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); +// median differs between even/odd number of repetitions, so just to be sure +BENCHMARK(BM_Repeat)->Repetitions(4); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4_mean %console_report$"}, + {"^BM_Repeat/repeats:4_median %console_report$"}, + {"^BM_Repeat/repeats:4_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}}); // Test that a non-repeated test still prints non-aggregate results even when // only-aggregate reports have been requested void BM_RepeatOnce(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); @@ -214,23 +291,26 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); // Test that non-aggregate data is not reported void BM_SummaryRepeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^BM_SummaryRepeat/repeats:3_mean %console_report$"}, + {"^BM_SummaryRepeat/repeats:3_median %console_report$"}, {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}}); void BM_RepeatTimeUnit(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) 
{ } } BENCHMARK(BM_RepeatTimeUnit) @@ -240,18 +320,60 @@ BENCHMARK(BM_RepeatTimeUnit) ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"}, + {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"}, {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, {"\"time_unit\": \"us\",?$"}, + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, + {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, + {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"}, {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}}); // ========================================================================= // +// -------------------- Testing user-provided statistics ------------------- // +// ========================================================================= // + +const auto UserStatistics = [](const std::vector<double>& v) { + return v.back(); +}; +void BM_UserStats(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_UserStats) + ->Repetitions(3) + ->ComputeStatistics("", UserStatistics); +// check that user-provided stats is calculated, and is after the default-ones +// empty string as name is intentional, it would sort before anything else +ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3_mean %console_report$"}, + {"^BM_UserStats/repeats:3_median %console_report$"}, + {"^BM_UserStats/repeats:3_stddev %console_report$"}, + {"^BM_UserStats/repeats:3_ %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_mean\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_median\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_median\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_\",%csv_report$"}}); + +// ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // // ========================================================================= // diff --git a/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/libcxx/utils/google-benchmark/test/skip_with_error_test.cc index b74d33c5899..0c2f3481a85 100644 --- a/libcxx/utils/google-benchmark/test/skip_with_error_test.cc +++ b/libcxx/utils/google-benchmark/test/skip_with_error_test.cc @@ -70,6 +70,15 @@ void BM_error_before_running(benchmark::State& state) { BENCHMARK(BM_error_before_running); ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); +void BM_error_before_running_range_for(benchmark::State& state) { + state.SkipWithError("error message"); + for (auto _ : state) 
{ + assert(false); + } +} +BENCHMARK(BM_error_before_running_range_for); +ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); + void BM_error_during_running(benchmark::State& state) { int first_iter = true; while (state.KeepRunning()) { @@ -93,8 +102,31 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, {"/2/threads:4", false, ""}, {"/2/threads:8", false, ""}}); +void BM_error_during_running_ranged_for(benchmark::State& state) { + assert(state.max_iterations > 3 && "test requires at least a few iterations"); + int first_iter = true; + // NOTE: Users should not write the for loop explicitly. + for (auto It = state.begin(), End = state.end(); It != End; ++It) { + if (state.range(0) == 1) { + assert(first_iter); + first_iter = false; + state.SkipWithError("error message"); + // Test the unfortunate but documented behavior that the ranged-for loop + // doesn't automatically terminate when SkipWithError is set. + assert(++It != End); + break; // Required behavior + } + } +} +BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); +ADD_CASES("BM_error_during_running_ranged_for", + {{"/1/iterations:5", true, "error message"}, + {"/2/iterations:5", false, ""}}); + + + void BM_error_after_running(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } if (state.thread_index <= (state.threads / 2)) diff --git a/libcxx/utils/google-benchmark/test/statistics_test.cc b/libcxx/utils/google-benchmark/test/statistics_test.cc new file mode 100644 index 00000000000..b4d6abbb578 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/statistics_test.cc @@ -0,0 +1,61 @@ +//===---------------------------------------------------------------------===// +// statistics_test - Unit tests for src/statistics.cc +//===---------------------------------------------------------------------===// + +#include "../src/statistics.h" +#include "gtest/gtest.h" + +namespace { +TEST(StatisticsTest, Mean) { + std::vector<double> Inputs; + { + Inputs = {42, 42, 42, 42}; + double Res = benchmark::StatisticsMean(Inputs); + EXPECT_DOUBLE_EQ(Res, 42.0); + } + { + Inputs = {1, 2, 3, 4}; + double Res = benchmark::StatisticsMean(Inputs); + EXPECT_DOUBLE_EQ(Res, 2.5); + } + { + Inputs = {1, 2, 5, 10, 10, 14}; + double Res = benchmark::StatisticsMean(Inputs); + EXPECT_DOUBLE_EQ(Res, 7.0); + } +} + +TEST(StatisticsTest, Median) { + std::vector<double> Inputs; + { + Inputs = {42, 42, 42, 42}; + double Res = benchmark::StatisticsMedian(Inputs); + EXPECT_DOUBLE_EQ(Res, 42.0); + } + { + Inputs = {1, 2, 3, 4}; + double Res = benchmark::StatisticsMedian(Inputs); + EXPECT_DOUBLE_EQ(Res, 2.5); + } + { + Inputs = {1, 2, 5, 10, 10}; + double Res = benchmark::StatisticsMedian(Inputs); + EXPECT_DOUBLE_EQ(Res, 5.0); + } +} + +TEST(StatisticsTest, StdDev) { + std::vector<double> Inputs; + { + Inputs = {101, 101, 101, 101}; + double Res = benchmark::StatisticsStdDev(Inputs); + EXPECT_DOUBLE_EQ(Res, 0.0); + } + { + Inputs = {1, 2, 3}; + double Res = benchmark::StatisticsStdDev(Inputs); + EXPECT_DOUBLE_EQ(Res, 1.0); + } +} + +} // end namespace diff --git a/libcxx/utils/google-benchmark/test/templated_fixture_test.cc b/libcxx/utils/google-benchmark/test/templated_fixture_test.cc new file mode 100644 index 00000000000..ec5b4c0cc07 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/templated_fixture_test.cc @@ -0,0 +1,28 @@ + +#include "benchmark/benchmark.h" + +#include <cassert> +#include <memory> + 
+template<typename T> +class MyFixture : public ::benchmark::Fixture { +public: + MyFixture() : data(0) {} + + T data; +}; + +BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) { + for (auto _ : st) { + data += 1; + } +} + +BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { + for (auto _ : st) { + data += 1.0; + } +} +BENCHMARK_REGISTER_F(MyFixture, Bar); + +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc b/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc new file mode 100644 index 00000000000..9b8a6132e6d --- /dev/null +++ b/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc @@ -0,0 +1,250 @@ + +#undef NDEBUG + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// @todo: <jpmag> this checks the full output at once; the rule for +// CounterSet1 was failing because it was not matching "^[-]+$". +// @todo: <jpmag> check that the counters are vertically aligned. +ADD_CASES(TC_ConsoleOut, { +// keeping these lines long improves readability, so: +// clang-format off + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", 
MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next}, + {"^[-]+$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, +// clang-format on +}); +ADD_CASES(TC_CSVOut, {{"%csv_header," + "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +void BM_Counters_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", { 1, bm::Counter::kAvgThreads}}, + {"Bar", { 2, bm::Counter::kAvgThreads}}, + {"Baz", { 4, bm::Counter::kAvgThreads}}, + {"Bat", { 8, bm::Counter::kAvgThreads}}, + {"Frob", {16, bm::Counter::kAvgThreads}}, + {"Lob", {32, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckTabular(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4); + CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8); + CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16); + CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); + +// ========================================================================= // +// -------------------- Tabular+Rate Counters Output ----------------------- // +// ========================================================================= // + +void BM_CounterRates_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", { 1, bm::Counter::kAvgThreadsRate}}, + {"Bar", { 2, bm::Counter::kAvgThreadsRate}}, + {"Baz", { 4, bm::Counter::kAvgThreadsRate}}, + 
{"Bat", { 8, bm::Counter::kAvgThreadsRate}}, + {"Frob", {16, bm::Counter::kAvgThreadsRate}}, + {"Lob", {32, bm::Counter::kAvgThreadsRate}}, + }); +} +BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckTabularRate(Results const& e) { + double t = e.DurationCPUTime(); + CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", + &CheckTabularRate); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +// set only some of the counters +void BM_CounterSet0_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bar", {20, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report," + "%float,,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet0(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0); + +// again. 
+void BM_CounterSet1_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {15, bm::Counter::kAvgThreads}}, + {"Bar", {25, bm::Counter::kAvgThreads}}, + {"Baz", {45, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report," + "%float,,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet1(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15); + CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1); + +// ========================================================================= // +// ------------------------- Tabular Counters Output ----------------------- // +// ========================================================================= // + +// set only some of the counters, different set now. +void BM_CounterSet2_Tabular(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bat", {30, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, + }); +} +BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report," + ",%float,%float,%float,,"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSet2(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); + CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30); + CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); +} +CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/libcxx/utils/google-benchmark/test/user_counters_test.cc b/libcxx/utils/google-benchmark/test/user_counters_test.cc new file mode 100644 index 00000000000..06aafb1fa14 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/user_counters_test.cc @@ -0,0 +1,217 @@ + +#undef NDEBUG + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ---------------------- Testing Prologue Output -------------------------- // +// ========================================================================= // + 
+ADD_CASES(TC_ConsoleOut, + {{"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next}, + {"^[-]+$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}}); + +// ========================================================================= // +// ------------------------- Simple Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_Simple(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = 2 * (double)state.iterations(); +} +BENCHMARK(BM_Counters_Simple); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckSimple(Results const& e) { + double its = e.GetAs< double >("iterations"); + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + // check that the value of bar is within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2.*its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); + +// ========================================================================= // +// --------------------- Counters+Items+Bytes/s Output --------------------- // +// ========================================================================= // + +namespace { int num_calls1 = 0; } +void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = ++num_calls1; + state.SetBytesProcessed(364); + state.SetItemsProcessed(150); +} +BENCHMARK(BM_Counters_WithBytesAndItemsPSec); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Counters_WithBytesAndItemsPSec %console_report " + "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bytes_per_second\": %float,$", MR_Next}, + {"\"items_per_second\": %float,$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\"," + "%csv_bytes_items_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckBytesAndItemsPSec(Results const& e) { + double t = e.DurationCPUTime(); // this (and not real time) is the time used + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364./t, 0.001); + CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150./t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", + &CheckBytesAndItemsPSec); + +// ========================================================================= // +// 
------------------------- Rate Counters Output -------------------------- // +// ========================================================================= // + +void BM_Counters_Rate(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; +} +BENCHMARK(BM_Counters_Rate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckRate(Results const& e) { + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); + +// ========================================================================= // +// ------------------------- Thread Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_Threads(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = 2; +} +BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckThreads(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads()); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads()); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads); + +// ========================================================================= // +// ---------------------- ThreadAvg Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_AvgThreads(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; +} +BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", 
MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgThreads(Results const& e) { + CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); + CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", + &CheckAvgThreads); + +// ========================================================================= // +// ---------------------- ThreadAvg Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_AvgThreadsRate(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; +} +BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgThreadsRate(Results const& e) { + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./e.DurationCPUTime(), 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./e.DurationCPUTime(), 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", + &CheckAvgThreadsRate); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } |
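
The new BM_error_before_running_range_for and BM_error_during_running_ranged_for tests above pin down that state.SkipWithError() records the error but does not terminate the range-for loop by itself; the benchmark must return or break explicitly. A minimal sketch of the user-facing pattern these tests document (BM_ReadChunks and the file path are invented for illustration):

#include "benchmark/benchmark.h"

#include <cstddef>
#include <cstdio>

static void BM_ReadChunks(benchmark::State& state) {
  // Hypothetical input; the path exists only for the sake of the example.
  std::FILE* f = std::fopen("/tmp/benchmark_input.dat", "rb");
  if (f == nullptr) {
    // Before the loop: report the error and simply never enter it.
    state.SkipWithError("failed to open input file");
    return;
  }
  for (auto _ : state) {
    char buf[4096];
    std::size_t n = std::fread(buf, 1, sizeof(buf), f);
    if (n == 0) {
      // Inside the loop: SkipWithError() records the error, but the range-for
      // loop does not stop on its own, so break out explicitly.
      state.SkipWithError("short read");
      break;
    }
    benchmark::DoNotOptimize(n);
  }
  std::fclose(f);
}
BENCHMARK(BM_ReadChunks);

BENCHMARK_MAIN();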
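
The user-statistics cases added to reporter_output_test.cc exercise Benchmark::ComputeStatistics(), which registers an extra per-repetition statistic alongside the default mean/median/stddev. A rough sketch of how that hook is used outside the test harness (BM_SortSmallVector and the "max" statistic name are invented for illustration; the test deliberately uses an empty name to check sort order):

#include "benchmark/benchmark.h"

#include <algorithm>
#include <vector>

static void BM_SortSmallVector(benchmark::State& state) {
  std::vector<int> v(1024, 42);
  for (auto _ : state) {
    std::sort(v.begin(), v.end());
    benchmark::DoNotOptimize(v.data());
  }
}
// Three repetitions, plus an extra "max" row computed from the per-repetition
// results; the default mean/median/stddev rows are still emitted.
BENCHMARK(BM_SortSmallVector)
    ->Repetitions(3)
    ->ComputeStatistics("max", [](const std::vector<double>& v) {
      return *std::max_element(v.begin(), v.end());
    });

BENCHMARK_MAIN();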
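
The user_counters_test.cc and user_counters_tabular_test.cc cases drive state.counters and the benchmark::Counter flags (kIsRate, kAvgThreads, kAvgThreadsRate). A compact sketch of the intended usage under test, with invented counter names (batches, bytes, per_thread):

#include "benchmark/benchmark.h"

#include <cstdint>

static void BM_ProcessBatches(benchmark::State& state) {
  int64_t batches = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(++batches);
  }
  // Plain counter: the value is reported as-is.
  state.counters["batches"] = static_cast<double>(batches);
  // kIsRate: divided by the elapsed time and printed with a /s suffix.
  state.counters["bytes"] =
      benchmark::Counter(batches * 4096.0, benchmark::Counter::kIsRate);
  // kAvgThreads: averaged over the participating threads, as in the
  // kAvgThreads cases above.
  state.counters["per_thread"] =
      benchmark::Counter(1, benchmark::Counter::kAvgThreads);
}
BENCHMARK(BM_ProcessBatches)->Threads(2);

BENCHMARK_MAIN();

Passing --benchmark_counters_tabular=true, as the user_counters_tabular_test CMake rule above does, switches the console reporter from inline name=value pairs to one column per counter.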