// third_party/benchmark/test/basic_test.cc (revision a8c51b3f)

#include "benchmark/benchmark.h"

#include <cassert>
#include <iterator>
#include <type_traits>

#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)

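// Benchmarks an empty body that only reads the current iteration count;
// DoNotOptimize keeps the read from being elided by the compiler.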
void BM_empty(benchmark::State& state) {
  for (auto _ : state) {
    auto iterations = state.iterations();
    benchmark::DoNotOptimize(iterations);
  }
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();

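// Spins through state.range(0) iterations of busy work inside the timed loop;
// registered via BASIC_BENCHMARK_TEST to run with args 8, 512 and 8192.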
void BM_spin_empty(benchmark::State& state) {
  for (auto _ : state) {
    for (auto x = 0; x < state.range(0); ++x) {
      benchmark::DoNotOptimize(x);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

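// Same spin loop, preceded by untimed spin work before the timed loop starts.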
void BM_spin_pause_before(benchmark::State& state) {
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();

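// Pauses timing at the top of each iteration so the first spin loop is
// excluded from the measurement, then resumes before the second spin loop.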
void BM_spin_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
    state.ResumeTiming();
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();

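// Calls PauseTiming()/ResumeTiming() back to back with nothing in between,
// exercising just the cost of toggling the timer.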
void BM_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    state.ResumeTiming();
  }
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

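// Spins inside the timed loop, then does additional untimed spin work after
// the loop has finished.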
void BM_spin_pause_after(benchmark::State& state) {
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

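// Combines the two cases above: untimed spin work both before and after the
// timed loop, with the same spin inside it.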
void BM_spin_pause_before_and_after(benchmark::State& state) {
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();

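// A completely empty timed loop.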
void BM_empty_stop_start(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

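// Drives the benchmark with the explicit KeepRunning() loop and checks that
// the manually maintained count matches state.iterations().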
void BM_KeepRunning(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  assert(iter_count == state.iterations());
  while (state.KeepRunning()) {
    ++iter_count;
  }
  assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

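// Drives the benchmark with KeepRunningBatch(), which advances the iteration
// count in fixed-size batches rather than one iteration at a time.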
void BM_KeepRunningBatch(benchmark::State& state) {
  // Choose a batch size >1000 to skip the typical runs with iteration
  // targets of 10, 100 and 1000.  If these are not actually skipped the
  // bug would be detectable as consecutive runs with the same iteration
  // count.  Below we assert that this does not happen.
  const benchmark::IterationCount batch_size = 1009;

  static benchmark::IterationCount prior_iter_count = 0;
  benchmark::IterationCount iter_count = 0;
  while (state.KeepRunningBatch(batch_size)) {
    iter_count += batch_size;
  }
  assert(state.iterations() == iter_count);

  // Verify that the iteration count always increases across runs (see
  // comment above).
  assert(iter_count == batch_size            // max_iterations == 1
         || iter_count > prior_iter_count);  // max_iterations > batch_size
  prior_iter_count = iter_count;
}
// Register with a fixed repetition count to establish the invariant that
// the iteration count should always change across runs.  This overrides
// the --benchmark_repetitions command line flag, which would otherwise
// cause this test to fail if set > 1.
BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);

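// Checks that the range-for interface runs exactly state.max_iterations
// iterations.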
void BM_RangedFor(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  for (auto _ : state) {
    ++iter_count;
  }
  assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);

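// The template benchmarks below register explicit instantiations directly
// with the BENCHMARK macro; they are guarded by BENCHMARK_HAS_CXX11.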
#ifdef BENCHMARK_HAS_CXX11
template <typename T>
void BM_OneTemplateFunc(benchmark::State& state) {
  auto arg = state.range(0);
  T sum = 0;
  for (auto _ : state) {
    sum += static_cast<T>(arg);
  }
}
BENCHMARK(BM_OneTemplateFunc<int>)->Arg(1);
BENCHMARK(BM_OneTemplateFunc<double>)->Arg(1);

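// Two-parameter variant; exercises registering an instantiation whose
// template argument list contains a comma.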
template <typename A, typename B>
void BM_TwoTemplateFunc(benchmark::State& state) {
  auto arg = state.range(0);
  A sum = 0;
  B prod = 1;
  for (auto _ : state) {
    sum += static_cast<A>(arg);
    prod *= static_cast<B>(arg);
  }
}
BENCHMARK(BM_TwoTemplateFunc<int, double>)->Arg(1);
BENCHMARK(BM_TwoTemplateFunc<double, int>)->Arg(1);

#endif  // BENCHMARK_HAS_CXX11

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(
    std::is_same<typename std::iterator_traits<
                     benchmark::State::StateIterator>::value_type,
                 typename benchmark::State::StateIterator::value_type>::value,
    "");

BENCHMARK_MAIN();