// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <cassert>
#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

namespace benchmark {

// Print a list of benchmarks. This option overrides all other options.
BM_DEFINE_bool(benchmark_list_tests, false);

// A regular expression that specifies the set of benchmarks to execute. If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
BM_DEFINE_string(benchmark_filter, "");

// Specification of how long to run the benchmark.
//
// It can be either an exact number of iterations (specified as `<integer>x`)
// or a minimum number of seconds (specified as `<float>s`). If the latter
// format (i.e., min seconds) is used, the system may run the benchmark longer
// until the results are considered significant.
//
// For backward compatibility, the `s` suffix may be omitted, in which case
// the specified number is interpreted as the number of seconds.
//
// For CPU-time based tests, this is the lower bound
// on the total CPU time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of the number of threads.
BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr);
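
// For example (values are illustrative):
//   --benchmark_min_time=2.5s   // run each benchmark for at least 2.5 seconds
//   --benchmark_min_time=1000x  // run each benchmark for exactly 1000 iterations
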
// Minimum number of seconds a benchmark should be run before results should be
// taken into account. This can be necessary, e.g., for benchmarks of code that
// needs to fill some form of cache before its performance is of interest.
// Note: results gathered within this period are discarded and not used for
// reported statistics.
BM_DEFINE_double(benchmark_min_warmup_time, 0.0);

// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
BM_DEFINE_int32(benchmark_repetitions, 1);

// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
BM_DEFINE_bool(benchmark_enable_random_interleaving, false);

// Report the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are reported for
// repeated benchmarks. Affects all reporters.
BM_DEFINE_bool(benchmark_report_aggregates_only, false);

// Display the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are displayed for
// repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
// affects the display reporter, but *NOT* the file reporter, which will still
// contain all the output.
BM_DEFINE_bool(benchmark_display_aggregates_only, false);

// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_format, "console");

// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_out_format, "json");

// The file to write additional output to.
BM_DEFINE_string(benchmark_out, "");

// Whether to use colors in the output. Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
BM_DEFINE_string(benchmark_color, "auto");

// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
BM_DEFINE_bool(benchmark_counters_tabular, false);

// List of additional perf counters to collect, in libpfm format. For more
// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
BM_DEFINE_string(benchmark_perf_counters, "");

// Extra context to include in the output, formatted as comma-separated
// key-value pairs. Kept internal as it's only used for parsing from the
// environment or the command line.
BM_DEFINE_kvpairs(benchmark_context, {});
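
// For example (values are illustrative):
//   --benchmark_context=cpu_scaling=off,gpu=none
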
// Set the default time unit to use for reports.
// Valid values are 'ns', 'us', 'ms', or 's'.
BM_DEFINE_string(benchmark_time_unit, "");

// The level of verbose logging to output.
BM_DEFINE_int32(v, 0);

namespace internal {

std::map<std::string, std::string>* global_context = nullptr;

BENCHMARK_EXPORT
std::map<std::string, std::string>*& GetGlobalContext() {
  return global_context;
}

// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}

}  // namespace internal

State::State(std::string name, IterationCount max_iters,
             const std::vector<int64_t>& ranges, int thread_i, int n_threads,
             internal::ThreadTimer* timer, internal::ThreadManager* manager,
             internal::PerfCountersMeasurement* perf_counters_measurement)
    : total_iterations_(0),
      batch_leftover_(0),
      max_iterations(max_iters),
      started_(false),
      finished_(false),
      skipped_(internal::NotSkipped),
      range_(ranges),
      complexity_n_(0),
      name_(std::move(name)),
      thread_index_(thread_i),
      threads_(n_threads),
      timer_(timer),
      manager_(manager),
      perf_counters_measurement_(perf_counters_measurement) {
  BM_CHECK(max_iterations != 0) << "At least one iteration must be run";
  BM_CHECK_LT(thread_index_, threads_)
      << "thread_index must be less than threads";

  // Add counters with the correct flag now. If they were added with
  // `counters[name]` in `PauseTiming`, a new `Counter` would be inserted the
  // first time without the flag. Inserting them now also reduces the
  // allocations during the benchmark.
  if (perf_counters_measurement_) {
    for (const std::string& counter_name :
         perf_counters_measurement_->names()) {
      counters[counter_name] = Counter(0.0, Counter::kAvgIterations);
    }
  }
}

// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). However, GCC and Clang also warn about this use of offsetof,
// which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic push
#pragma nv_diag_suppress 1427
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic push
#pragma diag_suppress offset_in_non_POD_nonstandard
#endif

// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(
    offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(__clang__)
#pragma clang diagnostic pop
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic pop
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic pop
#endif

void State::PauseTiming() {
  // Add in time accumulated so far
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StopTimer();
  if (perf_counters_measurement_) {
    std::vector<std::pair<std::string, double>> measurements;
    if (!perf_counters_measurement_->Stop(measurements)) {
      BM_CHECK(false) << "Failed to read the values of perf counters.";
    }
    for (const auto& name_and_measurement : measurements) {
      const std::string& name = name_and_measurement.first;
      const double measurement = name_and_measurement.second;
      // Counter was inserted with the `kAvgIterations` flag by the
      // constructor.
      assert(counters.find(name) != counters.end());
      counters[name].value += measurement;
    }
  }
}

void State::ResumeTiming() {
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StartTimer();
  if (perf_counters_measurement_) {
    perf_counters_measurement_->Start();
  }
}

void State::SkipWithMessage(const std::string& msg) {
  skipped_ = internal::SkippedWithMessage;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

void State::SkipWithError(const std::string& msg) {
  skipped_ = internal::SkippedWithError;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}
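
// Usage sketch (illustrative): for a benchmark registered with
// ->UseManualTime(), report an externally measured duration once per
// iteration, e.g.:
//   state.SetIterationTime(elapsed_seconds);  // `elapsed_seconds` measured by the caller
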
void State::SetLabel(const std::string& label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}
void State::StartKeepRunning() {
  BM_CHECK(!started_ && !finished_);
  started_ = true;
  total_iterations_ = skipped() ? 0 : max_iterations;
  manager_->StartStopBarrier();
  if (!skipped()) ResumeTiming();
}

void State::FinishKeepRunning() {
  BM_CHECK(started_ && (!finished_ || skipped()));
  if (!skipped()) {
    PauseTiming();
  }
  // Total iterations has now wrapped around past 0. Fix this.
  total_iterations_ = 0;
  finished_ = true;
  manager_->StartStopBarrier();
}

namespace internal {
namespace {

// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
  if (!reporter) return;
  std::flush(reporter->GetOutputStream());
  std::flush(reporter->GetErrorStream());
}

// Reports in both display and file reporters.
void Report(BenchmarkReporter* display_reporter,
            BenchmarkReporter* file_reporter, const RunResults& run_results) {
  auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
                       const RunResults& results) {
    assert(reporter);
    // If there are no aggregates, do output non-aggregates.
    aggregates_only &= !results.aggregates_only.empty();
    if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
    if (!results.aggregates_only.empty())
      reporter->ReportRuns(results.aggregates_only);
  };

  report_one(display_reporter, run_results.display_report_aggregates_only,
             run_results);
  if (file_reporter)
    report_one(file_reporter, run_results.file_report_aggregates_only,
               run_results);

  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                   BenchmarkReporter* display_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  BM_CHECK(display_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  size_t stat_field_width = 0;
  for (const BenchmarkInstance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name().str().size());
    might_have_aggregates |= benchmark.repetitions() > 1;

    for (const auto& Stat : benchmark.statistics())
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
  }
  if (might_have_aggregates) name_field_width += 1 + stat_field_width;

  BenchmarkReporter::Context context;
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of each benchmark family.
  std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
      per_family_reports;

  if (display_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    FlushStreams(display_reporter);
    FlushStreams(file_reporter);

    size_t num_repetitions_total = 0;

    // This perfcounters object needs to be created before the runners vector
    // below so that it outlasts their lifetime.
    PerfCountersMeasurement perfcounters(
        StrSplit(FLAGS_benchmark_perf_counters, ','));

    // Vector of benchmarks to run
    std::vector<internal::BenchmarkRunner> runners;
    runners.reserve(benchmarks.size());

    // Count the number of benchmarks with threads to warn the user in case
    // performance counters are used.
    int benchmarks_with_threads = 0;

    // Loop through all benchmarks
    for (const BenchmarkInstance& benchmark : benchmarks) {
      BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
      if (benchmark.complexity() != oNone)
        reports_for_family = &per_family_reports[benchmark.family_index()];
      benchmarks_with_threads += (benchmark.threads() > 1);
      runners.emplace_back(benchmark, &perfcounters, reports_for_family);
      int num_repeats_of_this_instance = runners.back().GetNumRepeats();
      num_repetitions_total += num_repeats_of_this_instance;
      if (reports_for_family)
        reports_for_family->num_runs_total += num_repeats_of_this_instance;
    }
    assert(runners.size() == benchmarks.size() && "Unexpected runner count.");

    // The use of performance counters with threads would be unintuitive for
    // the average user, so we need to warn them about this case.
    if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) {
      GetErrorLogInstance()
          << "***WARNING*** There are " << benchmarks_with_threads
          << " benchmarks with threads and " << perfcounters.num_counters()
          << " performance counters were requested. Beware: counters will "
             "reflect the combined usage across all "
             "threads.\n";
    }

    std::vector<size_t> repetition_indices;
    repetition_indices.reserve(num_repetitions_total);
    for (size_t runner_index = 0, num_runners = runners.size();
         runner_index != num_runners; ++runner_index) {
      const internal::BenchmarkRunner& runner = runners[runner_index];
      std::fill_n(std::back_inserter(repetition_indices),
                  runner.GetNumRepeats(), runner_index);
    }
    assert(repetition_indices.size() == num_repetitions_total &&
           "Unexpected number of repetition indexes.");

    if (FLAGS_benchmark_enable_random_interleaving) {
      std::random_device rd;
      std::mt19937 g(rd());
      std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
    }
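
    // Illustrative example: with two runners repeating 2x and 1x
    // respectively, repetition_indices starts as {0, 0, 1}; shuffling may
    // yield e.g. {0, 1, 0}, interleaving the repetitions of different
    // benchmarks.
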
    for (size_t repetition_index : repetition_indices) {
      internal::BenchmarkRunner& runner = runners[repetition_index];
      runner.DoOneRepetition();
      if (runner.HasRepeatsRemaining()) continue;
      // FIXME: report each repetition separately, not all of them in bulk.

      display_reporter->ReportRunsConfig(
          runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
      if (file_reporter)
        file_reporter->ReportRunsConfig(
            runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());

      RunResults run_results = runner.GetResults();

      // Maybe calculate complexity report
      if (const auto* reports_for_family = runner.GetReportsForFamily()) {
        if (reports_for_family->num_runs_done ==
            reports_for_family->num_runs_total) {
          auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
          run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                             additional_run_stats.begin(),
                                             additional_run_stats.end());
          per_family_reports.erase(
              static_cast<int>(reports_for_family->Runs.front().family_index));
        }
      }

      Report(display_reporter, file_reporter, run_results);
    }
  }
  display_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations.
BENCHMARK_DISABLE_DEPRECATED_WARNING

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(output_opts));
  }
  if (name == "json") {
    return PtrType(new JSONReporter());
  }
  if (name == "csv") {
    return PtrType(new CSVReporter());
  }
  std::cerr << "Unexpected format: '" << name << "'\n";
  std::exit(1);
}

BENCHMARK_RESTORE_DEPRECATED_WARNING

}  // end namespace

bool IsZero(double n) {
  return std::abs(n) < std::numeric_limits<double>::epsilon();
}
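
// For example, IsZero(0.0) and IsZero(1e-300) are true, while IsZero(1e-9)
// is false, since std::numeric_limits<double>::epsilon() is roughly 2.2e-16.
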
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  auto is_benchmark_color = [force_no_color]() -> bool {
    if (force_no_color) {
      return false;
    }
    if (FLAGS_benchmark_color == "auto") {
      return IsColorTerminal();
    }
    return IsTruthyFlagValue(FLAGS_benchmark_color);
  };
  if (is_benchmark_color()) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

}  // end namespace internal

BenchmarkReporter* CreateDefaultDisplayReporter() {
  static auto default_display_reporter =
      internal::CreateReporter(FLAGS_benchmark_format,
                               internal::GetOutputOptions())
          .release();
  return default_display_reporter;
}

size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(std::string spec) {
  return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              std::string spec) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, file_reporter,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter,
                              std::string spec) {
  if (spec.empty() || spec == "all")
    spec = ".";  // Regexp that matches all benchmarks.

  // Setup the reporters.
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_display_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!display_reporter) {
    default_display_reporter.reset(CreateDefaultDisplayReporter());
    display_reporter = default_display_reporter.get();
  }
  auto& Out = display_reporter->GetOutputStream();
  auto& Err = display_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname.empty() && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    Out.flush();
    Err.flush();
    std::exit(1);
  }

  if (!fname.empty()) {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      Out.flush();
      Err.flush();
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular
                                          ? ConsoleReporter::OO_Tabular
                                          : ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::BenchmarkInstance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) {
    Out.flush();
    Err.flush();
    return 0;
  }

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    Out.flush();
    Err.flush();
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks)
      Out << benchmark.name().str() << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
  }

  Out.flush();
  Err.flush();
  return benchmarks.size();
}
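
// A minimal usage sketch (illustrative; this is essentially what the
// BENCHMARK_MAIN() macro expands to):
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//     benchmark::Shutdown();
//     return 0;
//   }
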
namespace {
// Stores the time unit benchmarks use by default.
TimeUnit default_time_unit = kNanosecond;
}  // namespace

TimeUnit GetDefaultTimeUnit() { return default_time_unit; }

void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; }

std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; }

void SetBenchmarkFilter(std::string value) {
  FLAGS_benchmark_filter = std::move(value);
}
; }
void RegisterMemoryManager(MemoryManager* manager) {
  internal::memory_manager = manager;
}

void AddCustomContext(const std::string& key, const std::string& value) {
  if (internal::global_context == nullptr) {
    internal::global_context = new std::map<std::string, std::string>();
  }
  if (!internal::global_context->emplace(key, value).second) {
    std::cerr << "Failed to add custom context \"" << key
              << "\" as it already exists with value \"" << value << "\"\n";
  }
}
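
// Illustrative: tag every report from this process with extra metadata:
//   benchmark::AddCustomContext("machine", "perf-lab-01");  // hypothetical values
// Note that keys must be unique; a duplicate key leaves the original value
// in place.
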
namespace internal {

void (*HelperPrintf)();

void PrintUsageAndExit() {
  HelperPrintf();
  exit(0);
}

void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) {
  if (time_unit_flag == "s") {
    return SetDefaultTimeUnit(kSecond);
  }
  if (time_unit_flag == "ms") {
    return SetDefaultTimeUnit(kMillisecond);
  }
  if (time_unit_flag == "us") {
    return SetDefaultTimeUnit(kMicrosecond);
  }
  if (time_unit_flag == "ns") {
    return SetDefaultTimeUnit(kNanosecond);
  }
  if (!time_unit_flag.empty()) {
    PrintUsageAndExit();
  }
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  BenchmarkReporter::Context::executable_name =
      (argc && *argc > 0) ? argv[0] : "unknown";
  for (int i = 1; argc && i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter",
                        &FLAGS_benchmark_filter) ||
        ParseStringFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseDoubleFlag(argv[i], "benchmark_min_warmup_time",
                        &FLAGS_benchmark_min_warmup_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
                      &FLAGS_benchmark_enable_random_interleaving) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
                      &FLAGS_benchmark_display_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format",
                        &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color",
                        &FLAGS_benchmark_color) ||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
                      &FLAGS_benchmark_counters_tabular) ||
        ParseStringFlag(argv[i], "benchmark_perf_counters",
                        &FLAGS_benchmark_perf_counters) ||
        ParseKeyValueFlag(argv[i], "benchmark_context",
                          &FLAGS_benchmark_context) ||
        ParseStringFlag(argv[i], "benchmark_time_unit",
                        &FLAGS_benchmark_time_unit) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  }
  SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit);
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
  for (const auto& kv : FLAGS_benchmark_context) {
    AddCustomContext(kv.first, kv.second);
  }
}

int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

}  // end namespace internal

std::string GetBenchmarkVersion() { return {BENCHMARK_VERSION}; }

void PrintDefaultHelp() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          " [--benchmark_filter=<regex>]\n"
          " [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
          " [--benchmark_min_warmup_time=<min_warmup_time>]\n"
          " [--benchmark_repetitions=<num_repetitions>]\n"
          " [--benchmark_enable_random_interleaving={true|false}]\n"
          " [--benchmark_report_aggregates_only={true|false}]\n"
          " [--benchmark_display_aggregates_only={true|false}]\n"
          " [--benchmark_format=<console|json|csv>]\n"
          " [--benchmark_out=<filename>]\n"
          " [--benchmark_out_format=<json|console|csv>]\n"
          " [--benchmark_color={auto|true|false}]\n"
          " [--benchmark_counters_tabular={true|false}]\n"
#if defined HAVE_LIBPFM
          " [--benchmark_perf_counters=<counter>,...]\n"
#endif
          " [--benchmark_context=<key>=<value>,...]\n"
          " [--benchmark_time_unit={ns|us|ms|s}]\n"
          " [--v=<verbosity>]\n");
}

void Initialize(int* argc, char** argv, void (*HelperPrintf)()) {
  internal::HelperPrintf = HelperPrintf;
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}

void Shutdown() { delete internal::global_context; }

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

}  // end namespace benchmark