#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import os  # used by the unit-test fixtures below
import sys
import gbench
from gbench import util, report
from gbench.util import *
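
# Illustrative invocations (the paths below are hypothetical, not shipped
# inputs); each positional test argument may be an executable or a JSON dump:
#   compare.py benchmarks ./bench_old ./bench_new
#       compare every benchmark of the two inputs
#   compare.py filters ./bench BM_old BM_new
#       compare two filters within a single input
#   compare.py benchmarksfiltered ./bench_old BM_foo ./bench_new BM_foo
#       compare one filter of the first input against one of the second
# Passing the same input (or the same filter) twice measures run-to-run
# stdev rather than a real difference, as noted in main() below.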

def check_inputs(in1, in2, flags):
    """
    Perform checking on the user-provided inputs and diagnose any abnormalities.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)

def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')
    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The simplest use-case: compare the entire output of the two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters', help='Compare two filters applied to the same benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of the first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, which will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser
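
# Note: because the positional test arguments use argparse.FileType('r'),
# argparse opens each input while parsing, so a nonexistent path fails fast
# with a parser error. A minimal sketch of how parse_known_args() splits the
# command line (the file names are hypothetical and must exist on disk):
#
#   args, leftover = create_parser().parse_known_args(
#       ['benchmarks', 'run1.json', 'run2.json'])
#   # args.mode == 'benchmarks'; leftover holds any flags argparse did not
#   # recognize, which main() asserts to be empty.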

def main():
    # Parse the command line flags.
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        sys.exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the
        # stdev.

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev.

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev.

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # Should never happen: argparse rejects unknown subcommands.
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        sys.exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results.
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work.
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output.
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print(description)
    for ln in output_lines:
        print(ln)
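
# The self-tests below exercise only the argument parser; the __main__ guard
# at the bottom calls main(), so they do not run in the normal flow. Swapping
# in the commented-out unittest.main() call runs them instead.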

import unittest


class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')

if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;