chromium-blink-merge.git: tools/telemetry/telemetry/benchmark_runner.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses the command line, discovers the appropriate benchmarks, and runs them.

Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""
import difflib
import hashlib
import inspect
import json
import os
import sys

from telemetry import benchmark
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import discover
from telemetry import decorators


def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """Print benchmarks that are not filtered, in the same order as the
  |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the order given).
    possible_browser: the possible_browser instance used to check which
        benchmarks are enabled.
    output_pipe: the stream to which benchmarks are printed.
  """
  if not benchmarks:
    print >> output_pipe, 'No benchmarks found!'
    return
  b = None  # Stops pylint from complaining about an undefined variable below.
  if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
    assert False, '|benchmarks| param contains non benchmark class: %s' % b

  # Align the benchmark names to the longest one.
  format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
  disabled_benchmarks = []

  print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')
  for benchmark_class in benchmarks:
    if possible_browser and not decorators.IsEnabled(benchmark_class,
                                                     possible_browser)[0]:
      disabled_benchmarks.append(benchmark_class)
      continue
    print >> output_pipe, format_string % (
        benchmark_class.Name(), benchmark_class.Description())

  if disabled_benchmarks:
    print >> output_pipe
    print >> output_pipe, (
        'Disabled benchmarks for %s are (force run with -d):' %
        possible_browser.browser_type)
    for benchmark_class in disabled_benchmarks:
      print >> output_pipe, format_string % (
          benchmark_class.Name(), benchmark_class.Description())
  print >> output_pipe, (
      'Pass --browser to list benchmarks for another browser.')
  print >> output_pipe


def GetMostLikelyMatchedBenchmarks(all_benchmarks, input_benchmark_name):
  """Returns the list of benchmarks whose names most closely match
  |input_benchmark_name|.

  Args:
    all_benchmarks: the list of benchmark classes.
    input_benchmark_name: a string to be matched against the names of
        benchmarks in |all_benchmarks|.

  Returns:
    A list of benchmark classes whose names likely match
    |input_benchmark_name|, arranged in descending order of the similarity of
    their names to |input_benchmark_name|.
  """
  def MatchedWithBenchmarkInputNameScore(benchmark_class):
    return difflib.SequenceMatcher(
        isjunk=None,
        a=benchmark_class.Name(), b=input_benchmark_name).ratio()
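  # difflib.SequenceMatcher.ratio() returns a similarity score in [0, 1]
  # (2 * matches / total length of both strings); the 0.4 cutoff below just
  # filters out clearly unrelated names.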
  benchmarks_with_similar_names = [
      b for b in all_benchmarks if
      MatchedWithBenchmarkInputNameScore(b) > 0.4]
  ordered_list = sorted(benchmarks_with_similar_names,
                        key=MatchedWithBenchmarkInputNameScore,
                        reverse=True)
  return ordered_list


class Environment(object):
  """Contains information about the benchmark runtime environment.

  Attributes:
    top_level_dir: A dir that contains benchmark, page test, and/or user story
        set dirs and associated artifacts.
    benchmark_dirs: A list of dirs containing benchmarks.
    benchmark_aliases: A dict of name:alias string pairs to be matched against
        exactly during benchmark selection.
  """
  def __init__(self, top_level_dir, benchmark_dirs=None,
               benchmark_aliases=None):
    self._top_level_dir = top_level_dir
    self._benchmark_dirs = benchmark_dirs or []
    self._benchmark_aliases = benchmark_aliases or {}
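    # benchmark_aliases maps an alternate (user-typed) name to the real
    # benchmark name, e.g. {'alias_name': 'canonical_benchmark_name'}
    # (illustrative keys); aliases are only honored for exact-name matches in
    # _MatchBenchmarkName.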

  @property
  def top_level_dir(self):
    return self._top_level_dir

  @property
  def benchmark_dirs(self):
    return self._benchmark_dirs

  @property
  def benchmark_aliases(self):
    return self._benchmark_aliases


class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  usage = '[command]'

  def Run(self, args):
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0])
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
    print >> sys.stderr, 'Available commands are:'
    for command in _Commands():
      print >> sys.stderr, ' %-10s %s' % (
          command.Name(), command.Description())
    print >> sys.stderr, ('"%s help <command>" to see usage information '
                          'for a specific command.' % _ScriptName())
    return 0


class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    if not args.positional_args:
      args.benchmarks = _Benchmarks(environment)
    elif len(args.positional_args) == 1:
      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
                                            environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
    possible_browser = browser_finder.FindBrowser(args)
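    # For locally built browser types, also look up the matching 'reference'
    # browser so the JSON benchmark list below can emit <name>.reference steps.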
    if args.browser_type in (
        'exact', 'release', 'release_x64', 'debug', 'debug_x64', 'canary'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0


class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After move to argparse, add command-line args for all
      # benchmarks to subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      print >> sys.stderr
      most_likely_matched_benchmarks = GetMostLikelyMatchedBenchmarks(
          all_benchmarks, input_benchmark_name)
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Did you mean any of the benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    cls._benchmark = benchmark_class

  def Run(self, args):
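    # Process exit codes are limited to 8 bits on most platforms, so clamp the
    # benchmark's return value (typically a failure count) to 255.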
    return min(255, self._benchmark().Run(args))


def _ScriptName():
  return os.path.basename(sys.argv[0])


def _Commands():
  """Generates all classes in this module that subclass Command."""
  for _, cls in inspect.getmembers(sys.modules[__name__]):
    if not inspect.isclass(cls):
      continue
    if not issubclass(cls, command_line.Command):
      continue
    yield cls


def _MatchingCommands(string):
  return [command for command in _Commands()
          if command.Name().startswith(string)]


@decorators.Cache
def _Benchmarks(environment):
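  # Discovery scans every directory in environment.benchmark_dirs for
  # Benchmark subclasses; @decorators.Cache memoizes the result so repeated
  # calls (listing, matching, running) do not rescan the tree.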
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks


def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
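  # _Matches treats |input_string| as a prefix of either the full benchmark
  # name or any dot-separated component of it, so e.g. 'memory' or 'top_25'
  # would match a benchmark named 'memory.top_25' (illustrative name).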
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    for part in search_string.split('.'):
      if part.startswith(input_string):
        return True
    return False

  # Exact matching.
  if exact_matches:
    # Don't add aliases to search dict, only allow exact matching for them.
    if input_benchmark_name in environment.benchmark_aliases:
      exact_match = environment.benchmark_aliases[input_benchmark_name]
    else:
      exact_match = input_benchmark_name

    for benchmark_class in _Benchmarks(environment):
      if exact_match == benchmark_class.Name():
        return [benchmark_class]
    return []

  # Fuzzy matching.
  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, benchmark_class.Name())]


def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format (see build/android/pylib/perf/benchmark_runner.py):
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  output = {
    'version': 1,
    'steps': {
    }
  }
  for benchmark_class in benchmark_classes:
    if not issubclass(benchmark_class, benchmark.Benchmark):
      continue
    enabled, _ = decorators.IsEnabled(benchmark_class, possible_browser)
    if not enabled:
      continue

    base_name = benchmark_class.Name()
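    # Each step re-invokes this script for a single benchmark with verbose
    # chart-JSON output and result uploading; the --browser flag is appended
    # per step below.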
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name
    # TODO(fmeawad): Currently we set the device affinity to a stable hash of
    # the benchmark name. This somewhat evenly distributes benchmarks among the
    # requested number of shards. However, it is far from optimal in terms of
    # cycle time. We should add a benchmark size decorator (e.g. small, medium,
    # large) and let that inform sharding.

    # Based on the current timings, we shift the result of the hash function to
    # achieve better load balancing. Those shift values are to be revised when
    # necessary. (See tools/build/scripts/tools/perf/chrome-perf-step-timings.py
    # for more details)
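    # For example, with num_shards == 5 the shard index becomes
    # (int(sha1(name).hexdigest(), 16) >> 56) % 5; shard counts not listed
    # below fall back to a shift of 0.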
    hash_shift = {
      2 : 47,
      5 : 56,
      8 : 50
    }
    shift = hash_shift.get(num_shards, 0)
    base_name_hash = hashlib.sha1(base_name).hexdigest()
    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    if possible_reference_browser:
      enabled, _ = decorators.IsEnabled(
          benchmark_class, possible_reference_browser)
      if enabled:
        output['steps'][base_name + '.reference'] = {
          'cmd': ' '.join(base_cmd + [
                '--browser=reference', '--output-trace-tag=_ref']),
          'device_affinity': device_affinity,
          'perf_dashboard_id': perf_dashboard_id,
        }

  return json.dumps(output, indent=2, sort_keys=True)


def main(environment):
  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name)
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, ' %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
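    # No command matched: default to 'run' and treat the positional argument
    # as a benchmark name.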
    command = Run

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)
  options, args = parser.parse_args()
  if commands:
    args = args[1:]
  options.positional_args = args
  command.ProcessCommandLineArgs(parser, options, environment)
  return command().Run(options)