# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses the command line, discovers the appropriate benchmarks, and runs them.

Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""

import difflib
import hashlib
import inspect
import json
import os
import sys

from telemetry import benchmark
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import discover
from telemetry import decorators


def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """ Prints the benchmarks that are not filtered, in the same order as the
  |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the same order as the
        |benchmarks| list).
    possible_browser: the possible_browser instance that's used for checking
        which benchmarks are enabled.
    output_pipe: the stream to which the benchmark list is printed.
  """
  if not benchmarks:
    print >> output_pipe, 'No benchmarks found!'
    return
  # Need this to stop pylint from complaining about an undefined variable.
  b = None
  if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
    assert False, '|benchmarks| param contains non benchmark class: %s' % b

  # Align the benchmark names to the longest one.
  format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
  disabled_benchmarks = []

  print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')
  for benchmark_class in benchmarks:
    if possible_browser and not decorators.IsEnabled(benchmark_class,
                                                     possible_browser)[0]:
      disabled_benchmarks.append(benchmark_class)
      continue
    print >> output_pipe, format_string % (
        benchmark_class.Name(), benchmark_class.Description())

  if disabled_benchmarks:
    print >> output_pipe, (
        'Disabled benchmarks for %s are (force run with -d):' %
        possible_browser.browser_type)
    for benchmark_class in disabled_benchmarks:
      print >> output_pipe, format_string % (
          benchmark_class.Name(), benchmark_class.Description())
  print >> output_pipe, (
      'Pass --browser to list benchmarks for another browser.')
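
# A purely illustrative sketch of the output PrintBenchmarkList produces; the
# benchmark names, descriptions, and browser type below are hypothetical:
#
#   Available benchmarks for release are:
#     my_benchmark        Measures something.
#     my_other_benchmark  Measures something else.
#   Pass --browser to list benchmarks for another browser.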


def GetMostLikelyMatchedBenchmarks(all_benchmarks, input_benchmark_name):
  """ Returns the list of benchmarks whose names most likely match
  |input_benchmark_name|.

  Args:
    all_benchmarks: the list of benchmark classes.
    input_benchmark_name: a string to be matched against the names of the
        benchmarks in |all_benchmarks|.

  Returns:
    A list of benchmark classes whose names likely match
    |input_benchmark_name|, arranged in descending order of similarity between
    their names and |input_benchmark_name|.
  """
  def MatchedWithBenchmarkInputNameScore(benchmark_class):
    return difflib.SequenceMatcher(
        a=benchmark_class.Name(), b=input_benchmark_name).ratio()

  benchmarks_with_similar_names = [
      b for b in all_benchmarks
      if MatchedWithBenchmarkInputNameScore(b) > 0.4]
  ordered_list = sorted(benchmarks_with_similar_names,
                        key=MatchedWithBenchmarkInputNameScore,
                        reverse=True)
  return ordered_list
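
# An illustrative sketch of the fuzzy matching above (the names used here are
# hypothetical, not real benchmarks): difflib.SequenceMatcher computes
# 2 * M / T, where M is the number of matching characters and T the combined
# length of both strings. For the misspelled input 'smothness' against a
# benchmark named 'smoothness', M = 9 and T = 19, so the ratio is ~0.95,
# comfortably above the 0.4 cutoff, and the benchmark would be suggested.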


class Environment(object):
  """Contains information about the benchmark runtime environment.

  Attributes:
    top_level_dir: A dir that contains benchmark, page test, and/or user story
        set dirs and associated artifacts.
    benchmark_dirs: A list of dirs containing benchmarks.
    benchmark_aliases: A dict of name:alias string pairs to be matched against
        exactly during benchmark selection.
  """

  def __init__(self, top_level_dir, benchmark_dirs=None,
               benchmark_aliases=None):
    self._top_level_dir = top_level_dir
    self._benchmark_dirs = benchmark_dirs or []
    self._benchmark_aliases = benchmark_aliases or {}

  @property
  def top_level_dir(self):
    return self._top_level_dir

  @property
  def benchmark_dirs(self):
    return self._benchmark_dirs

  @property
  def benchmark_aliases(self):
    return self._benchmark_aliases
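
# A minimal, hypothetical construction of an Environment; the paths and alias
# below are examples only, not values used anywhere in this module:
#
#   environment = Environment(
#       top_level_dir='/src/tools/perf',
#       benchmark_dirs=['/src/tools/perf/benchmarks'],
#       benchmark_aliases={'blink_perf': 'blink_perf.all'})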


class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  def Run(self, args):
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0])
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
    print >> sys.stderr, 'Available commands are:'
    for command in _Commands():
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    print >> sys.stderr, ('"%s help <command>" to see usage information '
                          'for a specific command.' % _ScriptName())
    return 0


class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    if not args.positional_args:
      args.benchmarks = _Benchmarks(environment)
    elif len(args.positional_args) == 1:
      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
                                            environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
    possible_browser = browser_finder.FindBrowser(args)
    if args.browser_type in (
        'exact', 'release', 'release_x64', 'debug', 'debug_x64', 'canary'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0
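
# An illustrative invocation of the List command; the wrapper script name and
# browser type are assumptions, not defined in this module:
#
#   tools/perf/run_benchmark list --browser=release \
#       --json-output-file=benchmarks.json --num-shards=8
#
# With --json-output-file set, the sharded step description produced by
# _GetJsonBenchmarkList below is written instead of the plain-text list.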


class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After the move to argparse, add command-line args for all
      # benchmarks to a subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name,
                                              environment)
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      most_likely_matched_benchmarks = GetMostLikelyMatchedBenchmarks(
          all_benchmarks, input_benchmark_name)
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Did you mean any of the benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    cls._benchmark = benchmark_class

  def Run(self, args):
    return min(255, self._benchmark().Run(args))
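
# An illustrative invocation of the default Run command; the wrapper script
# name, benchmark name, and browser type are hypothetical:
#
#   tools/perf/run_benchmark run my_benchmark --browser=release
#
# Because Run is the default command, the leading 'run' can be omitted (see
# main() below).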


def _ScriptName():
  return os.path.basename(sys.argv[0])
278 """Generates a list of all classes in this file that subclass Command."""
279 for _
, cls
in inspect
.getmembers(sys
.modules
[__name__
]):
280 if not inspect
.isclass(cls
):
282 if not issubclass(cls
, command_line
.Command
):
286 def _MatchingCommands(string
):
287 return [command
for command
in _Commands()
288 if command
.Name().startswith(string
)]


def _Benchmarks(environment):
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks


def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    for part in search_string.split('.'):
      if part.startswith(input_string):
        return True
    return False

  if exact_matches:
    # Don't add aliases to the search dict; only allow exact matching for them.
    if input_benchmark_name in environment.benchmark_aliases:
      exact_match = environment.benchmark_aliases[input_benchmark_name]
    else:
      exact_match = input_benchmark_name

    for benchmark_class in _Benchmarks(environment):
      if exact_match == benchmark_class.Name():
        return [benchmark_class]
    return []

  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, benchmark_class.Name())]
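
# An illustrative sketch of the prefix matching above, using hypothetical
# benchmark names: with exact_matches=False, the input 'startup' matches
# 'startup.cold.blank_page' via the full-name prefix, and the input 'cold'
# matches the same benchmark via the 'cold' component of the dotted name.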


def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format (see build/android/pylib/perf/benchmark_runner.py):
  { "steps": {
      <string>: {
        "cmd": <string>,
        "device_affinity": <int>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  output = {'steps': {}}
  for benchmark_class in benchmark_classes:
    if not issubclass(benchmark_class, benchmark.Benchmark):
      continue
    enabled, _ = decorators.IsEnabled(benchmark_class, possible_browser)
    if not enabled:
      continue

    base_name = benchmark_class.Name()
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name
    # TODO(fmeawad): Currently we set the device affinity to a stable hash of
    # the benchmark name. This somewhat evenly distributes benchmarks among
    # the requested number of shards. However, it is far from optimal in terms
    # of cycle time. We should add a benchmark size decorator (e.g. small,
    # medium, large) and let that inform sharding.

    # Based on the current timings, we shift the result of the hash function
    # to achieve better load balancing. Those shift values are to be revised
    # when necessary.
    # (See tools/build/scripts/tools/perf/chrome-perf-step-timings.py.)
    hash_shift = {}  # Per-shard-count shift values, tuned from step timings.
    shift = hash_shift.get(num_shards, 0)
    base_name_hash = hashlib.sha1(base_name).hexdigest()
    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
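    # A hypothetical sketch of the computation above: with num_shards=8 and
    # shift=0, int(hashlib.sha1('my_benchmark').hexdigest(), 16) % 8 yields a
    # stable value in [0, 8), so 'my_benchmark' is always assigned to the same
    # shard across builds.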

    output['steps'][base_name] = {
        'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
    }
    if possible_reference_browser:
      enabled, _ = decorators.IsEnabled(
          benchmark_class, possible_reference_browser)
      if enabled:
        output['steps'][base_name + '.reference'] = {
            'cmd': ' '.join(base_cmd + [
                '--browser=reference', '--output-trace-tag=_ref']),
            'device_affinity': device_affinity,
            'perf_dashboard_id': perf_dashboard_id,
        }

  return json.dumps(output, indent=2, sort_keys=True)
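
# A hypothetical example of the JSON produced above for a single benchmark
# named 'my_benchmark' on a 'release' browser; the command line and affinity
# value are illustrative only:
#
#   {
#     "steps": {
#       "my_benchmark": {
#         "cmd": "python ./run_benchmark -v --output-format=chartjson ...",
#         "device_affinity": 3,
#         "perf_dashboard_id": "my_benchmark"
#       }
#     }
#   }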


def main(environment):
  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name)
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
    command = Run

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)
  options, args = parser.parse_args()
  if commands:
    # Strip the explicit command name from the positional args.
    args = args[1:]
  options.positional_args = args
  command.ProcessCommandLineArgs(parser, options, environment)
  return command().Run(options)
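
# A minimal sketch of how an embedding script might drive this module; the
# entry-point name and directory layout are assumptions, not part of this
# file (in Chromium the wrapper under tools/perf builds its own Environment):
#
#   if __name__ == '__main__':
#     top_level = os.path.dirname(os.path.abspath(__file__))
#     sys.exit(main(Environment(
#         top_level_dir=top_level,
#         benchmark_dirs=[os.path.join(top_level, 'benchmarks')])))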