tools/run-bisect-perf-regression.py
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import optparse
import os
import platform
import re
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')


class Goma(object):
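  """Context manager that starts goma on entry and stops it on exit.

  If no goma path is given, entering and exiting the context are no-ops.
  """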

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
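    """Points the compiler environment variables (or PATH) at goma."""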
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: If goma failed to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  required_parameters = [
      'command',
      'repeat_count',
      'truncate_percent',
      'max_time_minutes',
  ]
  return _ValidateConfigFile(config_contents, required_parameters)


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  required_params = [
      'command',
      'good_revision',
      'bad_revision',
      'metric',
      'repeat_count',
      'truncate_percent',
      'max_time_minutes',
  ]
  return _ValidateConfigFile(config_contents, required_params)


def _OutputFailedResults(text_to_print):
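  """Prints the given error text in a 'Results - Failed' annotation step."""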
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
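  """Creates a BisectOptions object from the given config dictionary."""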
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
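  """Parses cloud storage links (HTML results and profiler files) from the
  given test output.
  """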
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
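  """Outputs buildbot annotation links for the cloud-stored results.

  Prints a summary comparing the patched and unpatched means, plus links to
  the HTML results and any profiler data files found in the test output.
  """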
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
        (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
        'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])


def _ResolveRevisionsFromConfig(config):
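  """Resolves the good and bad revisions in the config to git hashes.

  Returns:
    A (good_revision, bad_revision) tuple, or (None, None) if the config
    does not specify any revisions.
  """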
  if not 'good_revision' in config and not 'bad_revision' in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
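  """Returns a dict of buildbot annotation strings for each step.

  The strings differ depending on whether two revisions are being compared
  or a patch is being compared against tip-of-tree.
  """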
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
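  """Syncs to the given revision if provided, runs hooks, and builds.

  Raises:
    RuntimeError: If the sync, gclient runhooks, or build step fails.
  """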
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
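  """Runs the performance test and returns the parsed results.

  Raises:
    RuntimeError: If the performance test fails to run.
  """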
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      _RunPerformanceTest(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--metric', config['metric'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_host', '--builder_host'),
      ('builder_port', '--builder_port'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, when the
    # gs_bucket flag is set we don't compile Chrome at all and use build
    # archives instead, so the goma service is skipped on Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    cmd.append('--use_goma')

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to '
                         'checkout a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is supplied, '
                         'the bisect script will use this to override the '
                         'default config file path. The script will attempt to '
                         'load it as a bisect config first, then a perf '
                         'config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'bisect script will use this to override default '
                         'behavior.')
  parser.add_option('--dry_run',
                    action="store_true",
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  return parser


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())