tools/run-bisect-perf-regression.py
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import optparse
import os
import platform
import re
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')


class Goma(object):
  """Starts and stops the goma compiler service around a 'with' block."""

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: If goma fails to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])
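
# Illustrative usage of the Goma helper above (not part of the original file;
# the goma path and the ninja target are placeholders). Goma is meant to be
# used as a context manager so the service is stopped even if the block
# raises, and a falsy path makes the whole block a no-op:
#
#   with Goma('/path/to/goma') as _:
#     subprocess.call(['ninja', '-C', 'out/Release', 'chrome'])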


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}
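
# Illustrative config file contents (not from the original file; the values
# are invented). The config file is plain Python that the execfile() call
# above runs, so it only needs to define a top-level dict named 'config'.
# The keys shown are ones this script reads:
#
#   config = {
#     'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '299443',
#     'bad_revision': '299571',
#     'metric': 'Total/Total',
#     'repeat_count': '20',
#     'max_time_minutes': '20',
#     'truncate_percent': '25',
#   }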


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True
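
# Illustrative behavior of _ValidateConfigFile (values invented): every
# required key must be present with a non-empty string value, so
#   _ValidateConfigFile({'command': 'run_benchmark'}, ['command'])      -> True
#   _ValidateConfigFile({'command': ''}, ['command'])                   -> False
#   _ValidateConfigFile({'command': 'x'}, ['command', 'good_revision']) -> False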


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])


def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('target_arch'):
    opts_dict['target_arch'] = config['target_arch']

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results
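
# Illustrative input for _ParseCloudLinksFromOutput (URL invented): the
# patterns above pick up links printed into the test output, so a line
# containing something like
#   http://storage.googleapis.com/chromium-telemetry/html-results/results-2015-01-01_120000
# would end up in results['html-results'].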


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
        (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
        'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])
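
# Worked example for the percent-difference computation above (numbers
# invented): with a patched mean of 105.0 and an unpatched mean of 100.0,
# percent_diff_in_means is (105.0 / 100.0) * 100.0 - 100.0 = +5.0, i.e. the
# patched run is 5% higher than the unpatched run for this metric.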


def _ResolveRevisionsFromConfig(config):
  if not 'good_revision' in config and not 'bad_revision' in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      _RunPerformanceTest(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, we don't
    # compile Chrome when the gs_bucket flag is set and use build archives
    # instead, so the goma service is ignored on Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    # Only pass the goma flags if goma wasn't disabled above.
    if path_to_goma:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code
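
# Illustrative command line assembled by _RunBisectionScript (values and
# paths invented; the optional flags depend on which config keys are set):
#
#   python tools/auto_bisect/bisect_perf_regression.py \
#       --command "tools/perf/run_benchmark -v --browser=release sunspider" \
#       --good_revision 299443 --bad_revision 299571 \
#       --working_directory /tmp/bisect --output_buildbot_annotations \
#       --metric Total/Total --build_preference ninja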


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                    'script, which will use it as the location to checkout '
                    'a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                    'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is supplied, '
                    'the bisect script will use this to override the default '
                    'config file path. The script will attempt to load it '
                    'as a bisect config first, then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                    'bisect script will use this to override default behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                    'without syncing, building, or running the performance '
                    'tests.')
  return parser
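
# Illustrative invocation from a try bot checkout (paths invented):
#
#   python tools/run-bisect-perf-regression.py \
#       --working_directory /b/bisect \
#       --path_to_goma /b/build/goma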


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())