tools/run-bisect-perf-regression.py
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""
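
# A minimal sketch of how this script is typically driven (the exact try-bot
# setup may differ, and the paths below are illustrative only): fill in
# auto_bisect/bisect.cfg or run-perf-test.cfg, then invoke the script with a
# scratch working directory, e.g.:
#
#   python tools/run-bisect-perf-regression.py \
#       --working_directory /tmp/bisect \
#       --path_to_goma ~/goma
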
import optparse
import os
import platform
import re
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')


class Goma(object):

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: If goma fails to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])
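
# Goma is a context manager: __enter__ starts the goma compiler proxy when a
# path was supplied, and __exit__ stops it again. A minimal usage sketch (the
# path is illustrative):
#
#   with Goma('/path/to/goma') as _:
#     # Builds launched here pick up goma via CC/CXX (Windows) or PATH.
#     subprocess.call(['ninja', '-C', 'out/Release', 'chrome'])
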
def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  required_parameters = [
      'command',
      'repeat_count',
      'truncate_percent',
      'max_time_minutes',
  ]
  return _ValidateConfigFile(config_contents, required_parameters)
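
# _LoadConfigFile simply exec's the file and reads its global 'config' dict, so
# a run-perf-test.cfg that passes _ValidatePerfConfigFile could look roughly
# like the sketch below. The command and values are illustrative; note that
# _ValidateConfigFile requires every required value to be a non-empty string.
#
#   config = {
#     'command': './tools/perf/run_benchmark -v --browser=release sunspider',
#     'repeat_count': '20',
#     'truncate_percent': '25',
#     'max_time_minutes': '20',
#   }
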
def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  required_params = [
      'command',
      'good_revision',
      'bad_revision',
      'metric',
      'repeat_count',
      'truncate_percent',
      'max_time_minutes',
  ]
  return _ValidateConfigFile(config_contents, required_params)
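
# A bisect config adds the revision range and metric on top of the perf-test
# parameters. An illustrative auto_bisect/bisect.cfg (all values made up):
#
#   config = {
#     'command': './tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '306475',
#     'bad_revision': '306478',
#     'metric': 'Total/Total',
#     'repeat_count': '20',
#     'truncate_percent': '25',
#     'max_time_minutes': '20',
#   }
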
def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
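
# As an illustration (values made up), for a Telemetry command that contains
# '--browser=android-chrome-shell', the helper above would hand
# bisect_perf_regression.BisectOptions.FromDict a dict along these lines:
#
#   {
#     'command': './tools/perf/run_benchmark --browser=android-chrome-shell sunspider',
#     'metric': 'Total/Total',
#     'repeat_test_count': 20,
#     'truncate_percent': 25,
#     'max_time_minutes': 20,
#     'build_preference': 'ninja',
#     'output_buildbot_annotations': True,
#     'target_platform': 'android',
#   }
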
def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results
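
# The patterns above scan raw test output for cloud-storage links. Given output
# containing a line such as (URL purely illustrative):
#
#   View results at http://storage.googleapis.com/chromium-telemetry/html-results/results-2014-11-05_abc123
#
# the function would return something like:
#
#   {'html-results': ['http://storage.googleapis.com/chromium-telemetry/'
#                     'html-results/results-2014-11-05_abc123'],
#    'profiler': []}
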
def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
        (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
        'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])


def _ResolveRevisionsFromConfig(config):
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      _RunPerformanceTest(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
      where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--metric', config['metric'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # For Windows XP platforms, the goma service is not supported. Moreover, we
    # don't compile chrome when the gs_bucket flag is set; build archives are
    # used instead, so the goma service is ignored for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    if path_to_goma:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code
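
# For the illustrative bisect config shown earlier, the subprocess launched by
# _RunBisectionScript would look roughly like the following (wrapped for
# readability; every value is made up):
#
#   python tools/auto_bisect/bisect_perf_regression.py \
#     --command './tools/perf/run_benchmark -v --browser=release sunspider' \
#     --good_revision 306475 --bad_revision 306478 \
#     --metric Total/Total --working_directory /tmp/bisect \
#     --output_buildbot_annotations --repeat_test_count 20 \
#     --truncate_percent 25 --max_time_minutes 20 --build_preference ninja
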
def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                    'script, which will use it as the location to checkout '
                    'a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                    'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is supplied, '
                    'the bisect script will use this to override the default '
                    'config file path. The script will attempt to load it '
                    'as a bisect config first, then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                    'bisect script will use this to override default behavior.')
  parser.add_option('--dry_run',
                    action="store_true",
                    help='The script will perform the full bisect, but '
                    'without syncing, building, or running the performance '
                    'tests.')
  return parser


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check if the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())