[NaCl SDK] nacl_io: Fix bad test added in def60c00.
[chromium-blink-merge.git] / tools / run-bisect-perf-regression.py
blob81e20f622cf179e2e6ab854270c23169ad5912f6
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Run Performance Test Bisect Tool
8 This script is used by a try bot to run the bisect script with the parameters
9 specified in the bisect config file. It checks out a copy of the depot in
10 a subdirectory 'bisect' of the working directory provided, annd runs the
11 bisect scrip there.
12 """
14 import optparse
15 import os
16 import platform
17 import re
18 import subprocess
19 import sys
20 import traceback
22 from auto_bisect import bisect_perf_regression
23 from auto_bisect import bisect_utils
24 from auto_bisect import math_utils
25 from auto_bisect import source_control
# Environment variables naming the CrOS board and device IP used when the
# bisect command targets a Chrome OS browser.
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'

# Paths derived from this script's location: the chromium src directory and
# the default config files for bisect jobs and perf try jobs.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')
class Goma(object):
  """Manages the goma distributed-compile service for a build.

  Intended to be used as a context manager: goma is started on entry and
  stopped on exit. When constructed with an empty/None path, every operation
  is a no-op, so the manager can be used unconditionally.
  """

  def __init__(self, path_to_goma):
    # Both attributes stay None when no goma path was supplied;
    # _HasGomaPath keys off of that.
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if path_to_goma:
      self._abs_path_to_goma = os.path.abspath(path_to_goma)
      ctl_script = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
      self._abs_path_to_goma_file = os.path.join(
          self._abs_path_to_goma, ctl_script)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    """Returns True if a goma directory was supplied at construction."""
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    """Points the build environment at goma's compiler wrapper."""
    if os.name == 'nt':
      gomacc_cl = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                   ' cl.exe')
      os.environ['CC'] = gomacc_cl
      os.environ['CXX'] = gomacc_cl
    else:
      os.environ['PATH'] = os.pathsep.join(
          [self._abs_path_to_goma, os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma's environment variables and launches it.

    Raises:
      RuntimeError: If the goma control script fails to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    """Stops goma; the control script's exit status is ignored."""
    subprocess.call([self._abs_path_to_goma_file, 'stop'])
94 def _LoadConfigFile(config_file_path):
95 """Attempts to load the specified config file as a module
96 and grab the global config dict.
98 Args:
99 config_file_path: Path to the config file.
101 Returns:
102 If successful, returns the config dict loaded from the file. If no
103 such dictionary could be loaded, returns the empty dictionary.
105 try:
106 local_vars = {}
107 execfile(config_file_path, local_vars)
108 return local_vars['config']
109 except Exception:
110 print
111 traceback.print_exc()
112 print
113 return {}
116 def _ValidateConfigFile(config_contents, required_parameters):
117 """Validates the config file contents, checking whether all values are
118 non-empty.
120 Args:
121 config_contents: A config dictionary.
122 required_parameters: A list of parameters to check for.
124 Returns:
125 True if valid.
127 for parameter in required_parameters:
128 if parameter not in config_contents:
129 return False
130 value = config_contents[parameter]
131 if not value or type(value) is not str:
132 return False
133 return True
def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  # Only 'command' is mandatory for a perf try job.
  return _ValidateConfigFile(config_contents, required_parameters=['command'])
def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  # A bisect job additionally needs the revision range endpoints.
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])
def _OutputFailedResults(text_to_print):
  """Prints the given failure text inside a 'Results - Failed' annotation step.

  Args:
    text_to_print: The error text to display, surrounded by blank lines.
  """
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()
def _CreateBisectOptionsFromConfig(config):
  """Builds a BisectOptions instance from a bisect/perf config dictionary.

  Args:
    config: A config dictionary; must contain 'command'. All other
        recognized keys are optional and are only forwarded when present.

  Returns:
    A bisect_perf_regression.BisectOptions instance.

  Raises:
    RuntimeError: If the command targets CrOS but the BISECT_CROS_BOARD or
        BISECT_CROS_IP environment variables aren't set.
  """
  print(config['command'])
  opts_dict = {
      'command': config['command'],
      'metric': config.get('metric'),
  }

  # Numeric knobs: forward only when present and truthy so BisectOptions'
  # own defaults apply otherwise. (Indexing directly would raise KeyError
  # for perf configs that omit them.)
  if config.get('repeat_count'):
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config.get('truncate_percent'):
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config.get('max_time_minutes'):
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']

  if 'improvement_direction' in config:
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if 'target_arch' in config:
    opts_dict['target_arch'] = config['target_arch']

  if 'bug_id' in config and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    # Use .get() so that an unset variable reports the helpful RuntimeError
    # below instead of raising a bare KeyError.
    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    # 'android-chrome-shell' must be checked before 'android-chrome' since
    # the latter is a substring of the former.
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
231 def _ParseCloudLinksFromOutput(output):
232 html_results_pattern = re.compile(
233 r'\s(?P<VALUES>http://storage.googleapis.com/' +
234 'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
235 re.MULTILINE)
236 profiler_pattern = re.compile(
237 r'\s(?P<VALUES>https://console.developers.google.com/' +
238 'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
239 re.MULTILINE)
241 results = {
242 'html-results': html_results_pattern.findall(output),
243 'profiler': profiler_pattern.findall(output),
246 return results
def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  """Prints buildbot annotation links to cloud results and profiler data.

  Args:
    results_without_patch: Results tuple from the run without the patch;
        index 0 is a metric dict and index 2 the raw output text --
        presumably (values, return code, output); confirm against
        RunPerformanceTestAndParseResults.
    results_with_patch: Same, for the run with the patch.
    annotations_dict: Step-name strings from _GetStepAnnotationStringsDict.
  """
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  # Only the no-patch run's HTML results page gets linked; empty string
  # means no link was found in the output.
  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    # max(0.0001, ...) guards against division by a zero mean.
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    # Print a small fixed-width table of mean and standard error for the
    # patched and unpatched runs.
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
        (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
        'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    # No stats available, but there is still a results page to link.
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])
300 def _ResolveRevisionsFromConfig(config):
301 if not 'good_revision' in config and not 'bad_revision' in config:
302 return (None, None)
304 bad_revision = source_control.ResolveToRevision(
305 config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
306 if not bad_revision:
307 raise RuntimeError('Failed to resolve [%s] to git hash.',
308 config['bad_revision'])
309 good_revision = source_control.ResolveToRevision(
310 config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
311 if not good_revision:
312 raise RuntimeError('Failed to resolve [%s] to git hash.',
313 config['good_revision'])
315 return (good_revision, bad_revision)
318 def _GetStepAnnotationStringsDict(config):
319 if 'good_revision' in config and 'bad_revision' in config:
320 return {
321 'build1': 'Building [%s]' % config['good_revision'],
322 'build2': 'Building [%s]' % config['bad_revision'],
323 'run1': 'Running [%s]' % config['good_revision'],
324 'run2': 'Running [%s]' % config['bad_revision'],
325 'sync1': 'Syncing [%s]' % config['good_revision'],
326 'sync2': 'Syncing [%s]' % config['bad_revision'],
327 'results_label1': config['good_revision'],
328 'results_label2': config['bad_revision'],
329 'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
330 'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
332 else:
333 return {
334 'build1': 'Building With Patch',
335 'build2': 'Building Without Patch',
336 'run1': 'Running With Patch',
337 'run2': 'Running Without Patch',
338 'results_label1': 'Patch',
339 'results_label2': 'ToT',
340 'profiler_link1': 'With Patch - Profiler Data',
341 'profiler_link2': 'Without Patch - Profiler Data',
def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  """Syncs to the given revision (if any), runs gclient hooks, and builds.

  Args:
    bisect_instance: A BisectPerformanceMetrics instance.
    build_string: Annotation step name to show while building.
    sync_string: Annotation step name to show while syncing.
    revision: Git revision to sync to before building, or None to build the
        working tree as-is.

  Raises:
    RuntimeError: If the sync, gclient runhooks, or the build fails.
  """
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  # RunGClient returns a nonzero exit code on failure.
  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()
def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  """Runs the performance test command inside a buildbot annotation step.

  Args:
    bisect_instance: A BisectPerformanceMetrics instance.
    opts: BisectOptions carrying the command and metric to run.
    reset_on_first_run: Forwarded to RunPerformanceTestAndParseResults;
        presumably resets state before the first run -- confirm there.
    upload_on_last_run: Forwarded; presumably uploads results after the last
        run -- confirm there.
    results_label: Label identifying this run in the results.
    run_string: Annotation step name to display while running.

  Returns:
    The results tuple from RunPerformanceTestAndParseResults.

  Raises:
    RuntimeError: If the performance test reports failure.
  """
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  # Index 1 of the results tuple is the failure status; nonzero means the
  # test did not complete successfully.
  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results
def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  # (good_revision, bad_revision) git hashes, or (None, None) when the config
  # doesn't specify revisions (plain with/without-patch comparison).
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  # First pass: build and run with the patch applied (synced to the good
  # revision when one was given).
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know that
  # the perf try bot currently only supports src/ and src/third_party/WebKit, we
  # simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  # Second pass: build and run without the patch (synced to the bad revision
  # when one was given).
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)
def _SetupAndRunPerformanceTest(config, path_to_goma):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  # Goma has no Windows XP binary, so perf try jobs can't run there.
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      # Record the goma settings in the config so the bisect options pick
      # them up via _CreateBisectOptionsFromConfig.
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      _RunPerformanceTest(config)
    return 0
  except RuntimeError, e:
    # Close out the current annotation step as failed before reporting.
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1
def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config_key in config:
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # For Windows XP platforms, goma service is not supported.
    # Moreover we don't compile chrome when gs_bucket flag is set instead
    # use builds archives, therefore ignore goma service for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    else:
      # Only pass the goma flags when goma is actually usable. Appending them
      # unconditionally made os.path.abspath(None) blow up after path_to_goma
      # was reset to None on XP above.
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  # Numeric config values must be stringified before being passed to
  # subprocess.
  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code
def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations.

  Args:
    config: The config dictionary to display, one 'key : value' per line.
  """
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()
569 def _OptionParser():
570 """Returns the options parser for run-bisect-perf-regression.py."""
571 usage = ('%prog [options] [-- chromium-options]\n'
572 'Used by a try bot to run the bisection script using the parameters'
573 ' provided in the auto_bisect/bisect.cfg file.')
574 parser = optparse.OptionParser(usage=usage)
575 parser.add_option('-w', '--working_directory',
576 type='str',
577 help='A working directory to supply to the bisection '
578 'script, which will use it as the location to checkout '
579 'a copy of the chromium depot.')
580 parser.add_option('-p', '--path_to_goma',
581 type='str',
582 help='Path to goma directory. If this is supplied, goma '
583 'builds will be enabled.')
584 parser.add_option('--path_to_config',
585 type='str',
586 help='Path to the config file to use. If this is supplied, '
587 'the bisect script will use this to override the default '
588 'config file path. The script will attempt to load it '
589 'as a bisect config first, then a perf config.')
590 parser.add_option('--extra_src',
591 type='str',
592 help='Path to extra source file. If this is supplied, '
593 'bisect script will use this to override default behavior.')
594 parser.add_option('--dry_run',
595 action="store_true",
596 help='The script will perform the full bisect, but '
597 'without syncing, building, or running the performance '
598 'tests.')
599 return parser
def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check if the config is valid for running bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    # A config path on the command line takes precedence over the default
    # candidates. NOTE: the candidates are already absolute paths, so
    # os.path.join below returns current_perf_cfg_file unchanged.
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main())