#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
6 """Run Performance Test Bisect Tool
8 This script is used by a try bot to run the bisect script with the parameters
9 specified in the bisect config file. It checks out a copy of the depot in
10 a subdirectory 'bisect' of the working directory provided, annd runs the
11 bisect scrip there.
12 """

import errno  # Needed for errno.ENOENT in _ListAvailableBenchmarks.
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'


class Goma(object):

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: If goma failed to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. The return code can be
    # ignored, since 'stop' returns an error if goma wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])
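

# Goma is used as a context manager: builds launched inside a
# 'with Goma(path_to_goma)' block go through the goma distributed-compile
# service when a path is supplied, and goma is stopped again on exit (see
# _SetupAndRunPerformanceTest and _RunBisectionScript below).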


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
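  # A config file is an ordinary Python file that defines a module-level dict
  # named 'config'. An illustrative bisect.cfg (revision values hypothetical):
  #   config = {
  #     'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
  #     'good_revision': '306475',
  #     'bad_revision': '306478',
  #   }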
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])


def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']

  if 'improvement_direction' in config:
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if 'target_arch' in config:
    opts_dict['target_arch'] = config['target_arch']

  if 'bug_id' in config and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    # Use .get() so that an unset variable doesn't raise a KeyError before
    # the more descriptive RuntimeError below can be raised.
    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
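  """Finds cloud storage links in test output.

  Matched links have roughly these shapes (derived from the regexes below):
    http://storage.googleapis.com/chromium-telemetry/html-results/results-...
    https://console.developers.google.com/m/cloudstorage/b/<bucket>/o/profiler-...
  """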
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/'
      r'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/'
      r'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs:
  # (mean_with_patch / mean_without_patch) * 100 - 100.
  percent_diff_in_means = None
  std_err = None
  if ('mean' in results_with_patch[0] and
      'values' in results_with_patch[0]):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                         'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])


def _ResolveRevisionsFromConfig(config):
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
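  # RunPerformanceTestAndParseResults is assumed (from how its result is used
  # here and in _ParseAndOutputCloudLinks) to return a tuple of
  # (results_dict, return_code, output), where results_dict carries 'mean',
  # 'std_err' and 'values' entries and a non-zero return_code means failure.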
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: Contents of the config file, a dictionary.
  """
  # The bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud-stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by the commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print ('Windows XP is not supported for perf try jobs because it lacks '
           'goma support. Please refer to crbug.com/330900.')
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError as e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config_key in config:
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, Chrome isn't
    # compiled when the gs_bucket flag is set (build archives are used
    # instead), so the goma service is skipped for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    else:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print '  %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chrome-shell'
  elif 'x64' in bot_platform:
    return 'release_x64'
  return 'release'


def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('-v')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)


def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates the options needed to create a BisectPerformanceMetrics
  instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  if 'android-chrome-shell' in opts_dict['command']:
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _GetModifiedFilesFromPatch(cwd=None):
  """Gets the list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files


def _GetAffectedBenchmarkModuleNames():
  """Gets the list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if affected_file.startswith(PERF_BENCHMARKS_PATH):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks


def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None


def _OutputOverallResults(results):
  """Creates a results step and prints results on the buildbot job."""
  test_status = all(results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1


def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # output should also include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # The exit code from the subprocess that executed the test command is 0 on
  # a successful run, and non-zero otherwise.
  return return_code == 0


def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  # The bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)
  # Determine the bot platform from the buildbot builder name in the
  # environment, falling back to the host platform.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just
  # create a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  # _ListAvailableBenchmarks may return None; treat that as no benchmarks.
  available_benchmarks = _ListAvailableBenchmarks(bot_name) or []
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to check '
                         'out a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is '
                         'supplied, the bisect script will use this to '
                         'override the default config file path. The script '
                         'will attempt to load it as a bisect config first, '
                         'then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'the bisect script will use this to override the '
                         'default behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  # This argument is passed by buildbot to supply build properties to the
  # bisect script. Note: Don't change the "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='build properties in JSON format')
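
  # For example (illustrative value), buildbot might pass:
  #   --build-properties={"requester": "commit-bot@chromium.org"}
  # main() inspects the 'requester' property to detect commit-queue try jobs.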

  return parser


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  # If no valid config file was found, check whether the request came from
  # the commit queue; if so, run the modified Telemetry benchmarks for the
  # patch.
  if opts.build_properties.get('requester') == 'commit-bot@chromium.org':
    return _SetupAndRunPerformanceTest(
        config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())