#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import errno
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'

# This is used to identify tryjobs triggered by the commit queue.
_COMMIT_QUEUE_USERS = [
    '5071639625-1lppvbtck1morgivc6sq4dul7klu27sd@developer.gserviceaccount.com',
    'commit-bot@chromium.org']
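

# Goma is a distributed compile service. This class wraps its start/stop
# lifecycle as a context manager so a build can run inside a "with" block,
# as _SetupAndRunPerformanceTest and _RunBisectionScript do below:
#
#   with Goma(path_to_goma) as _:
#     ...  # build and run steps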
class Goma(object):

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)
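
  # On Windows, goma is hooked in by pointing CC/CXX at gomacc.exe wrapping
  # cl.exe; on other platforms the goma directory is prepended to PATH so its
  # compiler wrappers shadow the system compilers.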
  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Any goma process lingering from a previous run is stopped first.

    Raises:
      RuntimeError: If goma failed to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}
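

# The validation helpers below only check that the required keys are present
# with non-empty string values; optional parameters pass through unchecked.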
def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])


def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('target_arch'):
    opts_dict['target_arch'] = config['target_arch']

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chromium' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
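

# The perf test's stdout is scanned for links to results uploaded to cloud
# storage: an HTML results page and any profiler data files.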
def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                         'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])
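

# When the config names good/bad revisions they are resolved to git hashes up
# front; a perf try job without revisions runs against the current checkout.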
def _ResolveRevisionsFromConfig(config):
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)
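

# Step annotation labels differ between the two job flavors: a
# revision-to-revision run labels steps with the revisions themselves, while
# a patch try job uses 'Patch' vs. 'ToT'.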
def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results
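

# A perf try job runs the test twice: once with the patch applied, then again
# after reverting it, so the two sets of results can be compared.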
def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1
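

# For a full bisect job the heavy lifting is delegated to
# bisect_perf_regression.py, invoked as a subprocess with flags derived from
# the config.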
def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chromium' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, Chrome is not
    # compiled when the gs_bucket flag is set (build archives are used
    # instead), so the goma service is ignored for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not '
             'supported on the Windows XP platform. Please refer to '
             'crbug.com/330900.')
      path_to_goma = None
    if path_to_goma:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chromium'
  elif 'x64' in bot_platform:
    return 'release_x64'
  return 'release'
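

# Commit-queue jobs have no explicit test command in their config, so one is
# synthesized from the bot platform and the benchmark name.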
def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('-v')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)


def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  if 'android-chromium' in opts_dict['command']:
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _GetModifiedFilesFromPatch(cwd=None):
  """Gets list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files


def _GetAffectedBenchmarkModuleNames():
  """Gets list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if (affected_file.startswith(PERF_BENCHMARKS_PATH) or
        affected_file.startswith(PERF_MEASUREMENTS_PATH)):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks
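

# Available benchmarks are discovered by running run_benchmark's 'list'
# command with JSON output; the temporary JSON file is removed afterwards.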
def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None


def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1


def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and results
  # should also include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # The subprocess return code is 0 on a successful test run and non-zero
  # otherwise.
  return return_code == 0
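

# Commit-queue try jobs do not bisect; they build once and run only the
# benchmarks whose files the patch touches, in return-code mode.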
def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  os.chdir(SRC_DIR)
  # Determine the bot platform from the buildbot builder name in the
  # environment, falling back to the host platform.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Get the required options in order to create a BisectPerformanceMetrics
  # instance. Since command is a required arg in BisectPerformanceMetrics,
  # we just create a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to check '
                         'out a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is '
                         'supplied, the bisect script will use this to '
                         'override the default config file path. The script '
                         'will attempt to load it as a bisect config first, '
                         'then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'bisect script will use this to override default '
                         'behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  # This argument is passed by buildbot to supply build properties to the
  # bisect script. Note: Don't change the "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='build properties in JSON format')

  return parser
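

# main() dispatches in order: a bisect config, then the perf-test configs,
# then the commit-queue fallback; the first valid configuration wins.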
def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  # If no valid config file was found, check whether the request came from
  # the commit queue; if so, run the Telemetry benchmarks modified by the
  # patch.
  if opts.build_properties.get('requester') in _COMMIT_QUEUE_USERS:
    return _SetupAndRunPerformanceTest(
        config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())