tools/run-bisect-perf-regression.py

#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import errno
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'

# This is used to identify tryjobs triggered by the commit queue.
_COMMIT_QUEUE_USERS = [
    '5071639625-1lppvbtck1morgivc6sq4dul7klu27sd@developer.gserviceaccount.com',
    'commit-bot@chromium.org']


class Goma(object):
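  """Context manager that starts goma before a build and stops it afterward.

  If no goma path is supplied, entering and exiting the context is a no-op.
  """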

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: Goma failed to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. Can ignore the return code
    # since it will return an error if it wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])


def _OutputFailedResults(text_to_print):
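  """Prints the given text inside a 'Results - Failed' annotation step."""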
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
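  """Creates a BisectOptions instance from the given config dictionary.

  Args:
    config: A dictionary of parameters from the bisect config file.

  Returns:
    A bisect_perf_regression.BisectOptions instance.
  """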
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']

  if 'improvement_direction' in config:
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if 'target_arch' in config:
    opts_dict['target_arch'] = config['target_arch']

  if 'bug_id' in config and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    # TODO(prasadv): Remove the android-chrome-shell check once we confirm
    # that there are no pending bisect jobs with this in their command.
    if any(item in config['command']
           for item in ['android-chrome-shell', 'android-chromium']):
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
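  """Extracts cloud storage URLs for HTML results and profiler data.

  Args:
    output: Performance test output to search.

  Returns:
    A dictionary mapping 'html-results' and 'profiler' to lists of URLs.
  """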
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/'
      r'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/'
      r'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
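  """Prints annotation links to cloud results along with a results summary.

  Args:
    results_without_patch: Results tuple from the run without the patch.
    results_with_patch: Results tuple from the run with the patch.
    annotations_dict: Dictionary of annotation strings for the steps.
  """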
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the percent difference in the means of the two runs.
  percent_diff_in_means = None
  std_err = None
  if ('mean' in results_with_patch[0] and
      'values' in results_with_patch[0]):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                         'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])


def _ResolveRevisionsFromConfig(config):
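  """Resolves the good and bad revisions in the config to git hashes.

  Returns:
    A (good_revision, bad_revision) tuple, or (None, None) if the config
    does not specify revisions.

  Raises:
    RuntimeError: A revision could not be resolved.
  """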
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
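  """Returns a dict of buildbot step annotation strings.

  The strings name the good/bad revisions when the config specifies them,
  and refer to with-patch/without-patch runs otherwise.
  """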
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
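  """Syncs to the given revision (if any), runs gclient hooks, and builds.

  Raises:
    RuntimeError: The sync, hook run, or build failed.
  """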
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
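  """Runs the performance test command inside an annotated buildbot step.

  Returns:
    The results tuple from RunPerformanceTestAndParseResults.

  Raises:
    RuntimeError: The performance test failed to run.
  """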
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # The bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud-stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by the commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config_key in config:
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  # TODO(prasadv): Remove the android-chrome-shell check once we confirm there
  # are no pending bisect jobs with this in their command.
  if any(item in config['command']
         for item in ['android-chrome-shell', 'android-chromium']):
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, we don't
    # compile Chrome when the gs_bucket flag is set and instead use build
    # archives, so the goma service is ignored for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on the Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
  if path_to_goma:
    cmd.append('--use_goma')
    cmd.append('--goma_dir')
    cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print '  %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chromium'
  elif 'x64' in bot_platform:
    return 'release_x64'
  return 'release'


def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('-v')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)


def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  # TODO(prasadv): Remove the android-chrome-shell check once we confirm there
  # are no pending bisect jobs with this in their command.
  if any(item in opts_dict['command']
         for item in ['android-chrome-shell', 'android-chromium']):
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _GetModifiedFilesFromPatch(cwd=None):
  """Gets list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files


def _GetAffectedBenchmarkModuleNames():
  """Gets list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if (affected_file.startswith(PERF_BENCHMARKS_PATH) or
        affected_file.startswith(PERF_MEASUREMENTS_PATH)):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks


def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None


def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1


def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # output should include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # The subprocess return code is 0 on a successful test run and non-zero
  # otherwise.
  return return_code == 0


def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  os.chdir(SRC_DIR)
  # Determine the bot platform from the buildbot builder name in the
  # environment, falling back to the host platform.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name) or []
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to check '
                         'out a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is supplied, '
                         'the bisect script will use this to override the '
                         'default config file path. The script will attempt to '
                         'load it as a bisect config first, then a perf '
                         'config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, the '
                         'bisect script will use this to override the default '
                         'behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  # This argument is passed by buildbot to supply build properties to the
  # bisect script. Note: Don't change the "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='Build properties in JSON format.')

  return parser


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  # If no valid config file was found, check whether the request came from
  # the commit queue; if so, run the modified Telemetry benchmarks for the
  # patch.
  if opts.build_properties.get('requester') in _COMMIT_QUEUE_USERS:
    return _SetupAndRunPerformanceTest(
        config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())