#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import errno
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'


class Goma(object):
  """Starts and stops the goma client as a context manager."""

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma environment variables and launches the goma client.

    Raises:
      RuntimeError: Goma failed to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. The return code can be
    # ignored, since stopping returns an error if goma wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])
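
# A minimal usage sketch for the Goma context manager above (illustrative,
# not executed; the paths and build command are hypothetical). When
# path_to_goma is empty, the context manager is a no-op:
#
#   with Goma('/path/to/goma') as _:
#     subprocess.call(['ninja', '-C', 'out/Release', 'chrome'])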


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}
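
# An illustrative sketch of the config-file format _LoadConfigFile expects:
# the file is plain Python, executed with execfile(), and must define a
# top-level dict named 'config'. The keys below are the ones this script
# reads; the values are hypothetical:
#
#   config = {
#     'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '512340',
#     'bad_revision': '512360',
#     'metric': 'Total/Total',
#     'repeat_count': '20',
#     'max_time_minutes': '20',
#   }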


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking that each required
  parameter is present with a non-empty string value.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])
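
# Illustrative validation examples (values hypothetical): a bisect config
# passes only when 'command', 'good_revision', and 'bad_revision' are all
# present as non-empty strings.
#
#   _ValidateBisectConfigFile({'command': 'c', 'good_revision': '1',
#                              'bad_revision': '2'})               # -> True
#   _ValidateBisectConfigFile({'command': 'c', 'good_revision': ''})  # -> False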


def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('target_arch'):
    opts_dict['target_arch'] = config['target_arch']

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    # Use .get() so that an unset variable reads as empty rather than raising
    # a KeyError before the RuntimeError below can be reported.
    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results
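
# An illustrative example of what the patterns above extract (the URL is
# hypothetical): given test output containing the line
#
#   View online at http://storage.googleapis.com/chromium-telemetry/html-results/results-2015-03-01_abc123 .
#
# the returned dict would be
#
#   {'html-results': ['http://storage.googleapis.com/chromium-telemetry/'
#                     'html-results/results-2015-03-01_abc123'],
#    'profiler': []}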


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                         'Std. Error'.center(20, ' '))
    print ' %s %s %s' % (
        'Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % (
        'No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])
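
# A worked example of the percent-difference formula above (hypothetical
# numbers): with a patched mean of 110.0 and an unpatched mean of 100.0,
#
#   (110.0 / max(0.0001, 100.0)) * 100.0 - 100.0 == 10.0
#
# i.e. the patch is reported as a +10.00% delta. The max() guard only
# protects against division by zero when the unpatched mean is 0.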


def _ResolveRevisionsFromConfig(config):
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: Contents of the config file, a dictionary.
  """
  # Bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print ('Windows XP is not supported for perf try jobs because it lacks '
           'goma support. Please refer to crbug.com/330900.')
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  # The goma service is not supported on Windows XP; also, when the gs_bucket
  # flag is set, Chrome is not compiled here (build archives are used
  # instead), so goma is skipped on XP. See http://crbug.com/330900.
  if path_to_goma and platform.release() == 'XP':
    print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
           'on Windows XP platform. Please refer to crbug.com/330900.')
    path_to_goma = None
  if path_to_goma:
    cmd.append('--use_goma')
    cmd.append('--goma_dir')
    cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code
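
# An illustrative example of the command this function assembles (all values
# hypothetical): with config = {'command': 'tools/perf/run_benchmark sunspider',
# 'good_revision': '512340', 'bad_revision': '512360'} and
# working_directory='/b/bisect', the subprocess call is roughly:
#
#   python tools/auto_bisect/bisect_perf_regression.py \
#     --command "tools/perf/run_benchmark sunspider" \
#     --good_revision 512340 --bad_revision 512360 \
#     --working_directory /b/bisect --output_buildbot_annotations \
#     --build_preference ninja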


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chrome-shell'
  elif 'x64' in bot_platform:
    return 'release_x64'
  return 'release'
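
# Illustrative mapping (the bot platform names are hypothetical):
#   _GetBrowserType('android')  -> 'android-chrome-shell'
#   _GetBrowserType('win_x64')  -> 'release_x64'
#   _GetBrowserType('linux')    -> 'release'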


def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('-v')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)
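
# Illustrative outputs (the test name is hypothetical):
#   _GuessTelemetryTestCommand('win', 'sunspider')
#     -> 'python tools/perf/run_benchmark -v --browser=release sunspider'
#   _GuessTelemetryTestCommand('android', 'sunspider')
#     -> 'tools/perf/run_benchmark -v --browser=android-chrome-shell sunspider'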


def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  if 'android-chrome-shell' in opts_dict['command']:
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _GetModifiedFilesFromPatch(cwd=None):
  """Gets list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files


def _GetAffectedBenchmarkModuleNames():
  """Gets list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if (affected_file.startswith(PERF_BENCHMARKS_PATH) or
        affected_file.startswith(PERF_MEASUREMENTS_PATH)):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks
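
# An illustrative example (the file name is hypothetical): a patch touching
# 'tools/perf/benchmarks/sunspider.py' yields the module name 'sunspider',
# which is later matched by prefix against the available benchmark list in
# _RunBenchmarksForCommitQueue below.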


def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None


def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1


def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # output should include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # The subprocess that executes the test command returns 0 on a successful
  # run and a non-zero value otherwise.
  return return_code == 0


def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  os.chdir(SRC_DIR)
  # Determine the bot platform from the buildbot builder name in the
  # environment, falling back to the host platform; e.g. a builder name of
  # the form 'win_perf_bisect' yields 'win'.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    # _ListAvailableBenchmarks may return None; treat that as an empty list.
    for benchmark in available_benchmarks or []:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(
            b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to '
                         'checkout a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is '
                         'supplied, the bisect script will use this to '
                         'override the default config file path. The script '
                         'will attempt to load it as a bisect config first, '
                         'then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'bisect script will use this to override default '
                         'behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  # This argument is passed by buildbot to supply build properties to the
  # bisect script. Note: Don't change the "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='build properties in JSON format')

  return parser
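
# An illustrative invocation of this script (all paths hypothetical):
#
#   tools/run-bisect-perf-regression.py \
#     --working_directory /b/bisect \
#     --path_to_goma /b/build/goma \
#     --path_to_config tools/auto_bisect/bisect.cfg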


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check whether the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  # If no config file was changed, check whether the request came from the
  # commit queue; if so, run the modified Telemetry benchmarks for the patch.
  if opts.build_properties.get('requester') == 'commit-bot@chromium.org':
    return _SetupAndRunPerformanceTest(
        config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())