#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Performance Test Bisect Tool

This script is used by a try bot to run the bisect script with the parameters
specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
bisect script there.
"""

import errno
import json
import optparse
import os
import platform
import re
import shlex
import subprocess
import sys
import traceback

from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import source_control

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'

# This is used to identify tryjobs triggered by the commit queue.
_COMMIT_QUEUE_USER = ('5071639625-1lppvbtck1morgivc6sq4dul7klu27sd'
                      '@developer.gserviceaccount.com')
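
# Note: the config files loaded below (auto_bisect/bisect.cfg and
# run-perf-test.cfg) are Python files that define a dict named 'config'.
# A minimal, purely illustrative bisect config might look like this
# (all values are hypothetical):
#
#   config = {
#     'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '306475',
#     'bad_revision': '306478',
#     'metric': 'Total/Total',
#     'repeat_count': '20',
#     'max_time_minutes': '20',
#     'truncate_percent': '25',
#   }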


class Goma(object):
  """Starts and stops the goma distributed-compile service, if configured.

  Used as a context manager so that goma is stopped even if a later step
  raises.
  """

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma, filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma and launches it.

    Raises:
      RuntimeError: If goma fails to start.
    """
    self._SetupEnvVars()

    # Sometimes goma is left lingering if something went wrong on a previous
    # run. Stop it before starting a new process. We can ignore the return
    # code since 'stop' returns an error if goma wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])


def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}


def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True


def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])


def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])


def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()


def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('target_arch'):
    opts_dict['target_arch'] = config['target_arch']

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    if 'android-chrome-shell' in config['command']:
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results
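
# An html-results link matched by html_results_pattern above looks, for
# example, like this (the timestamp suffix is hypothetical):
#   http://storage.googleapis.com/chromium-telemetry/html-results/results-2015-01-01_12-00-00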


def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])
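  # Illustrative arithmetic for the formula above: a patched mean of 105.0
  # against an unpatched mean of 100.0 gives
  # (105.0 / 100.0) * 100.0 - 100.0 = +5.0%.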

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
        (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
        'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])


def _ResolveRevisionsFromConfig(config):
  if 'good_revision' not in config and 'bad_revision' not in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)


def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }


def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()


def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results


def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1


def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
      where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, we don't
    # compile Chrome when the gs_bucket flag is set (build archives are used
    # instead), so the goma service is ignored for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    else:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'
    ])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n'
           % return_code)

  return return_code


def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print ' %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()


def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chrome-shell'
  elif 'x64' in bot_platform:
    return 'release_x64'

  return 'release'


def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('-v')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)
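
# For illustration, _GuessTelemetryTestCommand('win', 'sunspider') (the test
# name is hypothetical) would return the string:
#   'python tools/perf/run_benchmark -v --browser=release sunspider'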


def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  if 'android-chrome-shell' in opts_dict['command']:
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)


def _GetModifiedFilesFromPatch(cwd=None):
  """Gets list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files


def _GetAffectedBenchmarkModuleNames():
  """Gets list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if (affected_file.startswith(PERF_BENCHMARKS_PATH) or
        affected_file.startswith(PERF_MEASUREMENTS_PATH)):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks
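
# For example (hypothetical file name), a patch touching
# tools/perf/benchmarks/sunspider.py makes the function above return
# ['sunspider'].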


def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None


def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(current_value == True for current_value in results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1


def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and results
  # should also include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # return_code is the exit status of the subprocess that ran the test
  # command; it is 0 on a successful run and non-zero otherwise.
  return return_code == 0


def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  os.chdir(SRC_DIR)
  # Determine the bot platform by reading the buildbot builder name from the
  # environment variable.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # The bisect script expects to be run from the src directory.
  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)


def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to '
                         'checkout a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                         'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is '
                         'supplied, the bisect script will use this to '
                         'override the default config file path. The script '
                         'will attempt to load it as a bisect config first, '
                         'then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'bisect script will use this to override default '
                         'behavior.')
  parser.add_option('--dry_run',
                    action="store_true",
                    help='The script will perform the full bisect, but '
                         'without syncing, building, or running the '
                         'performance tests.')
  # This argument is passed by buildbot to supply build properties to the
  # bisect script. Note: Don't change "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='build properties in JSON format')

  return parser
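
# An illustrative invocation by a bisect try bot (paths are hypothetical):
#
#   tools/run-bisect-perf-regression.py \
#       --working_directory /b/bisect --path_to_goma /b/build/goma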


def main():
  """Entry point for run-bisect-perf-regression.py.

  Reads the config file, and then tries to either bisect a regression or
  just run a performance test, depending on the particular config parameters
  specified in the config file.
  """
  parser = _OptionParser()
  opts, _ = parser.parse_args()

  # Use the default config file path unless one was specified.
  config_path = BISECT_CONFIG_PATH
  if opts.path_to_config:
    config_path = opts.path_to_config
  config = _LoadConfigFile(config_path)

  # Check if the config is valid for running a bisect job.
  config_is_valid = _ValidateBisectConfigFile(config)

  if config and config_is_valid:
    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory\n'
      parser.print_help()
      return 1

    return _RunBisectionScript(
        config, opts.working_directory, opts.path_to_goma, opts.extra_src,
        opts.dry_run)

  # If it wasn't valid for running a bisect, then maybe the user wanted
  # to run a perf test instead of a bisect job. Try reading any possible
  # perf test config files.
  perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
  for current_perf_cfg_file in perf_cfg_files:
    if opts.path_to_config:
      path_to_perf_cfg = opts.path_to_config
    else:
      path_to_perf_cfg = os.path.join(
          os.path.abspath(os.path.dirname(sys.argv[0])),
          current_perf_cfg_file)

    config = _LoadConfigFile(path_to_perf_cfg)
    config_is_valid = _ValidatePerfConfigFile(config)

    if config and config_is_valid:
      return _SetupAndRunPerformanceTest(config, opts.path_to_goma)

  # If no valid config file was found, check whether the request came from
  # the commit queue; if so, run the Telemetry benchmarks modified by the
  # patch.
  if opts.build_properties.get('requester') == _COMMIT_QUEUE_USER:
    return _SetupAndRunPerformanceTest(
        config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True)

  print ('Error: Could not load config file. Double check your changes to '
         'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
  return 1


if __name__ == '__main__':
  sys.exit(main())