# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

6 """Run Performance Test Bisect Tool
8 This script is used by a try bot to run the bisect script with the parameters
9 specified in the bisect config file. It checks out a copy of the depot in
10 a subdirectory 'bisect' of the working directory provided, annd runs the
24 from auto_bisect
import bisect_perf_regression
25 from auto_bisect
import bisect_utils
26 from auto_bisect
import math_utils
27 from auto_bisect
import source_control
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
BENCHMARKS_JSON_FILE = 'benchmarks.json'

# This is used to identify tryjobs triggered by the commit queue.
_COMMIT_QUEUE_USERS = [
    '5071639625-1lppvbtck1morgivc6sq4dul7klu27sd@developer.gserviceaccount.com',
    'commit-bot@chromium.org']

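# Illustrative invocation (a sketch, not from the original source; paths are
# made up). The flags are the ones defined by _OptionParser() below:
#
#   python run-bisect-perf-regression.py -w /b/bisect -p /b/build/goma

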
class Goma(object):

  def __init__(self, path_to_goma):
    self._abs_path_to_goma = None
    self._abs_path_to_goma_file = None
    if not path_to_goma:
      return
    self._abs_path_to_goma = os.path.abspath(path_to_goma)
    filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
    self._abs_path_to_goma_file = os.path.join(self._abs_path_to_goma,
                                               filename)

  def __enter__(self):
    if self._HasGomaPath():
      self._SetupAndStart()
    return self

  def __exit__(self, *_):
    if self._HasGomaPath():
      self._Stop()

  def _HasGomaPath(self):
    return bool(self._abs_path_to_goma)

  def _SetupEnvVars(self):
    if os.name == 'nt':
      os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                          ' cl.exe')
      os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
                           ' cl.exe')
    else:
      os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
                                            os.environ['PATH']])

  def _SetupAndStart(self):
    """Sets up goma environment variables and launches goma.

    Returns:
      True if successful.
    """
    self._SetupEnvVars()

    # Sometimes goma is lingering around if something went bad on a previous
    # run. Stop it before starting a new process. We can ignore the return
    # code, since it returns an error if goma wasn't running.
    self._Stop()

    if subprocess.call([self._abs_path_to_goma_file, 'start']):
      raise RuntimeError('Goma failed to start.')

  def _Stop(self):
    subprocess.call([self._abs_path_to_goma_file, 'stop'])

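# Usage sketch: Goma is used as a context manager later in this script,
#
#   with Goma(path_to_goma) as _:
#     ...  # goma is started here (if a path was given) and stopped on exit
#
# A falsy path_to_goma makes both __enter__ and __exit__ no-ops.

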
def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    print
    traceback.print_exc()
    print
    return {}

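# For reference, a config file loaded above is itself a Python module that
# assigns a global 'config' dict. A minimal bisect.cfg sketch (the revision
# values here are made up):
#
#   config = {
#     'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '306475',
#     'bad_revision': '306478',
#   }

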
def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty strings.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    if not value or type(value) is not str:
      return False
  return True

def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])

def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])

def _OutputFailedResults(text_to_print):
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print
  print text_to_print
  print
  bisect_utils.OutputAnnotationStepClosed()

def _CreateBisectOptionsFromConfig(config):
  print config['command']
  opts_dict = {}
  opts_dict['command'] = config['command']
  opts_dict['metric'] = config.get('metric')

  if config['repeat_count']:
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config['truncate_percent']:
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config['max_time_minutes']:
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if config.has_key('use_goma'):
    opts_dict['use_goma'] = config['use_goma']
  if config.has_key('goma_dir'):
    opts_dict['goma_dir'] = config['goma_dir']

  if config.has_key('improvement_direction'):
    opts_dict['improvement_direction'] = int(config['improvement_direction'])

  if config.has_key('target_arch'):
    opts_dict['target_arch'] = config['target_arch']

  if config.has_key('bug_id') and str(config['bug_id']).isdigit():
    opts_dict['bug_id'] = config['bug_id']

  opts_dict['build_preference'] = 'ninja'
  opts_dict['output_buildbot_annotations'] = True

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    # TODO(prasadv): Remove android-chrome-shell check once we confirm that
    # there are no pending bisect jobs with this in command.
    if any(item in config['command']
           for item in ['android-chrome-shell', 'android-chromium']):
      opts_dict['target_platform'] = 'android'
    elif 'android-chrome' in config['command']:
      opts_dict['target_platform'] = 'android-chrome'
    else:
      opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)

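# Rough sketch of the mapping performed above (all values made up): a config
# such as
#   {'command': 'tools/perf/run_benchmark --browser=android-chromium foo',
#    'metric': 'warm_times/page_load_time', 'repeat_count': '20',
#    'truncate_percent': '25', 'max_time_minutes': '8'}
# yields BisectOptions with repeat_test_count=20 and
# target_platform='android'.

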
def _ParseCloudLinksFromOutput(output):
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  results = {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }

  return results

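# Illustrative match for the first pattern above (URL suffix made up): a test
# output line like
#   "View online at http://storage.googleapis.com/chromium-telemetry/html-results/results-2015-01-01_12-00 "
# (surrounded by whitespace) lands in results['html-results'].

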
def _ParseAndOutputCloudLinks(
    results_without_patch, results_with_patch, annotations_dict):
  cloud_links_without_patch = _ParseCloudLinksFromOutput(
      results_without_patch[2])
  cloud_links_with_patch = _ParseCloudLinksFromOutput(
      results_with_patch[2])

  cloud_file_link = (cloud_links_without_patch['html-results'][0]
                     if cloud_links_without_patch['html-results'] else '')

  profiler_file_links_with_patch = cloud_links_with_patch['profiler']
  profiler_file_links_without_patch = cloud_links_without_patch['profiler']

  # Calculate the % difference in the means of the 2 runs.
  percent_diff_in_means = None
  std_err = None
  if (results_with_patch[0].has_key('mean') and
      results_with_patch[0].has_key('values')):
    percent_diff_in_means = (results_with_patch[0]['mean'] /
        max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = math_utils.PooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

  if percent_diff_in_means is not None and std_err is not None:
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                         'Std. Error'.center(20, ' '))
    print ' %s %s %s' % ('Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % ('No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
      bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
  elif cloud_file_link:
    bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

  if profiler_file_links_with_patch and profiler_file_links_without_patch:
    for i in xrange(len(profiler_file_links_with_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
          profiler_file_links_with_patch[i])
    for i in xrange(len(profiler_file_links_without_patch)):
      bisect_utils.OutputAnnotationStepLink(
          '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
          profiler_file_links_without_patch[i])

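# Worked example for the percent-diff computation above (made-up numbers):
# a with-patch mean of 55.0 and a without-patch mean of 50.0 give
# (55.0 / 50.0) * 100.0 - 100.0 = +10.0, i.e. a 10% delta relative to the
# unpatched run; whether that is a regression depends on the metric's
# improvement direction.

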
def _ResolveRevisionsFromConfig(config):
  if not 'good_revision' in config and not 'bad_revision' in config:
    return (None, None)

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)

def _GetStepAnnotationStringsDict(config):
  if 'good_revision' in config and 'bad_revision' in config:
    return {
        'build1': 'Building [%s]' % config['good_revision'],
        'build2': 'Building [%s]' % config['bad_revision'],
        'run1': 'Running [%s]' % config['good_revision'],
        'run2': 'Running [%s]' % config['bad_revision'],
        'sync1': 'Syncing [%s]' % config['good_revision'],
        'sync2': 'Syncing [%s]' % config['bad_revision'],
        'results_label1': config['good_revision'],
        'results_label2': config['bad_revision'],
        'profiler_link1': 'Profiler Data - %s' % config['good_revision'],
        'profiler_link2': 'Profiler Data - %s' % config['bad_revision'],
    }
  else:
    return {
        'build1': 'Building With Patch',
        'build2': 'Building Without Patch',
        'run1': 'Running With Patch',
        'run2': 'Running Without Patch',
        'results_label1': 'Patch',
        'results_label2': 'ToT',
        'profiler_link1': 'With Patch - Profiler Data',
        'profiler_link2': 'Without Patch - Profiler Data',
    }

def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()

def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label,
      allow_flakes=False)

  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results

def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # The bisect script expects to be run from the src directory.
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud-stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)

def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError, e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e.message)
    return 1

def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = [
      'python',
      os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
      '--command', config['command'],
      '--good_revision', config['good_revision'],
      '--bad_revision', config['bad_revision'],
      '--working_directory', working_directory,
      '--output_buildbot_annotations'
  ]

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    if config.has_key(config_key):
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in the
  # Telemetry command.
  # TODO(prasadv): Remove android-chrome-shell check once we confirm there are
  # no pending bisect jobs with this in command.
  if any(item in config['command']
         for item in ['android-chrome-shell', 'android-chromium']):
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # The goma service is not supported on Windows XP. Moreover, Chrome isn't
    # compiled when the gs_bucket flag is set; build archives are used
    # instead, so the goma service is skipped on Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    else:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n' %
           return_code)

  return return_code

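# To make the command construction above concrete, cmd ends up looking
# roughly like the following for a valid bisect config (values made up):
#
#   python .../auto_bisect/bisect_perf_regression.py \
#       --command "tools/perf/run_benchmark -v --browser=release sunspider" \
#       --good_revision 306475 --bad_revision 306478 \
#       --working_directory /b/bisect --output_buildbot_annotations \
#       --build_preference ninja

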
def _PrintConfigStep(config):
  """Prints out the given config, along with Buildbot annotations."""
  bisect_utils.OutputAnnotationStepStart('Config')
  print
  for k, v in config.iteritems():
    print '  %s : %s' % (k, v)
  print
  bisect_utils.OutputAnnotationStepClosed()

def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chromium'
  elif 'x64' in bot_platform:
    return 'release_x64'
  return 'release'

def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)

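# Example of the command this produces (a sketch; the benchmark name is
# made up):
#
#   _GuessTelemetryTestCommand('win', 'sunspider')
#   # -> 'python tools/perf/run_benchmark --browser=release sunspider'

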
def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  # TODO(prasadv): Remove android-chrome-shell check once we confirm there are
  # no pending bisect jobs with this in command.
  if any(item in opts_dict['command']
         for item in ['android-chrome-shell', 'android-chromium']):
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)

def _GetModifiedFilesFromPatch(cwd=None):
  """Gets list of files modified in the current patch."""
  log_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  modified_files = log_output.split()
  return modified_files

def _GetAffectedBenchmarkModuleNames():
  """Gets list of modified benchmark files under tools/perf/benchmarks."""
  all_affected_files = _GetModifiedFilesFromPatch()
  modified_benchmarks = []
  for affected_file in all_affected_files:
    if (affected_file.startswith(PERF_BENCHMARKS_PATH) or
        affected_file.startswith(PERF_MEASUREMENTS_PATH)):
      benchmark = os.path.basename(os.path.splitext(affected_file)[0])
      modified_benchmarks.append(benchmark)
  return modified_benchmarks

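# For instance, a patch touching 'tools/perf/benchmarks/blink_perf.py'
# (a hypothetical file name) yields the module name 'blink_perf' via the
# os.path.splitext/os.path.basename combination above.

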
def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmark names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError, e:
      if e.errno != errno.ENOENT:
        raise
  return None

def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(current_value == True for current_value in results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1

def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # output should include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # The subprocess that executes the test command returns 0 on a successful
  # run and a non-zero value otherwise.
  return return_code == 0

def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmarks for the commit queue."""
  os.chdir(SRC_DIR)
  # Determine the bot platform by reading the buildbot name from the
  # environment variable.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch doesn't modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # The bisect script expects to be run from the src directory.
  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)

775 """Returns the options parser for run-bisect-perf-regression.py."""
777 def ConvertJson(option
, _
, value
, parser
):
778 """Provides an OptionParser callback to unmarshal a JSON string."""
779 setattr(parser
.values
, option
.dest
, json
.loads(value
))
781 usage
= ('%prog [options] [-- chromium-options]\n'
782 'Used by a try bot to run the bisection script using the parameters'
783 ' provided in the auto_bisect/bisect.cfg file.')
784 parser
= optparse
.OptionParser(usage
=usage
)
785 parser
.add_option('-w', '--working_directory',
787 help='A working directory to supply to the bisection '
788 'script, which will use it as the location to checkout '
789 'a copy of the chromium depot.')
790 parser
.add_option('-p', '--path_to_goma',
792 help='Path to goma directory. If this is supplied, goma '
793 'builds will be enabled.')
794 parser
.add_option('--path_to_config',
796 help='Path to the config file to use. If this is supplied, '
797 'the bisect script will use this to override the default '
798 'config file path. The script will attempt to load it '
799 'as a bisect config first, then a perf config.')
800 parser
.add_option('--extra_src',
802 help='Path to extra source file. If this is supplied, '
803 'bisect script will use this to override default behavior.')
804 parser
.add_option('--dry_run',
806 help='The script will perform the full bisect, but '
807 'without syncing, building, or running the performance '
809 # This argument is passed by buildbot to supply build properties to the bisect
810 # script. Note: Don't change "--build-properties" property name.
811 parser
.add_option('--build-properties', action
='callback',
812 dest
='build_properties',
813 callback
=ConvertJson
, type='string',
815 help='build properties in JSON format')
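# Thanks to the ConvertJson callback, buildbot can pass, for example,
#
#   --build-properties='{"requester": "commit-bot@chromium.org"}'
#
# (the value here is one of the _COMMIT_QUEUE_USERS entries), and parse_args()
# exposes opts.build_properties as a dict, which main() checks to detect
# commit-queue try jobs.

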
821 """Entry point for run-bisect-perf-regression.py.
823 Reads the config file, and then tries to either bisect a regression or
824 just run a performance test, depending on the particular config parameters
825 specified in the config file.
827 parser
= _OptionParser()
828 opts
, _
= parser
.parse_args()
830 # Use the default config file path unless one was specified.
831 config_path
= BISECT_CONFIG_PATH
832 if opts
.path_to_config
:
833 config_path
= opts
.path_to_config
834 config
= _LoadConfigFile(config_path
)
836 # Check if the config is valid for running bisect job.
837 config_is_valid
= _ValidateBisectConfigFile(config
)
839 if config
and config_is_valid
:
840 if not opts
.working_directory
:
841 print 'Error: missing required parameter: --working_directory\n'
845 return _RunBisectionScript(
846 config
, opts
.working_directory
, opts
.path_to_goma
, opts
.extra_src
,
849 # If it wasn't valid for running a bisect, then maybe the user wanted
850 # to run a perf test instead of a bisect job. Try reading any possible
851 # perf test config files.
852 perf_cfg_files
= [RUN_TEST_CONFIG_PATH
, WEBKIT_RUN_TEST_CONFIG_PATH
]
853 for current_perf_cfg_file
in perf_cfg_files
:
854 if opts
.path_to_config
:
855 path_to_perf_cfg
= opts
.path_to_config
857 path_to_perf_cfg
= os
.path
.join(
858 os
.path
.abspath(os
.path
.dirname(sys
.argv
[0])),
859 current_perf_cfg_file
)
861 config
= _LoadConfigFile(path_to_perf_cfg
)
862 config_is_valid
= _ValidatePerfConfigFile(config
)
864 if config
and config_is_valid
:
865 return _SetupAndRunPerformanceTest(config
, opts
.path_to_goma
)
867 # If there are no changes to config file, then check if the request is
868 # from the commit queue, if so then run the modified Telemetry benchmarks for
870 if opts
.build_properties
.get('requester') in _COMMIT_QUEUE_USERS
:
871 return _SetupAndRunPerformanceTest(
872 config
={}, path_to_goma
=opts
.path_to_goma
, is_cq_tryjob
=True)
874 print ('Error: Could not load config file. Double check your changes to '
875 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
if __name__ == '__main__':
  sys.exit(main())