2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Run Performance Test Bisect Tool
8 This script is used by a try bot to run the bisect script with the parameters
9 specified in the bisect config file. It checks out a copy of the depot in
a subdirectory 'bisect' of the working directory provided, and runs the
24 from auto_bisect
import bisect_perf_regression
25 from auto_bisect
import bisect_utils
26 from auto_bisect
import math_utils
27 from auto_bisect
import source_control
# Environment variables used to pass the CrOS device configuration to the
# bisect script when a CrOS build is selected.
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
# Directory containing this script; other paths below are derived from it.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
# Default locations of the bisect and perf-try-job config files.
BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
    SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')
# Location of Telemetry benchmark modules, relative to the src checkout.
PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
# Name of the buildbot-provided environment variable holding the builder name.
BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
# Scratch file written when listing available benchmarks.
BENCHMARKS_JSON_FILE = 'benchmarks.json'
45 def __init__(self
, path_to_goma
):
46 self
._abs
_path
_to
_goma
= None
47 self
._abs
_path
_to
_goma
_file
= None
50 self
._abs
_path
_to
_goma
= os
.path
.abspath(path_to_goma
)
51 filename
= 'goma_ctl.bat' if os
.name
== 'nt' else 'goma_ctl.sh'
52 self
._abs
_path
_to
_goma
_file
= os
.path
.join(self
._abs
_path
_to
_goma
, filename
)
55 if self
._HasGomaPath
():
59 def __exit__(self
, *_
):
60 if self
._HasGomaPath
():
63 def _HasGomaPath(self
):
64 return bool(self
._abs
_path
_to
_goma
)
66 def _SetupEnvVars(self
):
68 os
.environ
['CC'] = (os
.path
.join(self
._abs
_path
_to
_goma
, 'gomacc.exe') +
70 os
.environ
['CXX'] = (os
.path
.join(self
._abs
_path
_to
_goma
, 'gomacc.exe') +
73 os
.environ
['PATH'] = os
.pathsep
.join([self
._abs
_path
_to
_goma
,
76 def _SetupAndStart(self
):
77 """Sets up goma and launches it.
80 path_to_goma: Path to goma directory.
83 True if successful."""
86 # Sometimes goma is lingering around if something went bad on a previous
87 # run. Stop it before starting a new process. Can ignore the return code
88 # since it will return an error if it wasn't running.
91 if subprocess
.call([self
._abs
_path
_to
_goma
_file
, 'start']):
92 raise RuntimeError('Goma failed to start.')
95 subprocess
.call([self
._abs
_path
_to
_goma
_file
, 'stop'])
def _LoadConfigFile(config_file_path):
  """Attempts to load the specified config file as a module
  and grab the global config dict.

  Args:
    config_file_path: Path to the config file.

  Returns:
    If successful, returns the config dict loaded from the file. If no
    such dictionary could be loaded, returns the empty dictionary.
  """
  try:
    local_vars = {}
    execfile(config_file_path, local_vars)
    return local_vars['config']
  except Exception:
    # Best-effort load: report the traceback but fall back to an empty dict
    # so callers can try the next candidate config file.
    print
    traceback.print_exc()
    print
    return {}
def _ValidateConfigFile(config_contents, required_parameters):
  """Validates the config file contents, checking whether all values are
  non-empty strings.

  Args:
    config_contents: A config dictionary.
    required_parameters: A list of parameters to check for.

  Returns:
    True if valid, False otherwise.
  """
  for parameter in required_parameters:
    if parameter not in config_contents:
      return False
    value = config_contents[parameter]
    # isinstance replaces the exact `type(value) is not str` check — same
    # outcome for the plain strings a config supplies, but idiomatic.
    if not value or not isinstance(value, str):
      return False
  return True
def _ValidatePerfConfigFile(config_contents):
  """Validates the perf config file contents.

  This is used when we're doing a perf try job, rather than a bisect.
  The config file is called run-perf-test.cfg by default.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid, False otherwise.
  """
  return _ValidateConfigFile(config_contents, required_parameters=['command'])
def _ValidateBisectConfigFile(config_contents):
  """Validates the bisect config file contents.

  The parameters checked are the required parameters; any additional optional
  parameters won't be checked and validation will still pass.

  Args:
    config_contents: A config dictionary.

  Returns:
    True if valid, False otherwise.
  """
  return _ValidateConfigFile(
      config_contents,
      required_parameters=['command', 'good_revision', 'bad_revision'])
175 def _OutputFailedResults(text_to_print
):
176 bisect_utils
.OutputAnnotationStepStart('Results - Failed')
180 bisect_utils
.OutputAnnotationStepClosed()
183 def _CreateBisectOptionsFromConfig(config
):
184 print config
['command']
186 opts_dict
['command'] = config
['command']
187 opts_dict
['metric'] = config
.get('metric')
189 if config
['repeat_count']:
190 opts_dict
['repeat_test_count'] = int(config
['repeat_count'])
192 if config
['truncate_percent']:
193 opts_dict
['truncate_percent'] = int(config
['truncate_percent'])
195 if config
['max_time_minutes']:
196 opts_dict
['max_time_minutes'] = int(config
['max_time_minutes'])
198 if config
.has_key('use_goma'):
199 opts_dict
['use_goma'] = config
['use_goma']
200 if config
.has_key('goma_dir'):
201 opts_dict
['goma_dir'] = config
['goma_dir']
203 if config
.has_key('improvement_direction'):
204 opts_dict
['improvement_direction'] = int(config
['improvement_direction'])
206 if config
.has_key('target_arch'):
207 opts_dict
['target_arch'] = config
['target_arch']
209 if config
.has_key('bug_id') and str(config
['bug_id']).isdigit():
210 opts_dict
['bug_id'] = config
['bug_id']
212 opts_dict
['build_preference'] = 'ninja'
213 opts_dict
['output_buildbot_annotations'] = True
215 if '--browser=cros' in config
['command']:
216 opts_dict
['target_platform'] = 'cros'
218 if os
.environ
[CROS_BOARD_ENV
] and os
.environ
[CROS_IP_ENV
]:
219 opts_dict
['cros_board'] = os
.environ
[CROS_BOARD_ENV
]
220 opts_dict
['cros_remote_ip'] = os
.environ
[CROS_IP_ENV
]
222 raise RuntimeError('CrOS build selected, but BISECT_CROS_IP or'
223 'BISECT_CROS_BOARD undefined.')
224 elif 'android' in config
['command']:
225 if 'android-chrome-shell' in config
['command']:
226 opts_dict
['target_platform'] = 'android'
227 elif 'android-chrome' in config
['command']:
228 opts_dict
['target_platform'] = 'android-chrome'
230 opts_dict
['target_platform'] = 'android'
232 return bisect_perf_regression
.BisectOptions
.FromDict(opts_dict
)
def _ParseCloudLinksFromOutput(output):
  """Extracts cloud-storage result links from perf test output.

  Args:
    output: Text output of a perf test run.

  Returns:
    Dict with keys 'html-results' and 'profiler', each mapping to a list of
    matched URLs (possibly empty).
  """
  # Both halves of each pattern are raw strings now: the second half
  # previously used '\s' inside a non-raw literal, which is an invalid
  # string escape (deprecated, and an error in newer Pythons).
  html_results_pattern = re.compile(
      r'\s(?P<VALUES>http://storage.googleapis.com/' +
      r'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s',
      re.MULTILINE)
  profiler_pattern = re.compile(
      r'\s(?P<VALUES>https://console.developers.google.com/' +
      r'm/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)\s',
      re.MULTILINE)

  return {
      'html-results': html_results_pattern.findall(output),
      'profiler': profiler_pattern.findall(output),
  }
253 def _ParseAndOutputCloudLinks(
254 results_without_patch
, results_with_patch
, annotations_dict
):
255 cloud_links_without_patch
= _ParseCloudLinksFromOutput(
256 results_without_patch
[2])
257 cloud_links_with_patch
= _ParseCloudLinksFromOutput(
258 results_with_patch
[2])
260 cloud_file_link
= (cloud_links_without_patch
['html-results'][0]
261 if cloud_links_without_patch
['html-results'] else '')
263 profiler_file_links_with_patch
= cloud_links_with_patch
['profiler']
264 profiler_file_links_without_patch
= cloud_links_without_patch
['profiler']
266 # Calculate the % difference in the means of the 2 runs.
267 percent_diff_in_means
= None
269 if (results_with_patch
[0].has_key('mean') and
270 results_with_patch
[0].has_key('values')):
271 percent_diff_in_means
= (results_with_patch
[0]['mean'] /
272 max(0.0001, results_without_patch
[0]['mean'])) * 100.0 - 100.0
273 std_err
= math_utils
.PooledStandardError(
274 [results_with_patch
[0]['values'], results_without_patch
[0]['values']])
276 if percent_diff_in_means
is not None and std_err
is not None:
277 bisect_utils
.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
278 (percent_diff_in_means
, std_err
))
279 print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
280 'Std. Error'.center(20, ' '))
281 print ' %s %s %s' % ('Patch'.center(10, ' '),
282 ('%.02f' % results_with_patch
[0]['mean']).center(20, ' '),
283 ('%.02f' % results_with_patch
[0]['std_err']).center(20, ' '))
284 print ' %s %s %s' % ('No Patch'.center(10, ' '),
285 ('%.02f' % results_without_patch
[0]['mean']).center(20, ' '),
286 ('%.02f' % results_without_patch
[0]['std_err']).center(20, ' '))
288 bisect_utils
.OutputAnnotationStepLink('HTML Results', cloud_file_link
)
289 bisect_utils
.OutputAnnotationStepClosed()
290 elif cloud_file_link
:
291 bisect_utils
.OutputAnnotationStepLink('HTML Results', cloud_file_link
)
293 if profiler_file_links_with_patch
and profiler_file_links_without_patch
:
294 for i
in xrange(len(profiler_file_links_with_patch
)):
295 bisect_utils
.OutputAnnotationStepLink(
296 '%s[%d]' % (annotations_dict
.get('profiler_link1'), i
),
297 profiler_file_links_with_patch
[i
])
298 for i
in xrange(len(profiler_file_links_without_patch
)):
299 bisect_utils
.OutputAnnotationStepLink(
300 '%s[%d]' % (annotations_dict
.get('profiler_link2'), i
),
301 profiler_file_links_without_patch
[i
])
def _ResolveRevisionsFromConfig(config):
  """Resolves the good/bad revisions in the config to git hashes.

  Args:
    config: The config dict; may or may not contain revision keys.

  Returns:
    A (good_revision, bad_revision) tuple of git hashes, or (None, None)
    when the config specifies no revisions (plain perf try job).

  Raises:
    RuntimeError: If either revision cannot be resolved.
  """
  if not 'good_revision' in config and not 'bad_revision' in config:
    return None, None

  bad_revision = source_control.ResolveToRevision(
      config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100)
  if not bad_revision:
    # Fixed: the revision was previously passed as a second argument to
    # RuntimeError instead of being %-formatted into the message.
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['bad_revision'])
  good_revision = source_control.ResolveToRevision(
      config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100)
  if not good_revision:
    raise RuntimeError('Failed to resolve [%s] to git hash.' %
                       config['good_revision'])

  return (good_revision, bad_revision)
def _GetStepAnnotationStringsDict(config):
  """Returns the buildbot step-name strings for the two runs.

  When the config names explicit good/bad revisions the labels embed those
  revisions; otherwise generic With/Without-Patch labels are used.
  """
  if 'good_revision' in config and 'bad_revision' in config:
    good = config['good_revision']
    bad = config['bad_revision']
    return {
        'build1': 'Building [%s]' % good,
        'build2': 'Building [%s]' % bad,
        'run1': 'Running [%s]' % good,
        'run2': 'Running [%s]' % bad,
        'sync1': 'Syncing [%s]' % good,
        'sync2': 'Syncing [%s]' % bad,
        'results_label1': good,
        'results_label2': bad,
        'profiler_link1': 'Profiler Data - %s' % good,
        'profiler_link2': 'Profiler Data - %s' % bad,
    }
  return {
      'build1': 'Building With Patch',
      'build2': 'Building Without Patch',
      'run1': 'Running With Patch',
      'run2': 'Running Without Patch',
      'results_label1': 'Patch',
      'results_label2': 'ToT',
      'profiler_link1': 'With Patch - Profiler Data',
      'profiler_link2': 'Without Patch - Profiler Data',
  }
def _RunBuildStepForPerformanceTest(bisect_instance,
                                    build_string,
                                    sync_string,
                                    revision):
  """Syncs (optionally) and builds Chromium, with buildbot annotations.

  Args:
    bisect_instance: A BisectPerformanceMetrics instance.
    build_string: Step name for the build annotation.
    sync_string: Step name for the sync annotation.
    revision: Revision to sync to, or a falsy value to skip syncing.

  Raises:
    RuntimeError: If syncing, gclient runhooks, or the build fails.
  """
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed [%s].' % sync_string)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()
def _RunCommandStepForPerformanceTest(bisect_instance,
                                      opts,
                                      reset_on_first_run,
                                      upload_on_last_run,
                                      results_label,
                                      run_string):
  """Runs the performance test and parses its results, with annotations.

  Args:
    bisect_instance: A BisectPerformanceMetrics instance.
    opts: BisectOptions holding the command and metric to run.
    reset_on_first_run: Passed through to the test runner.
    upload_on_last_run: Passed through to the test runner.
    results_label: Label attached to this run's results.
    run_string: Step name for the annotation.

  Returns:
    The results tuple from RunPerformanceTestAndParseResults.

  Raises:
    RuntimeError: If the performance test failed to run.
  """
  bisect_utils.OutputAnnotationStepStart(run_string)

  results = bisect_instance.RunPerformanceTestAndParseResults(
      opts.command,
      opts.metric,
      reset_on_first_run=reset_on_first_run,
      upload_on_last_run=upload_on_last_run,
      results_label=results_label)

  # results[1] is the runner's return code; non-zero means failure.
  if results[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  return results
def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know that
  # the perf try bot currently only supports src/ and src/third_party/WebKit, we
  # simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)
444 def _SetupAndRunPerformanceTest(config
, path_to_goma
, is_cq_tryjob
=False):
445 """Attempts to build and run the current revision with and without the
446 current patch, with the parameters passed in.
449 config: The config read from run-perf-test.cfg.
450 path_to_goma: Path to goma directory.
451 is_cq_tryjob: Whether or not the try job was initiated by commit queue.
454 An exit code: 0 on success, otherwise 1.
456 if platform
.release() == 'XP':
457 print 'Windows XP is not supported for perf try jobs because it lacks '
458 print 'goma support. Please refer to crbug.com/330900.'
461 with
Goma(path_to_goma
) as _
:
462 config
['use_goma'] = bool(path_to_goma
)
463 if config
['use_goma']:
464 config
['goma_dir'] = os
.path
.abspath(path_to_goma
)
466 _RunPerformanceTest(config
)
468 return _RunBenchmarksForCommitQueue(config
)
470 except RuntimeError, e
:
471 bisect_utils
.OutputAnnotationStepFailure()
472 bisect_utils
.OutputAnnotationStepClosed()
473 _OutputFailedResults('Error: %s' % e
.message
)
def _RunBisectionScript(
    config, working_directory, path_to_goma, path_to_extra_src, dry_run):
  """Attempts to execute the bisect script with the given parameters.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the bisect script,
        where it will store its own copy of the depot.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    An exit status code: 0 on success, otherwise 1.
  """
  _PrintConfigStep(config)

  # Construct the basic command with all necessary arguments.
  cmd = ['python',
         os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
         '--command', config['command'],
         '--good_revision', config['good_revision'],
         '--bad_revision', config['bad_revision'],
         '--working_directory', working_directory,
         '--output_buildbot_annotations']

  # Add flags for any optional config parameters if given in the config.
  options = [
      ('metric', '--metric'),
      ('repeat_count', '--repeat_test_count'),
      ('truncate_percent', '--truncate_percent'),
      ('max_time_minutes', '--max_time_minutes'),
      ('bisect_mode', '--bisect_mode'),
      ('improvement_direction', '--improvement_direction'),
      ('bug_id', '--bug_id'),
      ('builder_type', '--builder_type'),
      ('target_arch', '--target_arch'),
  ]
  for config_key, flag in options:
    # `in` replaces the deprecated dict.has_key().
    if config_key in config:
      cmd.extend([flag, config[config_key]])

  cmd.extend(['--build_preference', 'ninja'])

  # Possibly set the target platform name based on the browser name in a
  # Telemetry command.
  if 'android-chrome-shell' in config['command']:
    cmd.extend(['--target_platform', 'android'])
  elif 'android-chrome' in config['command']:
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in config['command']:
    cmd.extend(['--target_platform', 'android'])

  if path_to_goma:
    # For Windows XP platforms, goma service is not supported.
    # Moreover we don't compile chrome when gs_bucket flag is set instead
    # use builds archives, therefore ignore goma service for Windows XP.
    # See http://crbug.com/330900.
    if platform.release() == 'XP':
      print ('Goma doesn\'t have a win32 binary, therefore it is not supported '
             'on Windows XP platform. Please refer to crbug.com/330900.')
      path_to_goma = None
    else:
      cmd.append('--use_goma')
      cmd.append('--goma_dir')
      cmd.append(os.path.abspath(path_to_goma))

  if path_to_extra_src:
    cmd.extend(['--extra_src', path_to_extra_src])

  if dry_run:
    cmd.extend([
        '--debug_ignore_build',
        '--debug_ignore_sync',
        '--debug_ignore_perf_test'])

  cmd = [str(c) for c in cmd]

  with Goma(path_to_goma) as _:
    return_code = subprocess.call(cmd)

  if return_code:
    print ('Error: bisect_perf_regression.py returned with error %d\n' %
           return_code)

  return return_code
567 def _PrintConfigStep(config
):
568 """Prints out the given config, along with Buildbot annotations."""
569 bisect_utils
.OutputAnnotationStepStart('Config')
571 for k
, v
in config
.iteritems():
572 print ' %s : %s' % (k
, v
)
574 bisect_utils
.OutputAnnotationStepClosed()
def _GetBrowserType(bot_platform):
  """Gets the browser type to be used in the run benchmark command."""
  if bot_platform == 'android':
    return 'android-chrome-shell'
  # NOTE(review): non-android return values reconstructed from mangled
  # source — confirm against upstream.
  if 'x64' in bot_platform:
    return 'release_x64'
  return 'release'
def _GuessTelemetryTestCommand(bot_platform, test_name=None):
  """Creates a Telemetry benchmark command based on bot and test name."""
  command = []
  # On Windows, Python scripts should be prefixed with the python command.
  if bot_platform == 'win':
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.append('--browser=%s' % _GetBrowserType(bot_platform))
  if test_name:
    command.append(test_name)

  return ' '.join(command)
def _GetConfigBasedOnPlatform(config, bot_name, test_name):
  """Generates required options to create BisectPerformanceMetrics instance."""
  opts_dict = {
      'command': _GuessTelemetryTestCommand(bot_name, test_name),
      'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
      'repeat_test_count': 1,
      'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
  }

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']
  if 'goma_dir' in config:
    opts_dict['goma_dir'] = config['goma_dir']
  if 'android-chrome-shell' in opts_dict['command']:
    opts_dict['target_platform'] = 'android'

  return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
def _GetModifiedFilesFromPatch(cwd=None):
  """Returns the list of file paths modified in the current patch."""
  diff_output = bisect_utils.CheckRunGit(
      ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
  return diff_output.split()
def _GetAffectedBenchmarkModuleNames():
  """Returns names of benchmark modules modified under tools/perf/benchmarks."""
  return [
      os.path.basename(os.path.splitext(path)[0])
      for path in _GetModifiedFilesFromPatch()
      if path.startswith(PERF_BENCHMARKS_PATH)
  ]
def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmarks names as a list."""
  browser_type = _GetBrowserType(bot_platform)
  if os.path.exists(BENCHMARKS_JSON_FILE):
    os.remove(BENCHMARKS_JSON_FILE)
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  # NOTE(review): run_benchmark arguments between the script name and the
  # JSON output file were lost in the mangled source; reconstructed as the
  # 'list' subcommand with browser and JSON output — confirm upstream.
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      if tests_data.get('steps'):
        return tests_data.get('steps').keys()
  finally:
    try:
      if os.path.exists(BENCHMARKS_JSON_FILE):
        os.remove(BENCHMARKS_JSON_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
  return None
679 def _OutputOverallResults(results
):
680 """Creates results step and prints results on buildbot job."""
681 test_status
= all(current_value
== True for current_value
in results
.values())
682 bisect_utils
.OutputAnnotationStepStart(
683 'Results - %s' % ('Passed' if test_status
else 'Failed'))
685 print 'Results of benchmarks:'
687 for benchmark
, result
in results
.iteritems():
688 print '%s: %s' % (benchmark
, 'Passed' if result
else 'Failed')
690 bisect_utils
.OutputAnnotationStepFailure()
691 bisect_utils
.OutputAnnotationStepClosed()
692 # Returns 0 for success and 1 for failure.
693 return 0 if test_status
else 1
def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark.

  Returns:
    True when the benchmark command exited with code 0.
  """
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and results
  # should also include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s.'
           'Please review the command line:%s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # results[1] contains the return code from subprocess that executes test
  # command, On successful test run it contains 0 otherwise any non-zero value.
  return return_code == 0
def _RunBenchmarksForCommitQueue(config):
  """Runs Telemetry benchmark for the commit queue.

  Returns:
    0 when every affected benchmark passed (or none were affected),
    otherwise 1.
  """
  os.chdir(SRC_DIR)
  # To determine the bot platform by reading buildbot name from environment
  # variable.
  bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
  if not bot_name:
    bot_name = sys.platform
  bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if there are no changes to benchmark any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print
    print ('There are no modification to Telemetry benchmarks,'
           ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Bisect script expects to be run from the src directory
  # Gets required options inorder to create BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark) and
          not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)
def _OptionParser():
  """Returns the options parser for run-bisect-perf-regression.py."""

  def ConvertJson(option, _, value, parser):
    """Provides an OptionParser callback to unmarshal a JSON string."""
    setattr(parser.values, option.dest, json.loads(value))

  usage = ('%prog [options] [-- chromium-options]\n'
           'Used by a try bot to run the bisection script using the parameters'
           ' provided in the auto_bisect/bisect.cfg file.')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='A working directory to supply to the bisection '
                    'script, which will use it as the location to checkout '
                    'a copy of the chromium depot.')
  parser.add_option('-p', '--path_to_goma',
                    type='str',
                    help='Path to goma directory. If this is supplied, goma '
                    'builds will be enabled.')
  parser.add_option('--path_to_config',
                    type='str',
                    help='Path to the config file to use. If this is supplied, '
                    'the bisect script will use this to override the default '
                    'config file path. The script will attempt to load it '
                    'as a bisect config first, then a perf config.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                    'bisect script will use this to override default behavior.')
  parser.add_option('--dry_run',
                    action='store_true',
                    help='The script will perform the full bisect, but '
                    'without syncing, building, or running the performance '
                    'tests.')
  # This argument is passed by buildbot to supply build properties to the bisect
  # script. Note: Don't change "--build-properties" property name.
  parser.add_option('--build-properties', action='callback',
                    dest='build_properties',
                    callback=ConvertJson, type='string',
                    nargs=1, default={},
                    help='build properties in JSON format')

  return parser
805 """Entry point for run-bisect-perf-regression.py.
807 Reads the config file, and then tries to either bisect a regression or
808 just run a performance test, depending on the particular config parameters
809 specified in the config file.
811 parser
= _OptionParser()
812 opts
, _
= parser
.parse_args()
814 # Use the default config file path unless one was specified.
815 config_path
= BISECT_CONFIG_PATH
816 if opts
.path_to_config
:
817 config_path
= opts
.path_to_config
818 config
= _LoadConfigFile(config_path
)
820 # Check if the config is valid for running bisect job.
821 config_is_valid
= _ValidateBisectConfigFile(config
)
823 if config
and config_is_valid
:
824 if not opts
.working_directory
:
825 print 'Error: missing required parameter: --working_directory\n'
829 return _RunBisectionScript(
830 config
, opts
.working_directory
, opts
.path_to_goma
, opts
.extra_src
,
833 # If it wasn't valid for running a bisect, then maybe the user wanted
834 # to run a perf test instead of a bisect job. Try reading any possible
835 # perf test config files.
836 perf_cfg_files
= [RUN_TEST_CONFIG_PATH
, WEBKIT_RUN_TEST_CONFIG_PATH
]
837 for current_perf_cfg_file
in perf_cfg_files
:
838 if opts
.path_to_config
:
839 path_to_perf_cfg
= opts
.path_to_config
841 path_to_perf_cfg
= os
.path
.join(
842 os
.path
.abspath(os
.path
.dirname(sys
.argv
[0])),
843 current_perf_cfg_file
)
845 config
= _LoadConfigFile(path_to_perf_cfg
)
846 config_is_valid
= _ValidatePerfConfigFile(config
)
848 if config
and config_is_valid
:
849 return _SetupAndRunPerformanceTest(config
, opts
.path_to_goma
)
851 # If there are no changes to config file, then check if the request is
852 # from commit-bot, if so then run the modified Telemetry benchmarks for the
854 if opts
.build_properties
.get('requester') == 'commit-bot@chromium.org':
855 return _SetupAndRunPerformanceTest(
856 config
={}, path_to_goma
=opts
.path_to_goma
, is_cq_tryjob
=True)
858 print ('Error: Could not load config file. Double check your changes to '
859 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
if __name__ == '__main__':
  # NOTE(review): body reconstructed from mangled source — main() returns an
  # exit status, which is propagated to the shell here.
  sys.exit(main())