Add unit test for the Settings API Bubble.
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blobe15cabe1cde820eafaacf25b01994a166378511c
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import imp
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 import bisect_utils
55 import post_perf_builder_job
58 try:
59 from telemetry.page import cloud_storage
60 except ImportError:
61 sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'telemetry'))
62 from telemetry.page import cloud_storage
64 # The additional repositories that might need to be bisected.
65 # If the repository has any dependant repositories (such as skia/src needs
66 # skia/include and skia/gyp to be updated), specify them in the 'depends'
67 # so that they're synced appropriately.
68 # Format is:
69 # src: path to the working directory.
70 # recurse: True if this repositry will get bisected.
71 # depends: A list of other repositories that are actually part of the same
72 # repository in svn.
73 # svn: Needed for git workflow to resolve hashes to svn revisions.
74 # from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
}
# All depot names that may participate in a bisect.
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# Paths and patterns used when bisecting ChromeOS ('cros') targets. All paths
# are relative to the chromium 'src' directory, hence the leading '..'.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# Grep pattern used to resolve a cros version string to a commit timestamp.
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# SSH test keys; permissions on these may need resetting after a chroot wipe.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Build outcome codes returned by the bisect machinery.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict in the same format as DEPOT_DEPS_NAME, mapping each
        new depot name to its configuration dict.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Rebind a merged copy (rather than dict.items() concatenation, which is
  # Python-2-only: py3 items() views don't support '+').
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME, **depot_info)
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  # sum() is an exact equivalent of the previous reduce(...) left-fold and
  # also works on Python 3, where the reduce builtin no longer exists.
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateStandardDeviation(v):
  """Returns the sample standard deviation of the values in |v|."""
  if len(v) == 1:
    # A single sample has no spread by definition.
    return 0.0

  mean = CalculateTruncatedMean(v, 0.0)
  sum_of_squares = 0.0
  for value in v:
    deviation = float(value) - mean
    sum_of_squares += deviation * deviation
  # Sample (Bessel-corrected) variance: divide by n - 1.
  variance = sum_of_squares / (len(v) - 1)

  return math.sqrt(variance)
def CalculatePooledStandardError(work_sets):
  """Returns the pooled standard error across several sample sets."""
  weighted_variance_sum = 0.0
  degrees_of_freedom = 0.0
  inverse_size_sum = 0.0

  for sample in work_sets:
    deviation = CalculateStandardDeviation(sample)
    weighted_variance_sum += (len(sample) - 1) * deviation ** 2
    degrees_of_freedom += len(sample) - 1
    inverse_size_sum += 1.0 / len(sample)

  if not degrees_of_freedom:
    # Every set had at most one sample; no spread information available.
    return 0.0
  return (math.sqrt(weighted_variance_sum / degrees_of_freedom) *
          math.sqrt(inverse_size_sum))
def CalculateStandardError(v):
  """Returns the standard error of the mean for the sample |v|."""
  if len(v) <= 1:
    return 0.0
  return CalculateStandardDeviation(v) / math.sqrt(len(v))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # 'win32'/'win64' native builds start with 'win'; cygwin reports exactly
  # 'cygwin'.
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit (or the architecture cannot
    be determined, e.g. when the variables are unset on non-Windows hosts).
  """
  # PROCESSOR_ARCHITEW6432 is only set for a 32-bit process running under
  # WoW64 on a 64-bit OS; when absent, PROCESSOR_ARCHITECTURE is correct.
  # Use .get() so a missing PROCESSOR_ARCHITECTURE (previously an unhandled
  # KeyError) simply yields False.
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE', ''))
  return platform in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  # Matches both 'linux2' (py2) and 'linux' (py3).
  return sys.platform[:5] == 'linux'
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform[:6] == 'darwin'
def GetZipFileName(build_revision=None, target_arch='ia32'):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archive for x64 is still stored with 'win32' suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  archive_base = 'full-build-%s' % PlatformName()
  if build_revision:
    return '%s_%s.zip' % (archive_base, build_revision)
  return archive_base
def GetRemoteBuildPath(build_revision, target_arch='ia32'):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  # Keep this call order: the zip name is resolved before the builder folder.
  archive_name = GetZipFileName(build_revision, target_arch)
  root_folder = GetGSRootFolderName()
  return '%s/%s' % (root_folder, archive_name)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print('Fetching file from gs://%s/%s ...' % (bucket_name, source_path))
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return True
    else:
      print('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  # The original 'except e:' referenced an undefined name and could never
  # catch anything; this is a best-effort fetch, so trap broadly, log, and
  # fall through to the failure return.
  except Exception as e:
    print('Something went wrong while fetching file from cloud: %s' % e)
    if os.path.exists(target_file):
      os.remove(target_file)
  return False
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists or was created; False on any other OSError.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:  # 'except OSError, e' was Python-2-only syntax.
    # An already-existing directory is success; anything else is failure.
    if e.errno != errno.EEXIST:
      return False
  return True
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """Extracts the zip archive into the output directory.

  On Linux and Mac the external 'unzip' tool is used so that symlinks and
  file bits (the executable bit) survive extraction. On Windows, 7-Zip is
  preferred when installed; otherwise Python's zipfile module is used (which
  may struggle with files larger than 512MB).
  """
  MaybeMakeDirectory(output_dir)

  extract_cmd = None
  if IsMac() or IsLinux():
    extract_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    extract_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if extract_cmd:
    # Make sure path is absolute before changing directories.
    absolute_filepath = os.path.abspath(filename)
    previous_cwd = os.getcwd()
    os.chdir(output_dir)
    command = extract_cmd + [absolute_filepath]
    result = RunProcess(command)
    os.chdir(previous_cwd)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert IsWindows()
    archive = zipfile.ZipFile(filename)
    for entry_name in archive.namelist():
      if verbose:
        print('Extracting %s' % entry_name)
      archive.extract(entry_name, output_dir)
def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  return subprocess.call(command, shell=IsWindows())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  proc = subprocess.Popen(command, shell=IsWindows(),
                          stdout=subprocess.PIPE, cwd=cwd)
  output = proc.communicate()[0]
  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output. Asserts if the return code
  of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the call.
  """
  (output, return_code) = RunGit(command, cwd=cwd)
  assert not return_code, (
      'An error occurred while running "git %s"' % ' '.join(command))
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: An unsupported build system was requested.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    # Only touch the environment if ninja isn't already a generator.
    if not gyp_var or 'ninja' not in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Runs a Release-mode make build of |targets|, returning True on success."""
  cmd = ['make', 'BUILDTYPE=Release']
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
def BuildWithNinja(threads, targets):
  """Runs a ninja build of |targets| in out/Release, returning True on
  success."""
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
def BuildWithVisualStudio(targets):
  """Builds |targets| with devenv.com (VS2010), returning True on success."""
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])
  return not RunProcess(cmd)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: Visual Studio could not be located (Windows/msvs), or
          the platform build environment could not be set up.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # NOTE(review): os.getenv may return None here, making the 'in'
        # test raise TypeError when GYP_GENERATORS is unset — confirm.
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    # Factory: picks the Builder subclass that matches the target platform.
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses must implement the actual build step.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    # Subclasses must report where build artifacts are written.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds the chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # goma lets us fan out much wider than local cores.
    threads = 64 if opts.use_goma else None

    build_success = False
    preference = opts.build_preference
    if preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'
    return build_success

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    checkout = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(checkout, 'out')
    if IsMac():
      return os.path.join(checkout, 'xcodebuild')
    if IsWindows():
      return os.path.join(checkout, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to run performance tests on an Android device.
    return ['chrome_shell', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # goma lets us fan out much wider than local cores.
    threads = 64 if opts.use_goma else None

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, self._GetTargets())
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder builds the full Chrome APK for android-chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Everything AndroidBuilder builds, plus the Chrome APK itself.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      # 0o600 is the py2.6+/py3-portable octal literal (was py2-only 0600).
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      # Missing keys or cros_sdk: treat as a failed deployment.
      # ('except OSError, e' was py2-only syntax, and |e| was unused.)
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # Building Chrome from the local checkout rather than the cros tree.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build, image, and deployment were all successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(
        ['sync', '--revision', revision, '--verbose', '--nohooks', '--reset',
         '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    self.opts = opts

  def IsGit(self):
    # This implementation is git-backed; callers use this to pick workflows.
    return True

  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # 'a..b' excludes 'a' itself; append the start revision so the returned
    # range is inclusive.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out with git directly, or 'gclient'/'repo'
          to sync using that tool instead.

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    # NOTE(review): an unrecognized |sync_client| leaves |results| unbound
    # and raises here — confirm callers only pass the three values above.
    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        # Not an SVN revision number, so assume it's already a git ref.
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # git-svn commits embed 'git-svn-id: <repo>@<rev>'; grep the log for
        # the first commit carrying the candidate revision number.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output

            break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        # cros revisions are bisected by commit timestamp, so an int is
        # already in its resolved form.
        return int(revision_to_check)
      else:
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
                              'chromiumos-overlay'))
        # Map the cros version string to the timestamp of the commit that
        # introduced it.
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          git_revision = log_output
          git_revision = int(log_output.strip())
        os.chdir(cwd)

        return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'.
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.

    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    # Each pretty-format placeholder maps positionally onto a key in
    # |targets|; one 'git log' call is issued per field.
    # NOTE(review): %cN/%cE/%cD are *committer* name/email/date but are
    # stored under 'author'/'email'/'date' — confirm that's intended.
    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    for i in xrange(len(formats)):
      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[targets[i]] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # NOTE(review): |file_name| is ignored; this always operates on
    # bisect_utils.FILE_DEPS_GIT — confirm against callers.
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
class BisectPerformanceMetrics(object):
  """BisectPerformanceMetrics performs a bisection against a list of range
  of revisions to narrow down where performance regressions may have
  occurred."""

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    # The bisect is run from the chromium 'src' checkout root.
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    # Deferred filesystem fixups executed by PerformCleanup().
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1093 def PerformCleanup(self):
1094 """Performs cleanup when script is finished."""
1095 os.chdir(self.src_cwd)
1096 for c in self.cleanup_commands:
1097 if c[0] == 'mv':
1098 shutil.move(c[1], c[2])
1099 else:
1100 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision.

    Args:
      depot: The depot being bisected (e.g. 'chromium', 'cros').
      bad_revision: End of the range (known-bad).
      good_revision: Start of the range (known-good).

    Returns:
      For 'cros', a descending list of unique integer commit timestamps;
      otherwise, the list of revision hashes from source control.
    """

    revision_work_list = []

    if depot == 'cros':
      # For cros, bad/good are unix timestamps rather than hashes.
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # Deduplicate timestamps and bisect newest-first.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
1139 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
1140 svn_revision = self.source_control.SVNFindRev(revision)
1142 if IsStringInt(svn_revision):
1143 # V8 is tricky to bisect, in that there are only a few instances when
1144 # we can dive into bleeding_edge and get back a meaningful result.
1145 # Try to detect a V8 "business as usual" case, which is when:
1146 # 1. trunk revision N has description "Version X.Y.Z"
1147 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1148 # trunk. Now working on X.Y.(Z+1)."
1150 # As of 01/24/2014, V8 trunk descriptions are formatted:
1151 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1152 # So we can just try parsing that out first and fall back to the old way.
1153 v8_dir = self._GetDepotDirectory('v8')
1154 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
1156 revision_info = self.source_control.QueryRevisionInfo(revision,
1157 cwd=v8_dir)
1159 version_re = re.compile("Version (?P<values>[0-9,.]+)")
1161 regex_results = version_re.search(revision_info['subject'])
1163 if regex_results:
1164 git_revision = None
1166 # Look for "based on bleeding_edge" and parse out revision
1167 if 'based on bleeding_edge' in revision_info['subject']:
1168 try:
1169 bleeding_edge_revision = revision_info['subject'].split(
1170 'bleeding_edge revision r')[1]
1171 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1172 git_revision = self.source_control.ResolveToRevision(
1173 bleeding_edge_revision, 'v8_bleeding_edge', 1,
1174 cwd=v8_bleeding_edge_dir)
1175 except IndexError, ValueError:
1176 pass
1178 if not git_revision:
1179 # Wasn't successful, try the old way of looking for "Prepare push to"
1180 git_revision = self.source_control.ResolveToRevision(
1181 int(svn_revision) - 1, 'v8_bleeding_edge', -1,
1182 cwd=v8_bleeding_edge_dir)
1184 if git_revision:
1185 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1186 cwd=v8_bleeding_edge_dir)
1188 if 'Prepare push to trunk' in revision_info['subject']:
1189 return git_revision
1190 return None
1192 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1193 cwd = self._GetDepotDirectory('v8')
1194 cmd = ['log', '--format=%ct', '-1', revision]
1195 output = CheckRunGit(cmd, cwd=cwd)
1196 commit_time = int(output)
1197 commits = []
1199 if search_forward:
1200 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1201 'origin/master']
1202 output = CheckRunGit(cmd, cwd=cwd)
1203 output = output.split()
1204 commits = output
1205 commits = reversed(commits)
1206 else:
1207 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1208 'origin/master']
1209 output = CheckRunGit(cmd, cwd=cwd)
1210 output = output.split()
1211 commits = output
1213 bleeding_edge_revision = None
1215 for c in commits:
1216 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1217 if bleeding_edge_revision:
1218 break
1220 return bleeding_edge_revision
  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Args:
      depot: The depot currently synced ('chromium', 'android-chrome',
          'cros', or 'v8' get special handling).
      revision: Revision currently synced to (unused directly here; the
          DEPS file on disk reflects it).

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      # NOTE(review): 'locals' shadows the builtin of the same name; it holds
      # the execution namespace for the DEPS file.
      locals = {'Var': lambda _: locals["vars"][_],
                'From': lambda *args: None}
      execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)

      os.chdir(cwd)

      # DEPS entries pin git deps as '<repo>.git@<sha1>'; capture the sha1.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")

      for d in DEPOT_NAMES:
        if DEPOT_DEPS_NAME[d].has_key('platform'):
          # Skip depots pinned to a different platform.
          if DEPOT_DEPS_NAME[d]['platform'] != os.name:
            continue

        if (DEPOT_DEPS_NAME[d]['recurse'] and
            depot in DEPOT_DEPS_NAME[d]['from']):
          # A depot may appear under either its current or legacy DEPS path.
          if (locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']) or
              locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old'])):
            if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
              re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
              self.depot_cwd[d] =\
                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
            elif locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old']):
              re_results =\
                  rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src_old']])
              self.depot_cwd[d] =\
                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src_old'][4:])

            if re_results:
              results[d] = re_results.group('revision')
            else:
              print 'Couldn\'t parse revision for %s.' % d
              print
              return None
          else:
            print 'Couldn\'t find %s while parsing .DEPS.git.' % d
            print
            return None
    elif depot == 'cros':
      # Query portage for the currently visible Chrome ebuild version.
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      # NOTE(review): comparing a length to a string only "works" under
      # Python 2's mixed-type ordering; this looks like it was meant to be
      # len(CROS_CHROMEOS_PATTERN) — confirm.
      if len(output) > CROS_CHROMEOS_PATTERN:
        output = output[len(CROS_CHROMEOS_PATTERN):]

      if len(output) > 1:
        output = output.split('_')[0]

        if len(output) > 3:
          contents = output.split('.')

          version = contents[2]

          if contents[3] != '0':
            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
                (version, contents[3], version)
            if not warningText in self.warnings:
              self.warnings.append(warningText)

          cwd = os.getcwd()
          self.ChangeToDepotWorkingDirectory('chromium')
          # NOTE(review): CheckRunGit returns the command's output, not a
          # return code, and the value assigned here is never used; the
          # 'chromium' result below comes from the portage version string
          # instead — verify this is intentional.
          return_code = CheckRunGit(['log', '-1', '--format=%H',
              '--author=chrome-release@google.com', '--grep=to %s' % version,
              'origin/master'])
          os.chdir(cwd)

          results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results
1314 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1315 """Backs up or restores build output directory based on restore argument.
1317 Args:
1318 restore: Indicates whether to restore or backup. Default is False(Backup)
1319 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1321 Returns:
1322 Path to backup or restored location as string. otherwise None if it fails.
1324 build_dir = os.path.abspath(
1325 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1326 source_dir = os.path.join(build_dir, build_type)
1327 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1328 if restore:
1329 source_dir, destination_dir = destination_dir, source_dir
1330 if os.path.exists(source_dir):
1331 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1332 shutil.move(source_dir, destination_dir)
1333 return destination_dir
1334 return None
1336 def DownloadCurrentBuild(self, revision, build_type='Release'):
1337 """Download the build archive for the given revision.
1339 Args:
1340 revision: The SVN revision to build.
1341 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1343 Returns:
1344 True if download succeeds, otherwise False.
1346 abs_build_dir = os.path.abspath(
1347 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1348 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1349 # Get build target architecture.
1350 build_arch = self.opts.target_arch
1351 # File path of the downloaded archive file.
1352 archive_file_dest = os.path.join(abs_build_dir,
1353 GetZipFileName(revision, build_arch))
1354 remote_build = GetRemoteBuildPath(revision, build_arch)
1355 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1356 remote_build,
1357 abs_build_dir)
1358 if not fetch_build_func():
1359 if not self.PostBuildRequestAndWait(revision, condition=fetch_build_func):
1360 raise RuntimeError('Somewthing went wrong while processing build'
1361 'request for: %s' % revision)
1363 # Generic name for the archive, created when archive file is extracted.
1364 output_dir = os.path.join(abs_build_dir,
1365 GetZipFileName(target_arch=build_arch))
1366 # Unzip build archive directory.
1367 try:
1368 RmTreeAndMkDir(output_dir, skip_makedir=True)
1369 ExtractZip(archive_file_dest, abs_build_dir)
1370 if os.path.exists(output_dir):
1371 self.BackupOrRestoreOutputdirectory(restore=False)
1372 print 'Moving build from %s to %s' % (
1373 output_dir, target_build_output_dir)
1374 shutil.move(output_dir, target_build_output_dir)
1375 return True
1376 raise IOError('Missing extracted folder %s ' % output_dir)
1377 except e:
1378 print 'Somewthing went wrong while extracting archive file: %s' % e
1379 self.BackupOrRestoreOutputdirectory(restore=True)
1380 # Cleanup any leftovers from unzipping.
1381 if os.path.exists(output_dir):
1382 RmTreeAndMkDir(output_dir, skip_makedir=True)
1383 finally:
1384 # Delete downloaded archive
1385 if os.path.exists(archive_file_dest):
1386 os.remove(archive_file_dest)
1387 return False
1389 def PostBuildRequestAndWait(self, revision, condition, patch=None):
1390 """POSTs the build request job to the tryserver instance."""
1392 def GetBuilderNameAndBuildTime(target_arch='ia32'):
1393 """Gets builder name and buildtime in seconds based on platform."""
1394 if IsWindows():
1395 if Is64BitWindows() and target_arch == 'x64':
1396 return ('Win x64 Bisect Builder', 3600)
1397 return ('Win Bisect Builder', 3600)
1398 if IsLinux():
1399 return ('Linux Bisect Builder', 1800)
1400 if IsMac():
1401 return ('Mac Bisect Builder', 2700)
1402 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1403 if not condition:
1404 return False
1406 bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
1408 # Creates a try job description.
1409 job_args = {'host': self.opts.builder_host,
1410 'port': self.opts.builder_port,
1411 'revision': revision,
1412 'bot': bot_name,
1413 'name': 'Bisect Job-%s' % revision
1415 # Update patch information if supplied.
1416 if patch:
1417 job_args['patch'] = patch
1418 # Posts job to build the revision on the server.
1419 if post_perf_builder_job.PostTryJob(job_args):
1420 poll_interval = 60
1421 start_time = time.time()
1422 while True:
1423 res = condition()
1424 if res:
1425 return res
1426 elapsed_time = time.time() - start_time
1427 if elapsed_time > build_timeout:
1428 raise RuntimeError('Timed out while waiting %ds for %s build.' %
1429 (build_timeout, revision))
1430 print ('Time elapsed: %ss, still waiting for %s build' %
1431 (elapsed_time, revision))
1432 time.sleep(poll_interval)
1433 return False
1435 def BuildCurrentRevision(self, depot, revision=None):
1436 """Builds chrome and performance_ui_tests on the current revision.
1438 Returns:
1439 True if the build was successful.
1441 if self.opts.debug_ignore_build:
1442 return True
1443 cwd = os.getcwd()
1444 os.chdir(self.src_cwd)
1445 # Fetch build archive for the given revision from the cloud storage when
1446 # the storage bucket is passed.
1447 if depot == 'chromium' and self.opts.gs_bucket and revision:
1448 # Get SVN revision for the given SHA, since builds are archived using SVN
1449 # revision.
1450 revision = self.source_control.SVNFindRev(revision)
1451 if not revision:
1452 raise RuntimeError(
1453 'Failed to determine SVN revision for %s' % sha_revision)
1454 if self.DownloadCurrentBuild(revision):
1455 os.chdir(cwd)
1456 return True
1457 raise RuntimeError('Failed to download build archive for revision %s.\n'
1458 'Unfortunately, bisection couldn\'t continue any '
1459 'further. Please try running script without '
1460 '--gs_bucket flag to produce local builds.' % revision)
1463 build_success = self.builder.Build(depot, self.opts)
1464 os.chdir(cwd)
1465 return build_success
1467 def RunGClientHooks(self):
1468 """Runs gclient with runhooks command.
1470 Returns:
1471 True if gclient reports no errors.
1474 if self.opts.debug_ignore_build:
1475 return True
1477 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1479 def TryParseHistogramValuesFromOutput(self, metric, text):
1480 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1482 Args:
1483 metric: The metric as a list of [<trace>, <value>] strings.
1484 text: The text to parse the metric values from.
1486 Returns:
1487 A list of floating point numbers found.
1489 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1491 text_lines = text.split('\n')
1492 values_list = []
1494 for current_line in text_lines:
1495 if metric_formatted in current_line:
1496 current_line = current_line[len(metric_formatted):]
1498 try:
1499 histogram_values = eval(current_line)
1501 for b in histogram_values['buckets']:
1502 average_for_bucket = float(b['high'] + b['low']) * 0.5
1503 # Extends the list with N-elements with the average for that bucket.
1504 values_list.extend([average_for_bucket] * b['count'])
1505 except:
1506 pass
1508 return values_list
1510 def TryParseResultValuesFromOutput(self, metric, text):
1511 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1513 Args:
1514 metric: The metric as a list of [<trace>, <value>] strings.
1515 text: The text to parse the metric values from.
1517 Returns:
1518 A list of floating point numbers found.
1520 # Format is: RESULT <graph>: <trace>= <value> <units>
1521 metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1523 text_lines = text.split('\n')
1524 values_list = []
1526 for current_line in text_lines:
1527 # Parse the output from the performance test for the metric we're
1528 # interested in.
1529 metric_re = metric_formatted +\
1530 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1531 metric_re = re.compile(metric_re)
1532 regex_results = metric_re.search(current_line)
1534 if not regex_results is None:
1535 values_list += [regex_results.group('values')]
1536 else:
1537 metric_re = metric_formatted +\
1538 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1539 metric_re = re.compile(metric_re)
1540 regex_results = metric_re.search(current_line)
1542 if not regex_results is None:
1543 metric_values = regex_results.group('values')
1545 values_list += metric_values.split(',')
1547 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1549 # If the metric is times/t, we need to sum the timings in order to get
1550 # similar regression results as the try-bots.
1551 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1552 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1554 if metric in metrics_to_sum:
1555 if values_list:
1556 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1558 return values_list
1560 def ParseMetricValuesFromOutput(self, metric, text):
1561 """Parses output from performance_ui_tests and retrieves the results for
1562 a given metric.
1564 Args:
1565 metric: The metric as a list of [<trace>, <value>] strings.
1566 text: The text to parse the metric values from.
1568 Returns:
1569 A list of floating point numbers found.
1571 metric_values = self.TryParseResultValuesFromOutput(metric, text)
1573 if not metric_values:
1574 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1576 return metric_values
1578 def _GenerateProfileIfNecessary(self, command_args):
1579 """Checks the command line of the performance test for dependencies on
1580 profile generation, and runs tools/perf/generate_profile as necessary.
1582 Args:
1583 command_args: Command line being passed to performance test, as a list.
1585 Returns:
1586 False if profile generation was necessary and failed, otherwise True.
1589 if '--profile-dir' in ' '.join(command_args):
1590 # If we were using python 2.7+, we could just use the argparse
1591 # module's parse_known_args to grab --profile-dir. Since some of the
1592 # bots still run 2.6, have to grab the arguments manually.
1593 arg_dict = {}
1594 args_to_parse = ['--profile-dir', '--browser']
1596 for arg_to_parse in args_to_parse:
1597 for i, current_arg in enumerate(command_args):
1598 if arg_to_parse in current_arg:
1599 current_arg_split = current_arg.split('=')
1601 # Check 2 cases, --arg=<val> and --arg <val>
1602 if len(current_arg_split) == 2:
1603 arg_dict[arg_to_parse] = current_arg_split[1]
1604 elif i + 1 < len(command_args):
1605 arg_dict[arg_to_parse] = command_args[i+1]
1607 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1609 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1610 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1611 return not RunProcess(['python', path_to_generate,
1612 '--profile-type-to-generate', profile_type,
1613 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1614 return False
1615 return True
  def RunPerformanceTestAndParseResults(self, command_to_run, metric,
      reset_on_first_run=False, upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision by executing the
    'command_to_run' and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
      reset_on_first_run: Pass --reset-results to telemetry on run 0.
      upload_on_last_run: Pass --upload-results to telemetry on the last run.
      results_label: Optional --results-label value for telemetry runs.

    Returns:
      On success, a 3-tuple: (values dict with 'mean'/'std_err'/'std_dev'/
      'values', 0, combined output of all runs). On failure, a tuple of
      (error message, -1[, output]).
      NOTE(review): the debug_ignore_perf_test and profile-failure paths
      return 2-tuples while the other paths return 3-tuples — confirm all
      callers handle both shapes.
    """
    if self.opts.debug_ignore_perf_test:
      return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

    if IsWindows():
      command_to_run = command_to_run.replace('/', r'\\')

    args = shlex.split(command_to_run)

    if not self._GenerateProfileIfNecessary(args):
      return ('Failed to generate profile for performance test.', -1)

    # If running a telemetry test for cros, insert the remote ip, and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    cwd = os.getcwd()
    os.chdir(self.src_cwd)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      try:
        current_args = copy.copy(args)
        if is_telemetry:
          if i == 0 and reset_on_first_run:
            current_args.append('--reset-results')
          elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
            current_args.append('--upload-results')
          if results_label:
            current_args.append('--results-label=%s' % results_label)
        (output, return_code) = RunProcessAndRetrieveOutput(current_args)
      except OSError, e:
        if e.errno == errno.ENOENT:
          err_text = ("Something went wrong running the performance test. "
              "Please review the command line:\n\n")
          if 'src/' in ' '.join(args):
            err_text += ("Check that you haven't accidentally specified a path "
                "with src/ in the command.\n\n")
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, -1)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      metric_values += self.ParseMetricValuesFromOutput(metric, output)

      elapsed_minutes = (time.time() - start_time) / 60.0

      # Stop early on timeout, or when a run produced no values at all
      # (which means the metric is wrong or the test is broken).
      if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
        break

    os.chdir(cwd)

    # Need to get the average value if there were multiple values.
    if metric_values:
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_err = CalculateStandardError(metric_values)
      standard_dev = CalculateStandardDeviation(metric_values)

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
      return (values, 0, output_of_all_runs)
    else:
      return ('Invalid metric specified, or no values returned from '
          'performance test.', -1, output_of_all_runs)
1716 def FindAllRevisionsToSync(self, revision, depot):
1717 """Finds all dependant revisions and depots that need to be synced for a
1718 given revision. This is only useful in the git workflow, as an svn depot
1719 may be split into multiple mirrors.
1721 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
1722 skia/include. To sync skia/src properly, one has to find the proper
1723 revisions in skia/gyp and skia/include.
1725 Args:
1726 revision: The revision to sync to.
1727 depot: The depot in use at the moment (probably skia).
1729 Returns:
1730 A list of [depot, revision] pairs that need to be synced.
1732 revisions_to_sync = [[depot, revision]]
1734 is_base = ((depot == 'chromium') or (depot == 'cros') or
1735 (depot == 'android-chrome'))
1737 # Some SVN depots were split into multiple git depots, so we need to
1738 # figure out for each mirror which git revision to grab. There's no
1739 # guarantee that the SVN revision will exist for each of the dependant
1740 # depots, so we have to grep the git logs and grab the next earlier one.
1741 if not is_base and\
1742 DEPOT_DEPS_NAME[depot]['depends'] and\
1743 self.source_control.IsGit():
1744 svn_rev = self.source_control.SVNFindRev(revision)
1746 for d in DEPOT_DEPS_NAME[depot]['depends']:
1747 self.ChangeToDepotWorkingDirectory(d)
1749 dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
1751 if dependant_rev:
1752 revisions_to_sync.append([d, dependant_rev])
1754 num_resolved = len(revisions_to_sync)
1755 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1757 self.ChangeToDepotWorkingDirectory(depot)
1759 if not ((num_resolved - 1) == num_needed):
1760 return None
1762 return revisions_to_sync
1764 def PerformPreBuildCleanup(self):
1765 """Performs necessary cleanup between runs."""
1766 print 'Cleaning up between runs.'
1767 print
1769 # Having these pyc files around between runs can confuse the
1770 # perf tests and cause them to crash.
1771 for (path, dir, files) in os.walk(self.src_cwd):
1772 for cur_file in files:
1773 if cur_file.endswith('.pyc'):
1774 path_to_file = os.path.join(path, cur_file)
1775 os.remove(path_to_file)
1777 def PerformWebkitDirectoryCleanup(self, revision):
1778 """If the script is switching between Blink and WebKit during bisect,
1779 its faster to just delete the directory rather than leave it up to git
1780 to sync.
1782 Returns:
1783 True if successful.
1785 if not self.source_control.CheckoutFileAtRevision(
1786 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
1787 return False
1789 cwd = os.getcwd()
1790 os.chdir(self.src_cwd)
1792 is_blink = bisect_utils.IsDepsFileBlink()
1794 os.chdir(cwd)
1796 if not self.source_control.RevertFileToHead(
1797 bisect_utils.FILE_DEPS_GIT):
1798 return False
1800 if self.was_blink != is_blink:
1801 self.was_blink = is_blink
1802 return bisect_utils.RemoveThirdPartyWebkitDirectory()
1803 return True
1805 def PerformCrosChrootCleanup(self):
1806 """Deletes the chroot.
1808 Returns:
1809 True if successful.
1811 cwd = os.getcwd()
1812 self.ChangeToDepotWorkingDirectory('cros')
1813 cmd = [CROS_SDK_PATH, '--delete']
1814 return_code = RunProcess(cmd)
1815 os.chdir(cwd)
1816 return not return_code
1818 def CreateCrosChroot(self):
1819 """Creates a new chroot.
1821 Returns:
1822 True if successful.
1824 cwd = os.getcwd()
1825 self.ChangeToDepotWorkingDirectory('cros')
1826 cmd = [CROS_SDK_PATH, '--create']
1827 return_code = RunProcess(cmd)
1828 os.chdir(cwd)
1829 return not return_code
1831 def PerformPreSyncCleanup(self, revision, depot):
1832 """Performs any necessary cleanup before syncing.
1834 Returns:
1835 True if successful.
1837 if depot == 'chromium':
1838 if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
1839 return False
1840 return self.PerformWebkitDirectoryCleanup(revision)
1841 elif depot == 'cros':
1842 return self.PerformCrosChrootCleanup()
1843 return True
1845 def RunPostSync(self, depot):
1846 """Performs any work after syncing.
1848 Returns:
1849 True if successful.
1851 if self.opts.target_platform == 'android':
1852 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
1853 path_to_src=self.src_cwd):
1854 return False
1856 if depot == 'cros':
1857 return self.CreateCrosChroot()
1858 else:
1859 return self.RunGClientHooks()
1860 return True
1862 def ShouldSkipRevision(self, depot, revision):
1863 """Some commits can be safely skipped (such as a DEPS roll), since the tool
1864 is git based those changes would have no effect.
1866 Args:
1867 depot: The depot being bisected.
1868 revision: Current revision we're synced to.
1870 Returns:
1871 True if we should skip building/testing this revision.
1873 if depot == 'chromium':
1874 if self.source_control.IsGit():
1875 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1876 output = CheckRunGit(cmd)
1878 files = output.splitlines()
1880 if len(files) == 1 and files[0] == 'DEPS':
1881 return True
1883 return False
  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
      skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: When True, DEPS-roll-only revisions may be skipped.

    Returns:
      On success, a tuple containing the results of the performance test,
      plus the parsed external revisions and test/build timings.
      Otherwise, a tuple with the error message and a BUILD_RESULT_* code.
    """
    # Pick the sync tool appropriate for the depot; non-base depots are
    # synced with plain git (sync_client stays None).
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      # Each entry is a [depot, revision] pair; all must sync for the run
      # to proceed.
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
              current_revision)
        if not self.source_control.SyncToRevision(current_revision,
            sync_client):
          success = False

          break

    if success:
      success = self.RunPostSync(depot)
      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
              BUILD_RESULT_SKIPPED)

        start_build_time = time.time()
        if self.BuildCurrentRevision(depot, revision):
          after_build_time = time.time()
          results = self.RunPerformanceTestAndParseResults(command_to_run,
              metric)
          # Restore build output directory once the tests are done, to avoid
          # any discrepancy.
          if depot == 'chromium' and self.opts.gs_bucket and revision:
            self.BackupOrRestoreOutputdirectory(restore=True)

          if results[1] == 0:
            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
                depot, revision)

            if not external_revisions is None:
              # Success: append external revisions plus test and build
              # durations (in seconds) to the perf-test results.
              return (results[0], results[1], external_revisions,
                  time.time() - after_build_time, after_build_time -
                  start_build_time)
            else:
              return ('Failed to parse DEPS file for external revisions.',
                  BUILD_RESULT_FAIL)
          else:
            return results
        else:
          # NOTE(review): 'str(revision, )' is just str() with a trailing
          # comma inside the call, not a tuple — presumably a typo for
          # str(revision); behavior is the same.
          return ('Failed to build revision: [%s]' % (str(revision, )),
              BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % (str(revision, )),
          BUILD_RESULT_FAIL)
1974 def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
1975 """Given known good and bad values, decide if the current_value passed
1976 or failed.
1978 Args:
1979 current_value: The value of the metric being checked.
1980 known_bad_value: The reference value for a "failed" run.
1981 known_good_value: The reference value for a "passed" run.
1983 Returns:
1984 True if the current_value is closer to the known_good_value than the
1985 known_bad_value.
1987 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
1988 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
1990 return dist_to_good_value < dist_to_bad_value
1992 def _GetDepotDirectory(self, depot_name):
1993 if depot_name == 'chromium':
1994 return self.src_cwd
1995 elif depot_name == 'cros':
1996 return self.cros_cwd
1997 elif depot_name in DEPOT_NAMES:
1998 return self.depot_cwd[depot_name]
1999 else:
2000 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2001 ' was added without proper support?' %\
2002 (depot_name,)
2004 def ChangeToDepotWorkingDirectory(self, depot_name):
2005 """Given a depot, changes to the appropriate working directory.
2007 Args:
2008 depot_name: The name of the depot (see DEPOT_NAMES).
2010 os.chdir(self._GetDepotDirectory(depot_name))
2012 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2013 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2014 search_forward=True)
2015 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2016 search_forward=False)
2017 min_revision_data['external']['v8_bleeding_edge'] = r1
2018 max_revision_data['external']['v8_bleeding_edge'] = r2
2020 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2021 min_revision_data['revision']) or
2022 not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2023 max_revision_data['revision'])):
2024 self.warnings.append('Trunk revisions in V8 did not map directly to '
2025 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2026 'did map directly to bleeding_edge revisions, but results might not '
2027 'be valid.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    # Skip depots pinned to a different platform.
    # (has_key is deprecated and removed in Python 3; use `in` instead.)
    if 'platform' in DEPOT_DEPS_NAME[next_depot]:
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        continue

    # Only consider depots marked recursable from the current depot.
    if not (DEPOT_DEPS_NAME[next_depot]['recurse'] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    # No movement in this depot across the range: nothing to bisect there.
    if (min_revision_data['external'][next_depot] ==
        max_revision_data['external'][next_depot]):
      continue

    if (min_revision_data['external'][next_depot] and
        max_revision_data['external'][next_depot]):
      external_depot = next_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive. An empty list signals failure (gclient
    config/sync failed, or no revisions could be retrieved).
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      # Non-zero return code from gclient config creation: give up.
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap the bleeding_edge checkout into the location gclient expects
    # v8 to occupy, and record shell commands to undo the swap later.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    # Point the depot->directory map at the swapped locations.
    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  # Always return to the chromium checkout before handing control back.
  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot the two revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
                                                 target_depot,
                                                 cmd,
                                                 metric)

  good_run_results = None

  # Index 1 of the result tuple is the error indicator; only bother
  # running the good revision if the bad revision succeeded.
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
                                                    target_depot,
                                                    cmd,
                                                    metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
      will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift every existing entry that sorts after the insertion point to
  # make room for the new revisions.
  for entry in revision_data.values():
    if entry['sort'] > sort:
      entry['sort'] += num_depot_revisions

  for offset, revision in enumerate(revisions):
    revision_data[revision] = {
        'revision' : revision,
        'depot' : depot,
        'value' : None,
        'perf_time' : 0,
        'build_time' : 0,
        'passed' : '?',
        'sort' : offset + sort + 1,
    }
def PrintRevisionsToBisectMessage(self, revision_list, depot):
  """Prints the revision range about to be bisected on |depot|.

  Wraps the output in buildbot annotations when the option is set.

  Args:
    revision_list: Revisions in the bisect range, newest first.
    depot: Name of the depot the revisions belong to.
  """
  if self.opts.output_buildbot_annotations:
    step_name = 'Bisection Range: [%s - %s]' % (
        revision_list[len(revision_list)-1], revision_list[0])
    bisect_utils.OutputAnnotationStepStart(step_name)

  print
  print 'Revisions to bisect on [%s]:' % depot
  for revision_id in revision_list:
    print ' -> %s' % (revision_id, )
  print

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  # Only meaningful for git-based chromium checkouts.
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      # A mismatched count means a DEPS change landed whose .DEPS.git
      # counterpart is outside the current range.
      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  # Cros/svn use integers, so a plain numeric comparison suffices.
  if not (self.source_control.IsGit() and target_depot != 'cros'):
    return int(good_revision) <= int(bad_revision)

  # For git hashes, compare the commit timestamps instead.
  cwd = self._GetDepotDirectory(target_depot)

  def _CommitTime(revision):
    # %ct is the committer date as a unix timestamp.
    return int(CheckRunGit(['log', '--format=%ct', '-1', revision], cwd=cwd))

  return _CommitTime(good_revision) <= _CommitTime(bad_revision)
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
  """Given known good and bad revisions, run a binary search on all
  intermediate revisions to determine the CL where the performance regression
  occurred.

  Args:
    command_to_run: Specify the command to execute the performance test.
    good_revision_in: Number/tag of the known good revision.
    bad_revision_in: Number/tag of the known bad revision.
    metric: The performance metric to monitor.

  Returns:
    A dict with 2 members, 'revision_data' and 'error'. On success,
    'revision_data' will contain a dict mapping revision ids to
    data about that revision. Each piece of revision data consists of a
    dict with the following keys:

    'passed': Represents whether the performance test was successful at
        that revision. Possible values include: 1 (passed), 0 (failed),
        '?' (skipped), 'F' (build failed).
    'depot': The depot that this revision is from (ie. WebKit)
    'external': If the revision is a 'src' revision, 'external' contains
        the revisions of each of the external libraries.
    'sort': A sort value for sorting the dict in order of commits.

    For example:
    {
      'error':None,
      'revision_data':
      {
        'CL #1':
        {
          'passed':False,
          'depot':'chromium',
          'external':None,
          'sort':0
        }
      }
    }

    If an error occurred, the 'error' field will contain the message and
    'revision_data' will be empty.
  """
  results = {'revision_data' : {},
             'error' : None}

  # Choose depot to bisect first
  target_depot = 'chromium'
  if self.opts.target_platform == 'cros':
    target_depot = 'cros'
  elif self.opts.target_platform == 'android-chrome':
    target_depot = 'android-chrome'

  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory(target_depot)

  # If they passed SVN CL's, etc... we can try match them to git SHA1's.
  bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
      target_depot, 100)
  good_revision = self.source_control.ResolveToRevision(good_revision_in,
      target_depot, -100)

  os.chdir(cwd)

  if bad_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
    return results

  if good_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
    return results

  # Check that they didn't accidentally swap good and bad revisions.
  if not self.CheckIfRevisionsInProperOrder(
      target_depot, good_revision, bad_revision):
    results['error'] = 'bad_revision < good_revision, did you swap these '\
                       'by mistake?'
    return results

  (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
      bad_revision, good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

  print 'Gathering revision range for bisection.'
  # Retrieve a list of revisions to do bisection on.
  src_revision_list = self.GetRevisionList(target_depot,
                                           bad_revision,
                                           good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()

  if src_revision_list:
    # revision_data will store information about a revision such as the
    # depot it came from, the webkit/V8 revision at that time,
    # performance timing, build state, etc...
    revision_data = results['revision_data']

    # revision_list is the list we're binary searching through at the moment.
    revision_list = []

    sort_key_ids = 0

    for current_revision_id in src_revision_list:
      sort_key_ids += 1

      revision_data[current_revision_id] = {'value' : None,
                                            'passed' : '?',
                                            'depot' : target_depot,
                                            'external' : None,
                                            'perf_time' : 0,
                                            'build_time' : 0,
                                            'sort' : sort_key_ids}
      revision_list.append(current_revision_id)

    min_revision = 0
    max_revision = len(revision_list) - 1

    self.PrintRevisionsToBisectMessage(revision_list, target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

    print 'Gathering reference values for bisection.'

    # Perform the performance tests on the good and bad revisions, to get
    # reference values.
    (bad_results, good_results) = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    # Index 1 of a run result holds the error indicator; a failure on
    # either reference revision aborts the whole bisect.
    if bad_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'bad\' reference value. The bisect cannot continue without '
          'a working \'bad\' revision to start from.\n\nError: %s' %
          bad_results[0])
      return results

    if good_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'good\' reference value. The bisect cannot continue without '
          'a working \'good\' revision to start from.\n\nError: %s' %
          good_results[0])
      return results

    # We need these reference values to determine if later runs should be
    # classified as pass or fail.
    known_bad_value = bad_results[0]
    known_good_value = good_results[0]

    # Can just mark the good and bad revisions explicitly here since we
    # already know the results.
    bad_revision_data = revision_data[revision_list[0]]
    bad_revision_data['external'] = bad_results[2]
    bad_revision_data['perf_time'] = bad_results[3]
    bad_revision_data['build_time'] = bad_results[4]
    bad_revision_data['passed'] = False
    bad_revision_data['value'] = known_bad_value

    good_revision_data = revision_data[revision_list[max_revision]]
    good_revision_data['external'] = good_results[2]
    good_revision_data['perf_time'] = good_results[3]
    good_revision_data['build_time'] = good_results[4]
    good_revision_data['passed'] = True
    good_revision_data['value'] = known_good_value

    next_revision_depot = target_depot

    # Main binary-search loop. Each iteration either tests the midpoint,
    # descends into an external depot, or terminates.
    while True:
      if not revision_list:
        break

      min_revision_data = revision_data[revision_list[min_revision]]
      max_revision_data = revision_data[revision_list[max_revision]]

      if max_revision - min_revision <= 1:
        current_depot = min_revision_data['depot']
        if min_revision_data['passed'] == '?':
          next_revision_index = min_revision
        elif max_revision_data['passed'] == '?':
          next_revision_index = max_revision
        elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
          previous_revision = revision_list[min_revision]
          # If there were changes to any of the external libraries we track,
          # should bisect the changes there as well.
          external_depot = self._FindNextDepotToBisect(current_depot,
              previous_revision, min_revision_data, max_revision_data)

          # If there was no change in any of the external depots, the search
          # is over.
          if not external_depot:
            if current_depot == 'v8':
              self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                  'continue any further. The script can only bisect into '
                  'V8\'s bleeding_edge repository if both the current and '
                  'previous revisions in trunk map directly to revisions in '
                  'bleeding_edge.')
            break

          earliest_revision = max_revision_data['external'][external_depot]
          latest_revision = min_revision_data['external'][external_depot]

          new_revision_list = self.PrepareToBisectOnDepot(external_depot,
                                                          latest_revision,
                                                          earliest_revision,
                                                          next_revision_depot,
                                                          previous_revision)

          if not new_revision_list:
            results['error'] = 'An error occurred attempting to retrieve'\
                               ' revision range: [%s..%s]' %\
                               (earliest_revision, latest_revision)
            return results

          self.AddRevisionsIntoRevisionData(new_revision_list,
                                            external_depot,
                                            min_revision_data['sort'],
                                            revision_data)

          # Reset the bisection and perform it on the newly inserted
          # changelists.
          revision_list = new_revision_list
          min_revision = 0
          max_revision = len(revision_list) - 1
          sort_key_ids += len(revision_list)

          print 'Regression in metric:%s appears to be the result of changes'\
                ' in [%s].' % (metric, external_depot)

          self.PrintRevisionsToBisectMessage(revision_list, external_depot)

          continue
        else:
          break
      else:
        next_revision_index = int((max_revision - min_revision) / 2) +\
                              min_revision

      next_revision_id = revision_list[next_revision_index]
      next_revision_data = revision_data[next_revision_id]
      next_revision_depot = next_revision_data['depot']

      self.ChangeToDepotWorkingDirectory(next_revision_depot)

      if self.opts.output_buildbot_annotations:
        step_name = 'Working on [%s]' % next_revision_id
        bisect_utils.OutputAnnotationStepStart(step_name)

      print 'Working on revision: [%s]' % next_revision_id

      run_results = self.SyncBuildAndRunRevision(next_revision_id,
                                                 next_revision_depot,
                                                 command_to_run,
                                                 metric, skippable=True)

      # If the build is successful, check whether or not the metric
      # had regressed.
      if not run_results[1]:
        if len(run_results) > 2:
          next_revision_data['external'] = run_results[2]
          next_revision_data['perf_time'] = run_results[3]
          next_revision_data['build_time'] = run_results[4]

        passed_regression = self.CheckIfRunPassed(run_results[0],
                                                  known_good_value,
                                                  known_bad_value)

        next_revision_data['passed'] = passed_regression
        next_revision_data['value'] = run_results[0]

        # Narrow the search window toward the failing side.
        if passed_regression:
          max_revision = next_revision_index
        else:
          min_revision = next_revision_index
      else:
        if run_results[1] == BUILD_RESULT_SKIPPED:
          next_revision_data['passed'] = 'Skipped'
        elif run_results[1] == BUILD_RESULT_FAIL:
          next_revision_data['passed'] = 'Build Failed'

        print run_results[0]

        # If the build is broken, remove it and redo search.
        revision_list.pop(next_revision_index)

        max_revision -= 1

      if self.opts.output_buildbot_annotations:
        self._PrintPartialResults(results)
        bisect_utils.OutputAnnotationStepClosed()
  else:
    # Weren't able to sync and retrieve the revision range.
    results['error'] = 'An error occurred attempting to retrieve revision '\
                       'range: [%s..%s]' % (good_revision, bad_revision)

  return results
def _PrintPartialResults(self, results_dict):
  """Prints the tested-commits table for a still-in-progress bisect.

  Args:
    results_dict: A dict with a 'revision_data' member (see Run).
  """
  revision_data = results_dict['revision_data']
  revision_data_sorted = sorted(revision_data.iteritems(),
                                key = lambda x: x[1]['sort'])
  results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

  # (Removed two unused locals that shadowed the dict lookups below.)
  self._PrintTestedCommitsTable(revision_data_sorted,
                                results_dict['first_working_revision'],
                                results_dict['last_broken_revision'],
                                100, final_step=False)
def _PrintConfidence(self, results_dict):
  """Prints the confidence percentage for the bisect results.

  Args:
    results_dict: A dict with a 'confidence' member (see _GetResultsDict).
  """
  # The perf dashboard specifically looks for the string
  # "Confidence in Bisection Results: 100%" to decide whether or not
  # to cc the author(s). If you change this, please update the perf
  # dashboard as well.
  print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
def _PrintBanner(self, results_dict):
  """Prints the banner summarizing the reproduced regression.

  Args:
    results_dict: A dict with 'regression_size' and 'regression_std_err'
      members, both percentages (see _GetResultsDict).
  """
  print
  print " __o_\___ Aw Snap! We hit a speed bump!"
  print "=-O----O-'__.~.___________________________________"
  print
  print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
      results_dict['regression_size'], results_dict['regression_std_err'],
      '/'.join(self.opts.metric))
  self._PrintConfidence(results_dict)
def _PrintFailedBanner(self, results_dict):
  """Prints a notice that the bisect could not reproduce the regression.

  Args:
    results_dict: A dict with a 'confidence' member (see _GetResultsDict).
  """
  print
  print ('Bisect could not reproduce a change in the '
      '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1]))
  print
  self._PrintConfidence(results_dict)
def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
  """Maps a commit to a ViewVC URL, when the depot supports one.

  Args:
    cl: The commit hash/revision to look up.
    depot: Name of the depot the commit belongs to, or None.

  Returns:
    A ViewVC URL string, or '' when the depot has no 'viewvc' entry or no
    svn revision could be parsed from the commit message body.
  """
  info = self.source_control.QueryRevisionInfo(cl,
      self._GetDepotDirectory(depot))
  # has_key is deprecated (removed in Python 3); use `in` instead.
  if depot and 'viewvc' in DEPOT_DEPS_NAME[depot]:
    try:
      # Format is "git-svn-id: svn://....@123456 <other data>"
      svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
      svn_revision = svn_line[0].split('@')
      svn_revision = svn_revision[1].split(' ')[0]
      return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
    except IndexError:
      # No git-svn-id line, or it didn't contain an '@revision' part.
      return ''
  return ''
def _PrintRevisionInfo(self, cl, info, depot=None):
  """Prints the details of a single culprit commit.

  Args:
    cl: The commit hash.
    info: Dict describing the commit, with 'subject', 'author', 'email',
      'body' and 'date' keys (from source_control.QueryRevisionInfo).
    depot: Optional depot name, used to build a ViewVC link.
  """
  # The perf dashboard specifically looks for the string
  # "Author : " to parse out who to cc on a bug. If you change the
  # formatting here, please update the perf dashboard as well.
  print
  print 'Subject : %s' % info['subject']
  print 'Author : %s' % info['author']
  # Only show the email separately when it adds information beyond the
  # author name.
  if not info['email'].startswith(info['author']):
    print 'Email : %s' % info['email']
  commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
  if commit_link:
    print 'Link : %s' % commit_link
  else:
    print
    print 'Failed to parse svn revision from body:'
    print
    print info['body']
    print
  print 'Commit : %s' % cl
  print 'Date : %s' % info['date']
def _PrintTestedCommitsTable(self, revision_data_sorted,
    first_working_revision, last_broken_revision, confidence,
    final_step=True):
  """Prints a table of every commit that produced a test value.

  Args:
    revision_data_sorted: (revision, data) pairs in commit order.
    first_working_revision: First revision that passed (good side).
    last_broken_revision: Last revision that failed (bad side).
    confidence: Confidence percentage; falsy suppresses good/bad labels.
    final_step: True for the final report, False for partial output.
  """
  print
  if final_step:
    print 'Tested commits:'
  else:
    print 'Partial results:'
  print ' %20s %70s %12s %14s %13s' % ('Depot'.center(20, ' '),
      'Commit SHA'.center(70, ' '), 'Mean'.center(12, ' '),
      'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
  # state tracks which side of the breakage the row is on:
  # 0 = bad range, 1 = suspected CL, 2 = good range.
  state = 0
  for current_id, current_data in revision_data_sorted:
    if current_data['value']:
      if (current_id == last_broken_revision or
          current_id == first_working_revision):
        # If confidence is too low, don't add this empty line since it's
        # used to put focus on a suspected CL.
        if confidence and final_step:
          print
        state += 1
        if state == 2 and not final_step:
          # Just want a separation between "bad" and "good" cl's.
          print

      state_str = 'Bad'
      if state == 1 and final_step:
        state_str = 'Suspected CL'
      elif state == 2:
        state_str = 'Good'

      # If confidence is too low, don't bother outputting good/bad.
      if not confidence:
        state_str = ''
      state_str = state_str.center(13, ' ')

      std_error = ('+-%.02f' %
          current_data['value']['std_err']).center(14, ' ')
      mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
      cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
          current_data['depot'])
      if not cl_link:
        cl_link = current_id
      print ' %20s %70s %12s %14s %13s' % (
          current_data['depot'].center(20, ' '), cl_link.center(70, ' '),
          mean, std_error, state_str)
def _PrintReproSteps(self):
  """Prints the command a developer can run to reproduce the test locally."""
  print
  print 'To reproduce locally:'
  print '$ ' + self.opts.command
  # Telemetry-based tests additionally support profiling.
  if bisect_utils.IsTelemetryCommand(self.opts.command):
    print
    print 'Also consider passing --profiler=list to see available profilers.'
def _PrintOtherRegressions(self, other_regressions, revision_data):
  """Prints a table of additional suspected regression ranges.

  Args:
    other_regressions: [current_id, previous_id, confidence] entries
      (see _FindOtherRegressions).
    revision_data: Dict mapping revision ids to their data (see Run).
  """
  print
  print 'Other regressions may have occurred:'
  print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
      'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
  for regression in other_regressions:
    current_id, previous_id, confidence = regression
    current_data = revision_data[current_id]
    previous_data = revision_data[previous_id]

    current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
        current_data['depot'])
    previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
        previous_data['depot'])

    # If we can't map it to a viewable URL, at least show the original hash.
    if not current_link:
      current_link = current_id
    if not previous_link:
      previous_link = previous_id

    print ' %8s %70s %s' % (
        current_data['depot'], current_link,
        ('%d%%' % confidence).center(10, ' '))
    print ' %8s %70s' % (
        previous_data['depot'], previous_link)
    print
def _PrintStepTime(self, revision_data_sorted):
  """Prints the average build and test times over all tested revisions.

  Args:
    revision_data_sorted: (revision, data) pairs in commit order; only
      entries with a 'value' (i.e. actually run) are counted.
  """
  step_perf_time_avg = 0.0
  step_build_time_avg = 0.0
  step_count = 0.0
  for _, current_data in revision_data_sorted:
    if current_data['value']:
      step_perf_time_avg += current_data['perf_time']
      step_build_time_avg += current_data['build_time']
      step_count += 1
  # Guard against division by zero when nothing was run.
  if step_count:
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
  print
  print 'Average build time : %s' % datetime.timedelta(
      seconds=int(step_build_time_avg))
  print 'Average test time : %s' % datetime.timedelta(
      seconds=int(step_perf_time_avg))
def _PrintWarnings(self):
  """Prints accumulated warnings, deduplicated via set(), if any exist."""
  if not self.warnings:
    return
  print
  print 'WARNINGS:'
  for w in set(self.warnings):
    print ' !!! %s' % w
def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
  """Scans the run data for additional suspected regression ranges.

  Args:
    revision_data_sorted: (revision, data) pairs in commit order.
    bad_greater_than_good: True when the overall regression is an increase
      in the metric (bad mean > good mean).

  Returns:
    A list of [current_id, previous_id, confidence] entries, one per
    suspected regression range.
  """
  other_regressions = []
  previous_values = []
  previous_id = None
  for current_id, current_data in revision_data_sorted:
    current_values = current_data['value']
    if current_values:
      current_values = current_values['values']
      if previous_values:
        confidence = self._CalculateConfidence(previous_values,
            [current_values])
        mean_of_prev_runs = CalculateTruncatedMean(
            sum(previous_values, []), 0)
        mean_of_current_runs = CalculateTruncatedMean(current_values, 0)

        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)

        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
  return other_regressions
def _CalculateConfidence(self, working_means, broken_means):
  """Calculates a confidence percentage for the bisect results.

  Confidence is the distance between the closest edges of the working and
  broken groups' [min, max] mean ranges, normalized by the sum of the two
  groups' standard deviations, clamped to [0, 100].

  Args:
    working_means: List of lists of values from the working revisions.
    broken_means: List of lists of values from the broken revisions.

  Returns:
    An integer confidence percentage in [0, 100].
  """
  bounds_working = []
  bounds_broken = []
  for m in working_means:
    current_mean = CalculateTruncatedMean(m, 0)
    if bounds_working:
      bounds_working[0] = min(current_mean, bounds_working[0])
      # Bug fix: the running maximum must be compared against the previous
      # maximum (bounds_working[1]); comparing against the just-updated
      # minimum (bounds_working[0]) collapsed the upper bound whenever
      # more than one mean was present.
      bounds_working[1] = max(current_mean, bounds_working[1])
    else:
      bounds_working = [current_mean, current_mean]
  for m in broken_means:
    current_mean = CalculateTruncatedMean(m, 0)
    if bounds_broken:
      bounds_broken[0] = min(current_mean, bounds_broken[0])
      # Same fix as above for the broken group's upper bound.
      bounds_broken[1] = max(current_mean, bounds_broken[1])
    else:
      bounds_broken = [current_mean, current_mean]
  # Distance between the closest edges of the two ranges.
  dist_between_groups = min(math.fabs(bounds_broken[1] - bounds_working[0]),
      math.fabs(bounds_broken[0] - bounds_working[1]))
  # Flatten both groups to compute their overall spread.
  working_mean = sum(working_means, [])
  broken_mean = sum(broken_means, [])
  len_working_group = CalculateStandardDeviation(working_mean)
  len_broken_group = CalculateStandardDeviation(broken_mean)

  confidence = (dist_between_groups / (
      max(0.0001, (len_broken_group + len_working_group ))))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def _GetResultsDict(self, revision_data, revision_data_sorted):
  """Computes summary statistics and culprit info for a completed bisect.

  Args:
    revision_data: Dict mapping revision ids to their data (see Run).
    revision_data_sorted: (revision, data) pairs in commit order.

  Returns:
    A dict with the breakage range endpoints, the culprit revisions with
    their commit info, other suspected regression ranges, the regression
    size and std-err (as percentages) and a confidence value.
  """
  # Find range where it possibly broke.
  first_working_revision = None
  first_working_revision_index = -1
  last_broken_revision = None
  last_broken_revision_index = -1

  for i in xrange(len(revision_data_sorted)):
    k, v = revision_data_sorted[i]
    if v['passed'] == 1:
      if not first_working_revision:
        first_working_revision = k
        first_working_revision_index = i

    if not v['passed']:
      last_broken_revision = k
      last_broken_revision_index = i

  if last_broken_revision != None and first_working_revision != None:
    # Collect the raw value lists on each side of the breakage.
    broken_means = []
    for i in xrange(0, last_broken_revision_index + 1):
      if revision_data_sorted[i][1]['value']:
        broken_means.append(revision_data_sorted[i][1]['value']['values'])

    working_means = []
    for i in xrange(first_working_revision_index, len(revision_data_sorted)):
      if revision_data_sorted[i][1]['value']:
        working_means.append(revision_data_sorted[i][1]['value']['values'])

    # Flatten the lists to calculate mean of all values.
    working_mean = sum(working_means, [])
    broken_mean = sum(broken_means, [])

    # Calculate the approximate size of the regression
    mean_of_bad_runs = CalculateTruncatedMean(broken_mean, 0.0)
    mean_of_good_runs = CalculateTruncatedMean(working_mean, 0.0)

    regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) /
        max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0

    regression_std_err = math.fabs(CalculatePooledStandardError(
        [working_mean, broken_mean]) /
        max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

    # Give a "confidence" in the bisect. At the moment we use how distinct the
    # values are before and after the last broken revision, and how noisy the
    # overall graph is.
    confidence = self._CalculateConfidence(working_means, broken_means)

    culprit_revisions = []

    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(
        revision_data[last_broken_revision]['depot'])

    if revision_data[last_broken_revision]['depot'] == 'cros':
      # Want to get a list of all the commits and what depots they belong
      # to so that we can grab info about each.
      cmd = ['repo', 'forall', '-c',
          'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
          last_broken_revision, first_working_revision + 1)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      changes = []
      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)
      last_depot = None
      cwd = os.getcwd()
      for l in output.split('\n'):
        if l:
          # Output will be in form:
          # /path_to_depot
          # /path_to_other_depot
          # <SHA1>
          # /path_again
          # <SHA1>
          # etc.
          if l[0] == '/':
            last_depot = l
          else:
            contents = l.split(' ')
            if len(contents) > 1:
              changes.append([last_depot, contents[0]])
      for c in changes:
        os.chdir(c[0])
        info = self.source_control.QueryRevisionInfo(c[1])
        culprit_revisions.append((c[1], info, None))
    else:
      # Walk from the last broken revision up to (but excluding) the first
      # working revision, collecting commit info for each candidate.
      for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
        k, v = revision_data_sorted[i]
        if k == first_working_revision:
          break
        self.ChangeToDepotWorkingDirectory(v['depot'])
        info = self.source_control.QueryRevisionInfo(k)
        culprit_revisions.append((k, info, v['depot']))
    os.chdir(cwd)

    # Check for any other possible regression ranges
    other_regressions = self._FindOtherRegressions(revision_data_sorted,
        mean_of_bad_runs > mean_of_good_runs)

    # Check for warnings:
    if len(culprit_revisions) > 1:
      self.warnings.append('Due to build errors, regression range could '
          'not be narrowed down to a single commit.')
    if self.opts.repeat_test_count == 1:
      self.warnings.append('Tests were only set to run once. This may '
          'be insufficient to get meaningful results.')
    if confidence < 100:
      if confidence:
        self.warnings.append(
            'Confidence is less than 100%. There could be other candidates for '
            'this regression. Try bisecting again with increased repeat_count '
            'or on a sub-metric that shows the regression more clearly.')
      else:
        self.warnings.append(
            'Confidence is 0%. Try bisecting again on another platform, with '
            'increased repeat_count or on a sub-metric that shows the regression '
            'more clearly.')

    return {
        'first_working_revision': first_working_revision,
        'last_broken_revision': last_broken_revision,
        'culprit_revisions': culprit_revisions,
        'other_regressions': other_regressions,
        'regression_size': regression_size,
        'regression_std_err': regression_std_err,
        'confidence': confidence,
    }
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Emits a per-revision build-status table, then a results section with the
    culprit CL(s), repro steps, tested-commits table, step timing and any
    accumulated warnings. When --output_buildbot_annotations is set, the
    output is wrapped in buildbot step annotations.

    Args:
      bisect_results: The results from a bisection test run; must contain a
          'revision_data' dict (as consumed by _GetResultsDict).
    """
    revision_data = bisect_results['revision_data']
    # Order revisions by their bisect position ('sort' key), not by dict
    # iteration order.
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      # 'passed' may hold a non-bool marker for untested revisions; only
      # translate genuine booleans into Good/Bad labels.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
          current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    # A zero confidence value is treated as "no usable result": print the
    # failure banner instead of naming culprits.
    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
                                    revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()

    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Figures out which source control workflow the checkout uses.

  Only the git workflow is recognized at the moment: we probe with
  'git rev-parse --is-inside-work-tree' and wrap the checkout in a
  GitSourceControl object when the probe reports a work tree.

  Args:
    opts: The options parsed from the command line.

  Returns:
    A GitSourceControl instance, or None if the workflow is unsupported.
  """
  probe_output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  inside_git_work_tree = probe_output.strip() == 'true'
  return GitSourceControl(opts) if inside_git_work_tree else None
def IsPlatformSupported(opts):
  """Reports whether bisection can run on the current platform.

  Args:
    opts: The options parsed from the command line (not consulted here).

  Returns:
    True if the platform and build system are supported.
  """
  # The script has only been exercised on POSIX systems and Windows so far.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # 'except E as e' works on Python 2.6+ and Python 3, unlike the legacy
  # 'except E, e' form this previously used.
  except OSError as e:
    # The tree vanishing underneath us (ENOENT) is fine; any other OS error
    # means the removal genuinely failed.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles():
  """Removes build files from previous runs.

  Returns:
    True if both build output trees were cleared, False otherwise.
  """
  out_release = os.path.join('out', 'Release')
  build_release = os.path.join('build', 'Release')
  # Short-circuits exactly like the original nested ifs: the second tree is
  # only touched when the first one was cleared successfully.
  return bool(RmTreeAndMkDir(out_release) and RmTreeAndMkDir(build_release))
class BisectOptions(object):
  """Options to be used when running bisection."""
  def __init__(self):
    super(BisectOptions, self).__init__()

    # Defaults below mirror the optparse defaults declared in
    # _CreateCommandLineParser; ParseCommandLine/FromDict overwrite them.
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.builder_host = None
    self.builder_port = None

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    # Core bisect parameters: what to run, the revision range and the metric.
    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    parser.add_option_group(group)

    # Build configuration: where to check out, how and for which platform
    # to build, and optional cloud-storage/try-server build fetching.
    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     default='',
                     dest='gs_bucket',
                     type='str',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     dest='target_arch',
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    group.add_option('--builder_host',
                     dest='builder_host',
                     type='str',
                     help=('Host address of server to produce build by posting'
                           ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     type='int',
                     help=('HTTP port of the server to produce build by posting'
                           ' try job request.'))
    parser.add_option_group(group)

    # Debug switches that skip expensive stages of a bisect run.
    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
    return parser

  def ParseCommandLine(self):
    """Parses the command line for bisect options.

    Validates required options, normalizes 'metric' into a [chart, trace]
    pair, clamps numeric options and copies everything onto this instance.

    Raises:
      RuntimeError: if a required option is missing or invalid; the message
          includes the parser's help text.
    """
    parser = self._CreateCommandLineParser()
    (opts, args) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        # Cloud-storage builds need a reachable bucket plus a try server to
        # post build requests to.
        if not cloud_storage.List(opts.gs_bucket):
          raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
        if not opts.builder_host:
          raise RuntimeError('Must specify try server hostname, when '
                             'gs_bucket is used: --builder_host')
        if not opts.builder_port:
          raise RuntimeError('Must specify try server port number, when '
                             'gs_bucket is used: --builder_port')
      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print 'Sudo is required to build cros:'
        print
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if len(metric_values) != 2:
        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

      opts.metric = metric_values
      # Clamp numeric options to documented ranges; truncate_percent is
      # converted from a percentage to a fraction in [0, 0.25].
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy every parsed option onto self; the assert guards against typos
      # between parser dests and the attributes declared in __init__.
      for k, v in opts.__dict__.iteritems():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError, e:
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.

    Raises:
      RuntimeError: if the supplied metric string is not 'chart/trace'.
    """
    opts = BisectOptions()
    for k, v in values.iteritems():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    opts.metric = metric_values
    # Same clamping/normalization as ParseCommandLine.
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
3329 def main():
3331 try:
3332 opts = BisectOptions()
3333 parse_results = opts.ParseCommandLine()
3335 if opts.extra_src:
3336 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3337 if not extra_src:
3338 raise RuntimeError("Invalid or missing --extra_src.")
3339 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
3341 if opts.working_directory:
3342 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3343 if opts.no_custom_deps:
3344 custom_deps = None
3345 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3347 os.chdir(os.path.join(os.getcwd(), 'src'))
3349 if not RemoveBuildFiles():
3350 raise RuntimeError('Something went wrong removing the build files.')
3352 if not IsPlatformSupported(opts):
3353 raise RuntimeError("Sorry, this platform isn't supported yet.")
3355 # Check what source control method they're using. Only support git workflow
3356 # at the moment.
3357 source_control = DetermineAndCreateSourceControl(opts)
3359 if not source_control:
3360 raise RuntimeError("Sorry, only the git workflow is supported at the "
3361 "moment.")
3363 # gClient sync seems to fail if you're not in master branch.
3364 if (not source_control.IsInProperBranch() and
3365 not opts.debug_ignore_sync and
3366 not opts.working_directory):
3367 raise RuntimeError("You must switch to master branch to run bisection.")
3368 bisect_test = BisectPerformanceMetrics(source_control, opts)
3369 try:
3370 bisect_results = bisect_test.Run(opts.command,
3371 opts.bad_revision,
3372 opts.good_revision,
3373 opts.metric)
3374 if bisect_results['error']:
3375 raise RuntimeError(bisect_results['error'])
3376 bisect_test.FormatAndPrintResults(bisect_results)
3377 return 0
3378 finally:
3379 bisect_test.PerformCleanup()
3380 except RuntimeError, e:
3381 if opts.output_buildbot_annotations:
3382 # The perf dashboard scrapes the "results" step in order to comment on
3383 # bugs. If you change this, please update the perf dashboard as well.
3384 bisect_utils.OutputAnnotationStepStart('Results')
3385 print 'Error: %s' % e.message
3386 if opts.output_buildbot_annotations:
3387 bisect_utils.OutputAnnotationStepClosed()
3388 return 1
# Propagate main()'s 0/1 result as the process exit code when run as a script.
if __name__ == '__main__':
  sys.exit(main())