2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
54 sys
.path
.append(os
.path
.join(os
.path
.dirname(__file__
), 'telemetry'))
57 import post_perf_builder_job
58 from telemetry
.page
import cloud_storage
60 # The additional repositories that might need to be bisected.
61 # If the repository has any dependent repositories (such as skia/src needs
62 # skia/include and skia/gyp to be updated), specify them in the 'depends'
63 # so that they're synced appropriately.
65 # src: path to the working directory.
66 # recurse: True if this repository will get bisected.
67 # depends: A list of other repositories that are actually part of the same
69 # svn: Needed for git workflow to resolve hashes to svn revisions.
70 # from: Parent depot that must be bisected before this is bisected.
71 # deps_var: Key name in vars variable in DEPS file that has revision information.
77 "from" : ['cros', 'android-chrome'],
78 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
79 'deps_var': 'chromium_rev'
82 "src" : "src/third_party/WebKit",
85 "from" : ['chromium'],
86 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
87 'deps_var': 'webkit_revision'
90 "src" : "src/third_party/angle",
91 "src_old" : "src/third_party/angle_dx11",
94 "from" : ['chromium'],
96 'deps_var': 'angle_revision'
102 "from" : ['chromium'],
103 "custom_deps": bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
104 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
105 'deps_var': 'v8_revision'
107 'v8_bleeding_edge' : {
108 "src" : "src/v8_bleeding_edge",
111 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
113 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
114 'deps_var': 'v8_revision'
117 "src" : "src/third_party/skia/src",
119 "svn" : "http://skia.googlecode.com/svn/trunk/src",
120 "depends" : ['skia/include', 'skia/gyp'],
121 "from" : ['chromium'],
122 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
123 'deps_var': 'skia_revision'
126 "src" : "src/third_party/skia/include",
128 "svn" : "http://skia.googlecode.com/svn/trunk/include",
130 "from" : ['chromium'],
131 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
135 "src" : "src/third_party/skia/gyp",
137 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
139 "from" : ['chromium'],
140 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
145 DEPOT_NAMES
= DEPOT_DEPS_NAME
.keys()
146 CROS_SDK_PATH
= os
.path
.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
147 CROS_VERSION_PATTERN
= 'new version number from %s'
148 CROS_CHROMEOS_PATTERN
= 'chromeos-base/chromeos-chrome'
149 CROS_TEST_KEY_PATH
= os
.path
.join('..', 'cros', 'chromite', 'ssh_keys',
151 CROS_SCRIPT_KEY_PATH
= os
.path
.join('..', 'cros', 'src', 'scripts',
152 'mod_for_test_scripts', 'ssh_keys',
155 BUILD_RESULT_SUCCEED
= 0
156 BUILD_RESULT_FAIL
= 1
157 BUILD_RESULT_SKIPPED
= 2
159 # Maximum time in seconds to wait after posting build request to tryserver.
160 # TODO: Change these values based on the actual time taken by buildbots on
162 MAX_MAC_BUILD_TIME
= 14400
163 MAX_WIN_BUILD_TIME
= 14400
164 MAX_LINUX_BUILD_TIME
= 14400
166 # Patch template to add a new file, DEPS.sha under src folder.
167 # This file contains SHA1 value of the DEPS changes made while bisecting
168 # dependency repositories. This patch send along with DEPS patch to tryserver.
169 # When a build requested is posted with a patch, bisect builders on tryserver,
170 # once build is produced, it reads SHA value from this file and appends it
171 # to build archive filename.
172 DEPS_SHA_PATCH
= """diff --git src/DEPS.sha src/DEPS.sha
180 # The possible values of the --bisect_mode flag, which determines what to
181 # use when classifying a revision as "good" or "bad".
182 BISECT_MODE_MEAN
= 'mean'
183 BISECT_MODE_STD_DEV
= 'std_dev'
184 BISECT_MODE_RETURN_CODE
= 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict mapping depot names to their configuration dicts;
        merged into DEPOT_DEPS_NAME (entries in depot_info win on collision).
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Copy-and-update instead of dict(a.items() + b.items()): the latter only
  # works on Python 2, where items() returns a concatenable list.
  merged_depot_info = dict(DEPOT_DEPS_NAME)
  merged_depot_info.update(depot_info)
  DEPOT_DEPS_NAME = merged_depot_info
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values: the two outermost kept values only count for the fractional
      # remainder of a full sample.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # Make the documented contract explicit; previously this TypeError was
    # raised implicitly by reduce() on an empty sequence.
    raise TypeError('Data set was empty after discarding values.')

  # Equivalent to the old reduce(lambda x, y: float(x) + float(y), ...),
  # but simpler and Python 3 compatible.
  truncated_mean = sum(float(v) for v in data_set) / kept_weight

  return truncated_mean
def CalculateMean(values):
  """Calculates the arithmetic mean of a list of values."""
  # The arithmetic mean is simply a truncated mean that truncates nothing.
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Distance between the two groups: take the per-run means of each group
  # and measure the smaller of the two "extreme" gaps between them.
  good_means = [CalculateMean(values) for values in good_results_lists]
  bad_means = [CalculateMean(values) for values in bad_results_lists]
  dist_between_groups = min(
      math.fabs(max(bad_means) - min(good_means)),
      math.fabs(min(bad_means) - max(good_means)))

  # Noise estimate: sum of the standard deviations of the flattened groups.
  all_good_values = sum(good_results_lists, [])
  all_bad_values = sum(bad_results_lists, [])
  stddev_sum = (CalculateStandardDeviation(all_good_values) +
                CalculateStandardDeviation(all_bad_values))

  # Clamp the signal-to-noise quotient into [0, 1], then express it as a
  # percentage. The 0.0001 floor avoids division by zero on noiseless data.
  confidence = dist_between_groups / max(0.0001, stddev_sum)
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  # A single observation has no spread by definition.
  if len(values) == 1:
    return 0.0

  mean = CalculateMean(values)
  deviations = [float(v) - mean for v in values]
  squared_deviations = [float(d * d) for d in deviations]
  # Sample (not population) variance: divide by n - 1.
  variance = sum(squared_deviations) / (len(values) - 1)
  return math.sqrt(variance)
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  # Equal values mean no change, even when both are zero.
  if before == after:
    return 0.0
  # A change away from zero has no meaningful relative magnitude.
  if before == 0:
    return float('nan')
  return math.fabs((after - before) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error over several sample sets.

  Args:
    work_sets: A collection of lists of numbers.

  Returns:
    The pooled sample standard error as a float (0.0 when there are not
    enough degrees of freedom).
  """
  weighted_variance_sum = 0.0
  total_dof = 0.0        # Combined degrees of freedom (sum of n - 1).
  inverse_size_sum = 0.0  # Sum of 1/n over the sets.

  for sample in work_sets:
    std_dev = CalculateStandardDeviation(sample)
    dof = len(sample) - 1
    weighted_variance_sum += dof * std_dev ** 2
    total_dof += dof
    inverse_size_sum += 1.0 / len(sample)

  if total_dof:
    return (math.sqrt(weighted_variance_sum / total_dof) *
            math.sqrt(inverse_size_sum))
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  # With zero or one observation the standard error is undefined; report 0.
  if len(values) <= 1:
    return 0.0
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # Cygwin reports its own platform string rather than a 'win*' prefix,
  # so it has to be tested for explicitly.
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit (or if the processor
    architecture cannot be determined from the environment).
  """
  # PROCESSOR_ARCHITEW6432 is only set for a 32-bit process running under
  # WoW64, where it holds the real (64-bit) architecture. When it is absent,
  # the process is not in WoW64, so PROCESSOR_ARCHITECTURE is correct.
  # Using .get() avoids a KeyError on environments where neither variable is
  # set (the previous direct indexing assumed a Windows environment), and
  # avoids shadowing the stdlib name 'platform'.
  arch = os.environ.get('PROCESSOR_ARCHITEW6432',
                        os.environ.get('PROCESSOR_ARCHITECTURE'))
  return arch in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns secured hash containing hexadecimal for the given contents."""
  hasher = hashlib.sha1(contents)
  return hasher.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archive for x64 is still stored with 'win32' suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return '%s.zip' % base_name
  if patch_sha:
    # A patched build gets the patch SHA appended to its revision.
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName()
  return '%s/%s' % (builder_folder, base_filename)
476 def FetchFromCloudStorage(bucket_name
, source_path
, destination_path
):
477 """Fetches file(s) from the Google Cloud Storage.
480 bucket_name: Google Storage bucket name.
481 source_path: Source file path.
482 destination_path: Destination file path.
485 True if the fetching succeeds, otherwise False.
487 target_file
= os
.path
.join(destination_path
, os
.path
.basename(source_path
))
489 if cloud_storage
.Exists(bucket_name
, source_path
):
490 print 'Fetching file from gs//%s/%s ...' % (bucket_name
, source_path
)
491 cloud_storage
.Get(bucket_name
, source_path
, destination_path
)
492 if os
.path
.exists(target_file
):
495 print ('File gs://%s/%s not found in cloud storage.' % (
496 bucket_name
, source_path
))
497 except Exception as e
:
498 print 'Something went wrong while fetching file from cloud: %s' % e
499 if os
.path
.exists(target_file
):
500 os
.remove(target_file
)
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists (or was just created), False on any other
    OS error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # 'as e' (not the Python-2-only 'except OSError, e') for consistency
    # with the rest of this file. An already-existing path is fine; any
    # other error is a real failure.
    if e.errno != errno.EEXIST:
      return False
  return True
516 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
517 def ExtractZip(filename
, output_dir
, verbose
=True):
518 """ Extract the zip archive in the output directory."""
519 MaybeMakeDirectory(output_dir
)
521 # On Linux and Mac, we use the unzip command as it will
522 # handle links and file bits (executable), which is much
523 # easier then trying to do that with ZipInfo options.
525 # On Windows, try to use 7z if it is installed, otherwise fall back to python
526 # zip module and pray we don't have files larger than 512MB to unzip.
528 if IsMac() or IsLinux():
529 unzip_cmd
= ['unzip', '-o']
530 elif IsWindows() and os
.path
.exists('C:\\Program Files\\7-Zip\\7z.exe'):
531 unzip_cmd
= ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
534 # Make sure path is absolute before changing directories.
535 filepath
= os
.path
.abspath(filename
)
536 saved_dir
= os
.getcwd()
538 command
= unzip_cmd
+ [filepath
]
539 result
= RunProcess(command
)
542 raise IOError('unzip failed: %s => %s' % (str(command
), result
))
545 zf
= zipfile
.ZipFile(filename
)
546 for name
in zf
.namelist():
548 print 'Extracting %s' % name
549 zf
.extract(name
, output_dir
)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  use_shell = IsWindows()
  return subprocess.call(command, shell=use_shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  original_cwd = os.getcwd()
  if cwd:
    os.chdir(cwd)
  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindows()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Always restore the working directory, even if Popen/communicate raises;
    # otherwise a failed subprocess would leave the whole script running in
    # the wrong directory.
    os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  git_cmd = ['git'] + command
  return RunProcessAndRetrieveOutput(git_cmd, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, ('An error occurred while running'
                           ' "git %s"' % ' '.join(command))
  return output
631 def SetBuildSystemDefault(build_system
):
632 """Sets up any environment variables needed to build with the specified build
636 build_system: A string specifying build system. Currently only 'ninja' or
637 'make' are supported."""
638 if build_system
== 'ninja':
639 gyp_var
= os
.getenv('GYP_GENERATORS')
641 if not gyp_var
or not 'ninja' in gyp_var
:
643 os
.environ
['GYP_GENERATORS'] = gyp_var
+ ',ninja'
645 os
.environ
['GYP_GENERATORS'] = 'ninja'
648 os
.environ
['GYP_DEFINES'] = 'component=shared_library '\
649 'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
651 elif build_system
== 'make':
652 os
.environ
['GYP_GENERATORS'] = 'make'
654 raise RuntimeError('%s build not supported.' % build_system
)
def BuildWithMake(threads, targets, build_type='Release'):
  """Builds the given targets with make.

  Args:
    threads: Number of parallel jobs to pass via -j, or a falsy value to use
        make's default.
    targets: A list of make targets to build.
    build_type: The build configuration (e.g. 'Release').

  Returns:
    True if make exited with a zero return code.
  """
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  cmd += targets

  return not RunProcess(cmd)
def BuildWithNinja(threads, targets, build_type='Release'):
  """Builds the given targets with ninja.

  Args:
    threads: Number of parallel jobs to pass via -j, or a falsy value to use
        ninja's default.
    targets: A list of ninja targets to build.
    build_type: The build configuration (e.g. 'Release'); selects the output
        directory under out/.
  Returns:
    True if ninja exited with a zero return code.
  """
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  cmd += targets

  return not RunProcess(cmd)
def BuildWithVisualStudio(targets, build_type='Release'):
  """Builds the given projects with Visual Studio's devenv.com.

  Args:
    targets: A list of project names to build.
    build_type: The build configuration (e.g. 'Release').

  Returns:
    True if devenv exited with a zero return code.
  """
  # Locate devenv.com relative to the VS100COMNTOOLS environment variable.
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]

  for target in targets:
    cmd.extend(['/Project', target])

  return not RunProcess(cmd)
def WriteStringToFile(text, file_name):
  """Writes text to the named file (binary mode).

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Returns the entire contents of the named file.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths.

  Only the '--- ' and '+++ ' file-header lines of the diff are rewritten;
  backslashes elsewhere in the patch body are left untouched.

  Args:
    diff_text: The patch text, possibly empty/None.

  Returns:
    The patch text with backslashes in header lines replaced by forward
    slashes, or None if diff_text is falsy.
  """
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  # enumerate instead of range(len(...)): we need both the index (to write
  # back) and the line itself.
  for i, line in enumerate(diff_lines):
    if line.startswith('--- ') or line.startswith('+++ '):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
725 class Builder(object):
726 """Builder is used by the bisect script to build relevant targets and deploy.
728 def __init__(self
, opts
):
729 """Performs setup for building with target build system.
732 opts: Options parsed from command line.
735 if not opts
.build_preference
:
736 opts
.build_preference
= 'msvs'
738 if opts
.build_preference
== 'msvs':
739 if not os
.getenv('VS100COMNTOOLS'):
741 'Path to visual studio could not be determined.')
743 SetBuildSystemDefault(opts
.build_preference
)
745 if not opts
.build_preference
:
746 if 'ninja' in os
.getenv('GYP_GENERATORS'):
747 opts
.build_preference
= 'ninja'
749 opts
.build_preference
= 'make'
751 SetBuildSystemDefault(opts
.build_preference
)
753 if not bisect_utils
.SetupPlatformBuildEnvironment(opts
):
754 raise RuntimeError('Failed to set platform environment.')
759 if opts
.target_platform
== 'cros':
760 builder
= CrosBuilder(opts
)
761 elif opts
.target_platform
== 'android':
762 builder
= AndroidBuilder(opts
)
763 elif opts
.target_platform
== 'android-chrome':
764 builder
= AndroidChromeBuilder(opts
)
766 builder
= DesktopBuilder(opts
)
769 def Build(self
, depot
, opts
):
770 raise NotImplementedError()
772 def GetBuildOutputDirectory(self
, opts
, src_dir
=None):
773 raise NotImplementedError()
776 class DesktopBuilder(Builder
):
777 """DesktopBuilder is used to build Chromium on linux/mac/windows."""
778 def __init__(self
, opts
):
779 super(DesktopBuilder
, self
).__init
__(opts
)
781 def Build(self
, depot
, opts
):
782 """Builds chromium_builder_perf target using options passed into
786 depot: Current depot being bisected.
787 opts: The options parsed from the command line.
790 True if build was successful.
792 targets
= ['chromium_builder_perf']
798 build_success
= False
799 if opts
.build_preference
== 'make':
800 build_success
= BuildWithMake(threads
, targets
, opts
.target_build_type
)
801 elif opts
.build_preference
== 'ninja':
802 build_success
= BuildWithNinja(threads
, targets
, opts
.target_build_type
)
803 elif opts
.build_preference
== 'msvs':
804 assert IsWindows(), 'msvs is only supported on Windows.'
805 build_success
= BuildWithVisualStudio(targets
, opts
.target_build_type
)
807 assert False, 'No build system defined.'
810 def GetBuildOutputDirectory(self
, opts
, src_dir
=None):
811 """Returns the path to the build directory, relative to the checkout root.
813 Assumes that the current working directory is the checkout root.
815 src_dir
= src_dir
or 'src'
816 if opts
.build_preference
== 'ninja' or IsLinux():
817 return os
.path
.join(src_dir
, 'out')
819 return os
.path
.join(src_dir
, 'xcodebuild')
821 return os
.path
.join(src_dir
, 'build')
822 raise NotImplementedError('Unexpected platform %s' % sys
.platform
)
825 class AndroidBuilder(Builder
):
826 """AndroidBuilder is used to build on android."""
827 def __init__(self
, opts
):
828 super(AndroidBuilder
, self
).__init
__(opts
)
830 def _GetTargets(self
):
831 return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']
833 def Build(self
, depot
, opts
):
834 """Builds the android content shell and other necessary tools using options
835 passed into the script.
838 depot: Current depot being bisected.
839 opts: The options parsed from the command line.
842 True if build was successful.
848 build_success
= False
849 if opts
.build_preference
== 'ninja':
850 build_success
= BuildWithNinja(
851 threads
, self
._GetTargets
(), opts
.target_build_type
)
853 assert False, 'No build system defined.'
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build android's chrome."""

  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Everything the plain android builder builds, plus the chrome apk.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
867 class CrosBuilder(Builder
):
868 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
870 def __init__(self
, opts
):
871 super(CrosBuilder
, self
).__init
__(opts
)
873 def ImageToTarget(self
, opts
):
874 """Installs latest image to target specified by opts.cros_remote_ip.
877 opts: Program options containing cros_board and cros_remote_ip.
883 # Keys will most likely be set to 0640 after wiping the chroot.
884 os
.chmod(CROS_SCRIPT_KEY_PATH
, 0600)
885 os
.chmod(CROS_TEST_KEY_PATH
, 0600)
886 cmd
= [CROS_SDK_PATH
, '--', './bin/cros_image_to_target.py',
887 '--remote=%s' % opts
.cros_remote_ip
,
888 '--board=%s' % opts
.cros_board
, '--test', '--verbose']
890 return_code
= RunProcess(cmd
)
891 return not return_code
895 def BuildPackages(self
, opts
, depot
):
896 """Builds packages for cros.
899 opts: Program options containing cros_board.
900 depot: The depot being bisected.
905 cmd
= [CROS_SDK_PATH
]
908 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
909 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
914 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
916 cmd
+= ['BUILDTYPE=%s' % opts
.target_build_type
, './build_packages',
917 '--board=%s' % opts
.cros_board
]
918 return_code
= RunProcess(cmd
)
920 return not return_code
922 def BuildImage(self
, opts
, depot
):
923 """Builds test image for cros.
926 opts: Program options containing cros_board.
927 depot: The depot being bisected.
932 cmd
= [CROS_SDK_PATH
]
935 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
936 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
941 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
943 cmd
+= ['BUILDTYPE=%s' % opts
.target_build_type
, '--', './build_image',
944 '--board=%s' % opts
.cros_board
, 'test']
946 return_code
= RunProcess(cmd
)
948 return not return_code
950 def Build(self
, depot
, opts
):
951 """Builds targets using options passed into the script.
954 depot: Current depot being bisected.
955 opts: The options parsed from the command line.
958 True if build was successful.
960 if self
.BuildPackages(opts
, depot
):
961 if self
.BuildImage(opts
, depot
):
962 return self
.ImageToTarget(opts
)
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""

  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
1000 class GitSourceControl(SourceControl
):
1001 """GitSourceControl is used to query the underlying source control. """
1002 def __init__(self
, opts
):
1003 super(GitSourceControl
, self
).__init
__()
1009 def GetRevisionList(self
, revision_range_end
, revision_range_start
, cwd
=None):
1010 """Retrieves a list of revisions between |revision_range_start| and
1011 |revision_range_end|.
1014 revision_range_end: The SHA1 for the end of the range.
1015 revision_range_start: The SHA1 for the beginning of the range.
1018 A list of the revisions between |revision_range_start| and
1019 |revision_range_end| (inclusive).
1021 revision_range
= '%s..%s' % (revision_range_start
, revision_range_end
)
1022 cmd
= ['log', '--format=%H', '-10000', '--first-parent', revision_range
]
1023 log_output
= CheckRunGit(cmd
, cwd
=cwd
)
1025 revision_hash_list
= log_output
.split()
1026 revision_hash_list
.append(revision_range_start
)
1028 return revision_hash_list
1030 def SyncToRevision(self
, revision
, sync_client
=None):
1031 """Syncs to the specified revision.
1034 revision: The revision to sync to.
1035 use_gclient: Specifies whether or not we should sync using gclient or
1036 just use source control directly.
1043 results
= RunGit(['checkout', revision
])[1]
1044 elif sync_client
== 'gclient':
1045 results
= self
.SyncToRevisionWithGClient(revision
)
1046 elif sync_client
== 'repo':
1047 results
= self
.SyncToRevisionWithRepo(revision
)
1051 def ResolveToRevision(self
, revision_to_check
, depot
, search
, cwd
=None):
1052 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1055 revision_to_check: The user supplied revision string that may need to be
1056 resolved to a git SHA1.
1057 depot: The depot the revision_to_check is from.
1058 search: The number of changelists to try if the first fails to resolve
1059 to a git hash. If the value is negative, the function will search
1060 backwards chronologically, otherwise it will search forward.
1063 A string containing a git SHA1 hash, otherwise None.
1065 # Android-chrome is git only, so no need to resolve this to anything else.
1066 if depot
== 'android-chrome':
1067 return revision_to_check
1070 if not IsStringInt(revision_to_check
):
1071 return revision_to_check
1073 depot_svn
= 'svn://svn.chromium.org/chrome/trunk/src'
1075 if depot
!= 'chromium':
1076 depot_svn
= DEPOT_DEPS_NAME
[depot
]['svn']
1078 svn_revision
= int(revision_to_check
)
1082 search_range
= xrange(svn_revision
, svn_revision
+ search
, 1)
1084 search_range
= xrange(svn_revision
, svn_revision
+ search
, -1)
1086 for i
in search_range
:
1087 svn_pattern
= 'git-svn-id: %s@%d' % (depot_svn
, i
)
1088 cmd
= ['log', '--format=%H', '-1', '--grep', svn_pattern
,
1091 (log_output
, return_code
) = RunGit(cmd
, cwd
=cwd
)
1093 assert not return_code
, 'An error occurred while running'\
1094 ' "git %s"' % ' '.join(cmd
)
1097 log_output
= log_output
.strip()
1100 git_revision
= log_output
1106 if IsStringInt(revision_to_check
):
1107 return int(revision_to_check
)
1110 os
.chdir(os
.path
.join(os
.getcwd(), 'src', 'third_party',
1111 'chromiumos-overlay'))
1112 pattern
= CROS_VERSION_PATTERN
% revision_to_check
1113 cmd
= ['log', '--format=%ct', '-1', '--grep', pattern
]
1117 log_output
= CheckRunGit(cmd
, cwd
=cwd
)
1119 git_revision
= log_output
1120 git_revision
= int(log_output
.strip())
1125 def IsInProperBranch(self
):
1126 """Confirms they're in the master branch for performing the bisection.
1127 This is needed or gclient will fail to sync properly.
1130 True if the current branch on src is 'master'
1132 cmd
= ['rev-parse', '--abbrev-ref', 'HEAD']
1133 log_output
= CheckRunGit(cmd
)
1134 log_output
= log_output
.strip()
1136 return log_output
== "master"
1138 def SVNFindRev(self
, revision
, cwd
=None):
1139 """Maps directly to the 'git svn find-rev' command.
1142 revision: The git SHA1 to use.
1145 An integer changelist #, otherwise None.
1148 cmd
= ['svn', 'find-rev', revision
]
1150 output
= CheckRunGit(cmd
, cwd
)
1151 svn_revision
= output
.strip()
1153 if IsStringInt(svn_revision
):
1154 return int(svn_revision
)
1158 def QueryRevisionInfo(self
, revision
, cwd
=None):
1159 """Gathers information on a particular revision, such as author's name,
1160 email, subject, and date.
1163 revision: Revision you want to gather information on.
1165 A dict in the following format:
1176 formats
= ['%cN', '%cE', '%s', '%cD', '%b']
1177 targets
= ['author', 'email', 'subject', 'date', 'body']
1179 for i
in xrange(len(formats
)):
1180 cmd
= ['log', '--format=%s' % formats
[i
], '-1', revision
]
1181 output
= CheckRunGit(cmd
, cwd
=cwd
)
1182 commit_info
[targets
[i
]] = output
.rstrip()
1186 def CheckoutFileAtRevision(self
, file_name
, revision
, cwd
=None):
1187 """Performs a checkout on a file at the given revision.
1192 return not RunGit(['checkout', revision
, file_name
], cwd
=cwd
)[1]
1194 def RevertFileToHead(self
, file_name
):
1195 """Unstages a file and returns it to HEAD.
1200 # Reset doesn't seem to return 0 on success.
1201 RunGit(['reset', 'HEAD', file_name
])
1203 return not RunGit(['checkout', bisect_utils
.FILE_DEPS_GIT
])[1]
def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
  """Returns a list of commits that modified this file.

  Args:
    filename: Name of file.
    revision_start: Start of revision range.
    revision_end: End of revision range.

  Returns:
    Returns a list of commits that touched this file.
  """
  # The ~1 makes the range inclusive of revision_start itself.
  revision_range = '%s~1..%s' % (revision_start, revision_end)
  log_output = CheckRunGit(
      ['log', '--format=%H', revision_range, filename])
  commit_hashes = []
  for line in log_output.split('\n'):
    if line:
      commit_hashes.append(line)
  return commit_hashes
1223 class BisectPerformanceMetrics(object):
1224 """This class contains functionality to perform a bisection of a range of
1225 revisions to narrow down where performance regressions may have occurred.
1227 The main entry-point is the Run method.
# NOTE(review): this excerpt is line-mangled and elided -- initializations
# visible upstream (e.g. 'self.opts', 'self.depot_cwd = {}', 'self.warnings')
# do not appear here; restore from the upstream bisect-perf-regression.py
# before editing.
1230 def __init__(self
, source_control
, opts
):
1231 super(BisectPerformanceMetrics
, self
).__init
__()
# source_control is the SCM wrapper used for all repository queries.
1234 self
.source_control
= source_control
1235 self
.src_cwd
= os
.getcwd()
1236 self
.cros_cwd
= os
.path
.join(os
.getcwd(), '..', 'cros')
1238 self
.cleanup_commands
= []
1240 self
.builder
= Builder
.FromOpts(opts
)
1242 # This always starts true since the script grabs latest first.
1243 self
.was_blink
= True
1245 for d
in DEPOT_NAMES
:
1246 # The working directory of each depot is just the path to the depot, but
1247 # since we're already in 'src', we can skip that part.
# The [4:] strips the leading 'src/' from the depot's 'src' path.
1249 self
.depot_cwd
[d
] = os
.path
.join(
1250 self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
1252 def PerformCleanup(self
):
1253 """Performs cleanup when script is finished."""
1254 os
.chdir(self
.src_cwd
)
# Each queued command is a tuple dispatched on its first element; the
# guarding 'if'/'else' lines are elided in this excerpt (upstream moves
# c[1] to c[2] for 'mv' commands) -- TODO confirm against upstream.
1255 for c
in self
.cleanup_commands
:
1257 shutil
.move(c
[1], c
[2])
# Unknown command types are treated as a programming error.
1259 assert False, 'Invalid cleanup command.'
1261 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
1262 """Retrieves a list of all the commits between the bad revision and
1263 last known good revision."""
1265 revision_work_list
= []
# NOTE(review): the branching between the Chrome OS ('repo forall') path and
# the default source-control path appears elided here (upstream wraps the
# following in 'if depot == "cros": ... else: ...') -- confirm before editing.
1268 revision_range_start
= good_revision
1269 revision_range_end
= bad_revision
1272 self
.ChangeToDepotWorkingDirectory('cros')
1274 # Print the commit timestamps for every commit in the revision time
1275 # range. We'll sort them and bisect by that. There is a remote chance that
1276 # 2 (or more) commits will share the exact same timestamp, but it's
1277 # probably safe to ignore that case.
1278 cmd
= ['repo', 'forall', '-c',
1279 'git log --format=%%ct --before=%d --after=%d' % (
1280 revision_range_end
, revision_range_start
)]
1281 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1283 assert not return_code
, 'An error occurred while running'\
1284 ' "%s"' % ' '.join(cmd
)
# De-duplicate timestamps and bisect over them newest-first.
1288 revision_work_list
= list(set(
1289 [int(o
) for o
in output
.split('\n') if IsStringInt(o
)]))
1290 revision_work_list
= sorted(revision_work_list
, reverse
=True)
# Non-cros depots delegate to the source-control wrapper.
1292 cwd
= self
._GetDepotDirectory
(depot
)
1293 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
1294 good_revision
, cwd
=cwd
)
1296 return revision_work_list
# Maps a V8 trunk revision to its bleeding_edge counterpart when the trunk
# commit follows the standard "Version X.Y.Z" push pattern. Returns the
# bleeding_edge git hash, otherwise None (return statements are elided in
# this excerpt -- confirm against upstream).
1298 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self
, revision
):
1299 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1301 if IsStringInt(svn_revision
):
1302 # V8 is tricky to bisect, in that there are only a few instances when
1303 # we can dive into bleeding_edge and get back a meaningful result.
1304 # Try to detect a V8 "business as usual" case, which is when:
1305 # 1. trunk revision N has description "Version X.Y.Z"
1306 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1307 # trunk. Now working on X.Y.(Z+1)."
1309 # As of 01/24/2014, V8 trunk descriptions are formatted:
1310 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1311 # So we can just try parsing that out first and fall back to the old way.
1312 v8_dir
= self
._GetDepotDirectory
('v8')
1313 v8_bleeding_edge_dir
= self
._GetDepotDirectory
('v8_bleeding_edge')
1315 revision_info
= self
.source_control
.QueryRevisionInfo(revision
,
1318 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
1320 regex_results
= version_re
.search(revision_info
['subject'])
1325 # Look for "based on bleeding_edge" and parse out revision
1326 if 'based on bleeding_edge' in revision_info
['subject']:
# Subject format: "... (based on bleeding_edge revision rNNNN)".
1328 bleeding_edge_revision
= revision_info
['subject'].split(
1329 'bleeding_edge revision r')[1]
1330 bleeding_edge_revision
= int(bleeding_edge_revision
.split(')')[0])
1331 git_revision
= self
.source_control
.ResolveToRevision(
1332 bleeding_edge_revision
, 'v8_bleeding_edge', 1,
1333 cwd
=v8_bleeding_edge_dir
)
# The enclosing 'try:' line is elided; parsing failures fall through here.
1335 except (IndexError, ValueError):
1338 if not git_revision
:
1339 # Wasn't successful, try the old way of looking for "Prepare push to"
# Search backwards (-1) from the bleeding_edge revision just before trunk.
1340 git_revision
= self
.source_control
.ResolveToRevision(
1341 int(svn_revision
) - 1, 'v8_bleeding_edge', -1,
1342 cwd
=v8_bleeding_edge_dir
)
1345 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
,
1346 cwd
=v8_bleeding_edge_dir
)
1348 if 'Prepare push to trunk' in revision_info
['subject']:
# Finds the nearest trunk commit (before or after the given revision,
# depending on search_forward) that maps cleanly onto bleeding_edge.
# NOTE(review): the 'if search_forward: ... else: ...' branch lines and the
# 'commits = output' assignments are elided in this excerpt -- confirm
# against upstream before editing.
1352 def _GetNearestV8BleedingEdgeFromTrunk(self
, revision
, search_forward
=True):
1353 cwd
= self
._GetDepotDirectory
('v8')
1354 cmd
= ['log', '--format=%ct', '-1', revision
]
1355 output
= CheckRunGit(cmd
, cwd
=cwd
)
1356 commit_time
= int(output
)
# Forward search: up to 10 commits committed after this timestamp.
1360 cmd
= ['log', '--format=%H', '-10', '--after=%d' % commit_time
,
1362 output
= CheckRunGit(cmd
, cwd
=cwd
)
1363 output
= output
.split()
# git log lists newest first; reversed() walks them oldest-to-newest.
1365 commits
= reversed(commits
)
# Backward search: up to 10 commits committed before this timestamp.
1367 cmd
= ['log', '--format=%H', '-10', '--before=%d' % commit_time
,
1369 output
= CheckRunGit(cmd
, cwd
=cwd
)
1370 output
= output
.split()
1373 bleeding_edge_revision
= None
# The surrounding 'for c in commits:' loop line is elided here.
1376 bleeding_edge_revision
= self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(c
)
1377 if bleeding_edge_revision
:
1380 return bleeding_edge_revision
1382 def _ParseRevisionsFromDEPSFileManually(self
, deps_file_contents
):
1383 """Manually parses the vars section of the DEPS file to determine
1384 chromium/blink/etc... revisions.
1387 A dict in the format {depot:revision} if successful, otherwise None.
1389 # We'll parse the "vars" section of the DEPS file.
1390 rxp
= re
.compile('vars = {(?P<vars_body>[^}]+)', re
.MULTILINE
)
1391 re_results
= rxp
.search(deps_file_contents
)
# NOTE(review): the guard returning None when the 'vars' section is missing
# is elided in this excerpt -- without it, the .group() call below would
# raise on a failed search. Confirm against upstream.
1397 # We should be left with a series of entries in the vars component of
1398 # the DEPS file with the following format:
1399 # 'depot_name': 'revision',
1400 vars_body
= re_results
.group('vars_body')
# Second argument (re.MULTILINE) of this compile call is elided here.
1401 rxp
= re
.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1403 re_results
= rxp
.findall(vars_body
)
# findall yields (depot, revision) pairs, which dict() converts directly.
1405 return dict(re_results
)
1407 def _ParseRevisionsFromDEPSFile(self
, depot
):
1408 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1409 be needed if the bisect recurses into those depots later.
1412 depot: Depot being bisected.
1415 A dict in the format {depot:revision} if successful, otherwise None.
# Stub out Var/From so the DEPS file can be exec'd as plain Python;
# Var() resolves against the file's own 'vars' dict.
1418 deps_data
= {'Var': lambda _
: deps_data
["vars"][_
],
1419 'From': lambda *args
: None
# SECURITY NOTE(review): execfile runs DEPS as arbitrary Python; DEPS comes
# from the checkout being bisected, so this is trusted-but-worth-knowing.
1421 execfile(bisect_utils
.FILE_DEPS_GIT
, {}, deps_data
)
1422 deps_data
= deps_data
['deps']
# Matches the 40-char (or shorter) hex hash after '.git@' in a deps URL.
1424 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1426 for depot_name
, depot_data
in DEPOT_DEPS_NAME
.iteritems():
# Skip depots pinned to a different platform than the current one.
1427 if (depot_data
.get('platform') and
1428 depot_data
.get('platform') != os
.name
):
1431 if (depot_data
.get('recurse') and depot
in depot_data
.get('from')):
1432 src_dir
= depot_data
.get('src') or depot_data
.get('src_old')
1434 self
.depot_cwd
[depot_name
] = os
.path
.join(self
.src_cwd
, src_dir
[4:])
1435 re_results
= rxp
.search(deps_data
.get(src_dir
, ''))
# The 'if re_results: ... else:' lines around this assignment are elided.
1437 results
[depot_name
] = re_results
.group('revision')
1439 warning_text
= ('Couldn\'t parse revision for %s while bisecting '
1440 '%s' % (depot_name
, depot
))
1441 if not warning_text
in self
.warnings
:
1442 self
.warnings
.append(warning_text
)
# Fallback path (upstream: when exec'ing DEPS fails): parse textually.
1445 deps_file_contents
= ReadStringFromFile(bisect_utils
.FILE_DEPS_GIT
)
1446 parse_results
= self
._ParseRevisionsFromDEPSFileManually
(
1449 for depot_name
, depot_revision
in parse_results
.iteritems():
1450 depot_revision
= depot_revision
.strip('@')
# NOTE(review): looks like a leftover debug print -- consider removing.
1451 print depot_name
, depot_revision
1452 for current_name
, current_data
in DEPOT_DEPS_NAME
.iteritems():
1453 if (current_data
.has_key('deps_var') and
1454 current_data
['deps_var'] == depot_name
):
1455 src_name
= current_name
1456 results
[src_name
] = depot_revision
1460 def Get3rdPartyRevisionsFromCurrentRevision(self
, depot
, revision
):
1461 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1464 A dict in the format {depot:revision} if successful, otherwise None.
1467 self
.ChangeToDepotWorkingDirectory(depot
)
1471 if depot
== 'chromium' or depot
== 'android-chrome':
1472 results
= self
._ParseRevisionsFromDEPSFile
(depot
)
1474 elif depot
== 'cros':
# Query portage inside the CrOS SDK for the currently visible chromeos
# ebuild version.
1475 cmd
= [CROS_SDK_PATH
, '--', 'portageq-%s' % self
.opts
.cros_board
,
1476 'best_visible', '/build/%s' % self
.opts
.cros_board
, 'ebuild',
1477 CROS_CHROMEOS_PATTERN
]
1478 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1480 assert not return_code
, 'An error occurred while running' \
1481 ' "%s"' % ' '.join(cmd
)
# NOTE(review): comparing an int (len) against a string pattern -- in
# Python 2 this is always False; upstream likely intended
# len(CROS_CHROMEOS_PATTERN). TODO confirm and fix.
1483 if len(output
) > CROS_CHROMEOS_PATTERN
:
1484 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
# Strip the '_rc-...' suffix, leaving the dotted version string.
1487 output
= output
.split('_')[0]
1490 contents
= output
.split('.')
1492 version
= contents
[2]
1494 if contents
[3] != '0':
1495 warningText
= 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1496 (version
, contents
[3], version
)
1497 if not warningText
in self
.warnings
:
1498 self
.warnings
.append(warningText
)
1501 self
.ChangeToDepotWorkingDirectory('chromium')
# NOTE(review): the git hash is bound to 'return_code' while the line below
# reads 'output' (still holding the portageq result) -- looks like the
# CheckRunGit result is discarded; verify against upstream.
1502 return_code
= CheckRunGit(['log', '-1', '--format=%H',
1503 '--author=chrome-release@google.com', '--grep=to %s' % version
,
1507 results
['chromium'] = output
.strip()
1509 # We can't try to map the trunk revision to bleeding edge yet, because
1510 # we don't know which direction to try to search in. Have to wait until
1511 # the bisect has narrowed the results down to 2 v8 rolls.
1512 results
['v8_bleeding_edge'] = None
def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
  """Backs up or restores build output directory based on restore argument.

  Args:
    restore: Indicates whether to restore or backup. Default is False(Backup)
    build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

  Returns:
    Path to backup or restored location as string. otherwise None if it fails.
  """
  build_dir = os.path.abspath(
      self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
  source_dir = os.path.join(build_dir, build_type)
  destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
  if restore:
    # Restoring swaps the roles: move the .bak copy back over the output.
    source_dir, destination_dir = destination_dir, source_dir
  if not os.path.exists(source_dir):
    return None
  RmTreeAndMkDir(destination_dir, skip_makedir=True)
  shutil.move(source_dir, destination_dir)
  return destination_dir
1538 def DownloadCurrentBuild(self
, revision
, build_type
='Release', patch
=None):
1539 """Downloads the build archive for the given revision.
1542 revision: The SVN revision to build.
1543 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1546 True if download succeeds, otherwise False.
# The 'if patch:' guard around this section is elided in this excerpt.
1550 # Get the SHA of the DEPS changes patch.
1551 patch_sha
= GetSHA1HexDigest(patch
)
1553 # Update the DEPS changes patch with a patch to create a new file named
1554 # 'DEPS.sha' and add patch_sha evaluated above to it.
1555 patch
= '%s\n%s' % (patch
, DEPS_SHA_PATCH
% {'deps_sha': patch_sha
})
1557 # Source archive file path on cloud storage.
1558 source_file
= GetRemoteBuildPath(revision
, self
.opts
.target_arch
, patch_sha
)
1560 # Get Build output directory
1561 abs_build_dir
= os
.path
.abspath(
1562 self
.builder
.GetBuildOutputDirectory(self
.opts
, self
.src_cwd
))
1563 # Downloaded archive file path.
1564 downloaded_file
= os
.path
.join(
1566 GetZipFileName(revision
, self
.opts
.target_arch
, patch_sha
))
# Deferred fetch so it can double as the retry condition below.
1568 fetch_build_func
= lambda: FetchFromCloudStorage(self
.opts
.gs_bucket
,
# If the archive is missing, request a try-server build and poll for it.
1572 if not fetch_build_func():
1573 if not self
.PostBuildRequestAndWait(revision
,
1574 condition
=fetch_build_func
,
# NOTE(review): message typo 'Somewthing', and the adjacent literals
# concatenate without a space ('buildrequest') -- fix the string.
1576 raise RuntimeError('Somewthing went wrong while processing build'
1577 'request for: %s' % revision
)
1578 # Generic name for the archive, created when archive file is extracted.
1579 output_dir
= os
.path
.join(
1580 abs_build_dir
, GetZipFileName(target_arch
=self
.opts
.target_arch
))
1581 # Unzip build archive directory.
# The enclosing 'try:' line is elided; the except handler appears below.
1583 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
1584 ExtractZip(downloaded_file
, abs_build_dir
)
1585 if os
.path
.exists(output_dir
):
1586 self
.BackupOrRestoreOutputdirectory(restore
=False)
1587 # Build output directory based on target(e.g. out/Release, out/Debug).
1588 target_build_output_dir
= os
.path
.join(abs_build_dir
, build_type
)
1589 print 'Moving build from %s to %s' % (
1590 output_dir
, target_build_output_dir
)
1591 shutil
.move(output_dir
, target_build_output_dir
)
1593 raise IOError('Missing extracted folder %s ' % output_dir
)
1594 except Exception as e
:
# NOTE(review): same 'Somewthing' typo in this message.
1595 print 'Somewthing went wrong while extracting archive file: %s' % e
1596 self
.BackupOrRestoreOutputdirectory(restore
=True)
1597 # Cleanup any leftovers from unzipping.
1598 if os
.path
.exists(output_dir
):
1599 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
# Runs in the elided 'finally:' clause upstream.
1601 # Delete downloaded archive
1602 if os
.path
.exists(downloaded_file
):
1603 os
.remove(downloaded_file
)
1606 def PostBuildRequestAndWait(self
, revision
, condition
, patch
=None):
1607 """POSTs the build request job to the tryserver instance."""
1609 def GetBuilderNameAndBuildTime(target_arch
='ia32'):
1610 """Gets builder bot name and buildtime in seconds based on platform."""
1611 # Bot names should match the one listed in tryserver.chromium's
1612 # master.cfg which produces builds for bisect.
# NOTE(review): the per-platform 'if IsWindows()/IsLinux()/IsMac()' lines
# are elided here, which is why the win builder return appears twice --
# restore from upstream before editing.
1614 if Is64BitWindows() and target_arch
== 'x64':
1615 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME
)
1616 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME
)
1618 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME
)
1620 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME
)
1621 raise NotImplementedError('Unsupported Platform "%s".' % sys
.platform
)
1625 bot_name
, build_timeout
= GetBuilderNameAndBuildTime(self
.opts
.target_arch
)
1627 # Create a unique ID for each build request posted to try server builders.
1628 # This ID is added to "Reason" property in build's json.
1629 # TODO: Use this id to track the build status.
1630 build_request_id
= GetSHA1HexDigest('%s-%s' % (revision
, patch
))
1632 # Creates a try job description.
1633 job_args
= {'host': self
.opts
.builder_host
,
1634 'port': self
.opts
.builder_port
,
1635 'revision': 'src@%s' % revision
,
1637 'name': build_request_id
1639 # Update patch information if supplied.
# The 'if patch:' guard line is elided here.
1641 job_args
['patch'] = patch
1642 # Posts job to build the revision on the server.
1643 if post_perf_builder_job
.PostTryJob(job_args
):
# Poll 'condition' until it succeeds or the per-platform timeout elapses;
# the enclosing 'while' loop line is elided in this excerpt.
1645 start_time
= time
.time()
1650 elapsed_time
= time
.time() - start_time
1651 if elapsed_time
> build_timeout
:
1652 raise RuntimeError('Timed out while waiting %ds for %s build.' %
1653 (build_timeout
, revision
))
1654 print ('Time elapsed: %ss, still waiting for %s build' %
1655 (elapsed_time
, revision
))
1656 time
.sleep(poll_interval
)
def IsDownloadable(self, depot):
  """Checks if build is downloadable based on target platform and depot."""
  # Archived builds exist only for chromium targets with a storage bucket.
  if self.opts.target_platform not in ['chromium'] or not self.opts.gs_bucket:
    return False
  if depot == 'chromium':
    return True
  return ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
          'v8' in DEPOT_DEPS_NAME[depot]['from'])
1667 def UpdateDeps(self
, revision
, depot
, deps_file
):
1668 """Updates DEPS file with new revision of dependency repository.
1670 This method search DEPS for a particular pattern in which depot revision
1671 is specified (e.g "webkit_revision": "123456"). If a match is found then
1672 it resolves the given git hash to SVN revision and replace it in DEPS file.
1675 revision: A git hash revision of the dependency repository.
1676 depot: Current depot being bisected.
1677 deps_file: Path to DEPS file.
1680 True if DEPS file is modified successfully, otherwise False.
1682 if not os
.path
.exists(deps_file
):
1685 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
1686 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
# NOTE(review): Python 2 print with a trailing ', depot' prints a tuple
# literal, not an interpolated string -- should be '% depot'. Fix.
1688 print 'DEPS update not supported for Depot: %s', depot
1691 # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1692 # contains "angle_revision" key that holds git hash instead of SVN revision.
1693 # And sometime "angle_revision" key is not specified in "vars" variable,
1694 # in such cases check "deps" dictionary variable that matches
1695 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1696 if depot
== 'angle':
1697 return self
.UpdateDEPSForAngle(revision
, depot
, deps_file
)
# The enclosing 'try:' line is elided; the except handler appears below.
1700 deps_contents
= ReadStringFromFile(deps_file
)
1701 # Check whether the depot and revision pattern in DEPS file vars
1702 # e.g. for webkit the format is "webkit_revision": "12345".
# Lookbehind/lookahead keep the quotes intact so re.sub replaces only the
# numeric revision. The second compile argument is elided here.
1703 deps_revision
= re
.compile(r
'(?<="%s": ")([0-9]+)(?=")' % deps_var
,
1705 match
= re
.search(deps_revision
, deps_contents
)
# Archived builds are keyed by SVN revision, so resolve the git hash.
1707 svn_revision
= self
.source_control
.SVNFindRev(
1708 revision
, self
._GetDepotDirectory
(depot
))
1709 if not svn_revision
:
1710 print 'Could not determine SVN revision for %s' % revision
1712 # Update the revision information for the given depot
1713 new_data
= re
.sub(deps_revision
, str(svn_revision
), deps_contents
)
1715 # For v8_bleeding_edge revisions change V8 branch in order
1716 # to fetch bleeding edge revision.
1717 if depot
== 'v8_bleeding_edge':
1718 new_data
= self
.UpdateV8Branch(new_data
)
1721 # Write changes to DEPS file
1722 WriteStringToFile(new_data
, deps_file
)
# Reached from the elided 'except' clause upstream.
1725 print 'Something went wrong while updating DEPS file. [%s]' % e
1728 def UpdateV8Branch(self
, deps_content
):
1729 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1731 Check for "v8_branch" in DEPS file if exists update its value
1732 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1733 variable from DEPS revision 254916, therefore check for "src/v8":
1734 <v8 source path> in DEPS in order to support prior DEPS revisions
1738 deps_content: DEPS file contents to be modified.
1741 Modified DEPS file contents as a string.
1743 new_branch
= r
'branches/bleeding_edge'
# Lookbehind/lookahead keep the surrounding quotes so only the branch
# value itself is replaced.
1744 v8_branch_pattern
= re
.compile(r
'(?<="v8_branch": ")(.*)(?=")')
1745 if re
.search(v8_branch_pattern
, deps_content
):
1746 deps_content
= re
.sub(v8_branch_pattern
, new_branch
, deps_content
)
# The 'else:' line for the pre-254916 DEPS fallback and the trailing
# 'return deps_content' are elided in this excerpt -- confirm upstream.
1748 # Replaces the branch assigned to "src/v8" key in DEPS file.
1749 # Format of "src/v8" in DEPS:
1751 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1752 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1753 v8_src_pattern
= re
.compile(
1754 r
'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re
.MULTILINE
)
1755 if re
.search(v8_src_pattern
, deps_content
):
1756 deps_content
= re
.sub(v8_src_pattern
, new_branch
, deps_content
)
1759 def UpdateDEPSForAngle(self
, revision
, depot
, deps_file
):
1760 """Updates DEPS file with new revision for Angle repository.
1762 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1763 variable contains "angle_revision" key that holds git hash instead of
1766 And sometimes "angle_revision" key is not specified in "vars" variable,
1767 in such cases check "deps" dictionary variable that matches
1768 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1770 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
# The enclosing 'try:' line is elided; the except handler appears below.
1772 deps_contents
= ReadStringFromFile(deps_file
)
1773 # Check whether the depot and revision pattern in DEPS file vars variable
1774 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1775 angle_rev_pattern
= re
.compile(r
'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1776 deps_var
, re
.MULTILINE
)
# NOTE(review): applying '%' to an already-compiled pattern object would
# raise TypeError at runtime; upstream searches with the compiled pattern
# directly ('re.search(angle_rev_pattern, deps_contents)'). Verify/fix.
1777 match
= re
.search(angle_rev_pattern
% deps_var
, deps_contents
)
1779 # Update the revision information for the given depot
1780 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1782 # Check whether the depot and revision pattern in DEPS file deps
1784 # "src/third_party/angle": Var("chromium_git") +
1785 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1786 angle_rev_pattern
= re
.compile(
1787 r
'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re
.MULTILINE
)
1788 match
= re
.search(angle_rev_pattern
, deps_contents
)
# Runs when neither pattern matched (guard lines elided here).
1790 print 'Could not find angle revision information in DEPS file.'
1792 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1793 # Write changes to DEPS file
1794 WriteStringToFile(new_data
, deps_file
)
# Reached from the elided 'except' clause upstream.
1797 print 'Something went wrong while updating DEPS file, %s' % e
1800 def CreateDEPSPatch(self
, depot
, revision
):
1801 """Modifies DEPS and returns diff as text.
1804 depot: Current depot being bisected.
1805 revision: A git hash revision of the dependency repository.
1808 A tuple with git hash of chromium revision and DEPS patch text.
1810 deps_file_path
= os
.path
.join(self
.src_cwd
, bisect_utils
.FILE_DEPS
)
1811 if not os
.path
.exists(deps_file_path
):
# NOTE(review): grammar in runtime message -- 'does not exists' should be
# 'does not exist'; fix the literal when touching this code.
1812 raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path
)
1813 # Get current chromium revision (git hash).
1814 chromium_sha
= CheckRunGit(['rev-parse', 'HEAD']).strip()
1815 if not chromium_sha
:
# The continuation argument of this raise is elided here.
1816 raise RuntimeError('Failed to determine Chromium revision for %s' %
# Only depots rolled via chromium's or v8's DEPS can be patched this way.
1818 if ('chromium' in DEPOT_DEPS_NAME
[depot
]['from'] or
1819 'v8' in DEPOT_DEPS_NAME
[depot
]['from']):
1820 # Checkout DEPS file for the current chromium revision.
1821 if self
.source_control
.CheckoutFileAtRevision(bisect_utils
.FILE_DEPS
,
1824 if self
.UpdateDeps(revision
, depot
, deps_file_path
):
1825 diff_command
= ['diff',
1826 '--src-prefix=src/',
1827 '--dst-prefix=src/',
1829 bisect_utils
.FILE_DEPS
]
1830 diff_text
= CheckRunGit(diff_command
, cwd
=self
.src_cwd
)
1831 return (chromium_sha
, ChangeBackslashToSlashInPatch(diff_text
))
# Failure branches (else clauses elided here) raise with details.
1833 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1836 raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1840 def BuildCurrentRevision(self
, depot
, revision
=None):
1841 """Builds chrome and performance_ui_tests on the current revision.
1844 True if the build was successful.
# Debug flag short-circuits the build (the 'return True' line is elided).
1846 if self
.opts
.debug_ignore_build
:
1849 os
.chdir(self
.src_cwd
)
1850 # Fetch build archive for the given revision from the cloud storage when
1851 # the storage bucket is passed.
1852 if self
.IsDownloadable(depot
) and revision
:
# 'deps_patch = None' initialization line is elided here.
1854 if depot
!= 'chromium':
1855 # Create a DEPS patch with new revision for dependency repository.
1856 (revision
, deps_patch
) = self
.CreateDEPSPatch(depot
, revision
)
1857 # Get SVN revision for the given SHA, since builds are archived using SVN
1859 chromium_revision
= self
.source_control
.SVNFindRev(revision
)
1860 if not chromium_revision
:
# 'raise RuntimeError(' opening line is elided before this message.
1862 'Failed to determine SVN revision for %s' % revision
)
1863 if self
.DownloadCurrentBuild(chromium_revision
, patch
=deps_patch
):
# Success branch body ('if deps_patch: ... / return True') elided here.
1866 # Reverts the changes to DEPS file.
1867 self
.source_control
.CheckoutFileAtRevision(bisect_utils
.FILE_DEPS
,
1871 raise RuntimeError('Failed to download build archive for revision %s.\n'
1872 'Unfortunately, bisection couldn\'t continue any '
1873 'further. Please try running script without '
1874 '--gs_bucket flag to produce local builds.' % revision
)
# Local-build fallback when no archive is downloadable.
1877 build_success
= self
.builder
.Build(depot
, self
.opts
)
1879 return build_success
def RunGClientHooks(self):
  """Runs gclient with runhooks command.

  Returns:
    True if gclient reports no errors.
  """
  if self.opts.debug_ignore_build:
    return True
  runhooks_code = bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
  # A zero return code from gclient means the hooks ran cleanly.
  return not runhooks_code
1893 def TryParseHistogramValuesFromOutput(self
, metric
, text
):
1894 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1897 metric: The metric as a list of [<trace>, <value>] strings.
1898 text: The text to parse the metric values from.
1901 A list of floating point numbers found.
1903 metric_formatted
= 'HISTOGRAM %s: %s= ' % (metric
[0], metric
[1])
1905 text_lines
= text
.split('\n')
# 'values_list = []' initialization and the trailing return are elided in
# this excerpt -- confirm against upstream.
1908 for current_line
in text_lines
:
1909 if metric_formatted
in current_line
:
# Strip the prefix, leaving the histogram dict literal.
1910 current_line
= current_line
[len(metric_formatted
):]
# SECURITY NOTE(review): eval() on test output executes arbitrary
# expressions; consider ast.literal_eval/json for untrusted logs.
# The enclosing 'try:' line is elided here.
1913 histogram_values
= eval(current_line
)
1915 for b
in histogram_values
['buckets']:
1916 average_for_bucket
= float(b
['high'] + b
['low']) * 0.5
1917 # Extends the list with N-elements with the average for that bucket.
1918 values_list
.extend([average_for_bucket
] * b
['count'])
def TryParseResultValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  escaped_prefix = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_value_re = re.compile(
      escaped_prefix + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  value_list_re = re.compile(
      escaped_prefix + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_pair_re = re.compile(
      escaped_prefix +
      '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  values_list = []
  for line in text.split('\n'):
    # Check each supported RESULT layout in turn; first match wins.
    single_match = single_value_re.search(line)
    list_match = value_list_re.search(line)
    mean_match = mean_stddev_pair_re.search(line)
    if single_match is not None and single_match.group('VALUE'):
      values_list.append(single_match.group('VALUE'))
    elif list_match is not None and list_match.group('VALUES'):
      values_list.extend(list_match.group('VALUES').split(','))
    elif mean_match is not None and mean_match.group('MEAN'):
      values_list.append(mean_match.group('MEAN'))

  # Drop anything that is not parseable as a float.
  values_list = [float(v) for v in values_list if IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
                    ['cold_times', 'page_load_time'],
                    ['warm_times', 'page_load_time']]
  if metric in metrics_to_sum and values_list:
    values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list
def ParseMetricValuesFromOutput(self, metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer the RESULT-line format; fall back to HISTOGRAM parsing when
  # nothing was found.
  parsed_values = self.TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  return self.TryParseHistogramValuesFromOutput(metric, text)
2003 def _GenerateProfileIfNecessary(self
, command_args
):
2004 """Checks the command line of the performance test for dependencies on
2005 profile generation, and runs tools/perf/generate_profile as necessary.
2008 command_args: Command line being passed to performance test, as a list.
2011 False if profile generation was necessary and failed, otherwise True.
2014 if '--profile-dir' in ' '.join(command_args
):
2015 # If we were using python 2.7+, we could just use the argparse
2016 # module's parse_known_args to grab --profile-dir. Since some of the
2017 # bots still run 2.6, have to grab the arguments manually.
# 'arg_dict = {}' initialization and the trailing 'return False' /
# 'return True' lines are elided in this excerpt -- confirm upstream.
2019 args_to_parse
= ['--profile-dir', '--browser']
2021 for arg_to_parse
in args_to_parse
:
2022 for i
, current_arg
in enumerate(command_args
):
2023 if arg_to_parse
in current_arg
:
2024 current_arg_split
= current_arg
.split('=')
2026 # Check 2 cases, --arg=<val> and --arg <val>
2027 if len(current_arg_split
) == 2:
2028 arg_dict
[arg_to_parse
] = current_arg_split
[1]
2029 elif i
+ 1 < len(command_args
):
2030 arg_dict
[arg_to_parse
] = command_args
[i
+1]
2032 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
# has_key() is a Python-2-only idiom; 'in' works on both if modernizing.
2034 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
2035 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
2036 return not RunProcess(['python', path_to_generate
,
2037 '--profile-type-to-generate', profile_type
,
2038 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
def _IsBisectModeUsingMetric(self):
  """Returns True when bisecting on mean or standard deviation values."""
  metric_modes = (BISECT_MODE_MEAN, BISECT_MODE_STD_DEV)
  return self.opts.bisect_mode in metric_modes
def _IsBisectModeReturnCode(self):
  """Returns True when bisecting on the test's return code."""
  return self.opts.bisect_mode == BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  """Returns True when bisecting on standard deviation."""
  return self.opts.bisect_mode == BISECT_MODE_STD_DEV
2051 def RunPerformanceTestAndParseResults(
2052 self
, command_to_run
, metric
, reset_on_first_run
=False,
2053 upload_on_last_run
=False, results_label
=None):
2054 """Runs a performance test on the current revision and parses the results.
2057 command_to_run: The command to be run to execute the performance test.
2058 metric: The metric to parse out from the results of the performance test.
2059 This is the result chart name and trace name, separated by slash.
2060 reset_on_first_run: If True, pass the flag --reset-results on first run.
2061 upload_on_last_run: If True, pass the flag --upload-results on last run.
2062 results_label: A value for the option flag --results-label.
2063 The arguments reset_on_first_run, upload_on_last_run and results_label
2064 are all ignored if the test is not a Telemetry test.
2067 (values dict, 0) if --debug_ignore_perf_test was passed.
2068 (values dict, 0, test output) if the test was run successfully.
2069 (error message, -1) if the test couldn't be run.
2070 (error message, -1, test output) if the test ran but there was an error.
2072 success_code
, failure_code
= 0, -1
# Debug shortcut: the construction of 'fake_results' is elided here.
2074 if self
.opts
.debug_ignore_perf_test
:
2081 return (fake_results
, success_code
)
2083 # For Windows platform set posix=False, to parse windows paths correctly.
2084 # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
2085 # refer to http://bugs.python.org/issue1724822. By default posix=True.
2086 args
= shlex
.split(command_to_run
, posix
=not IsWindows())
2088 if not self
._GenerateProfileIfNecessary
(args
):
2089 err_text
= 'Failed to generate profile for performance test.'
2090 return (err_text
, failure_code
)
2092 # If running a Telemetry test for Chrome OS, insert the remote IP and
2093 # identity parameters.
2094 is_telemetry
= bisect_utils
.IsTelemetryCommand(command_to_run
)
2095 if self
.opts
.target_platform
== 'cros' and is_telemetry
:
2096 args
.append('--remote=%s' % self
.opts
.cros_remote_ip
)
2097 args
.append('--identity=%s' % CROS_TEST_KEY_PATH
)
2099 start_time
= time
.time()
# 'metric_values = []' initialization is elided in this excerpt.
2102 output_of_all_runs
= ''
2103 for i
in xrange(self
.opts
.repeat_test_count
):
2104 # Can ignore the return code since if the tests fail, it won't return 0.
2105 current_args
= copy
.copy(args
)
# Telemetry-only flags ('if is_telemetry:' guard elided here).
2107 if i
== 0 and reset_on_first_run
:
2108 current_args
.append('--reset-results')
2109 elif i
== self
.opts
.repeat_test_count
- 1 and upload_on_last_run
:
2110 current_args
.append('--upload-results')
# 'if results_label:' guard line is elided here.
2112 current_args
.append('--results-label=%s' % results_label
)
# The enclosing 'try:' is elided; OSError handling follows.
2114 (output
, return_code
) = RunProcessAndRetrieveOutput(current_args
,
# ENOENT: the test binary/script was not found on disk.
2117 if e
.errno
== errno
.ENOENT
:
2118 err_text
= ('Something went wrong running the performance test. '
2119 'Please review the command line:\n\n')
2120 if 'src/' in ' '.join(args
):
2121 err_text
+= ('Check that you haven\'t accidentally specified a '
2122 'path with src/ in the command.\n\n')
2123 err_text
+= ' '.join(args
)
2126 return (err_text
, failure_code
)
2129 output_of_all_runs
+= output
2130 if self
.opts
.output_buildbot_annotations
:
2133 if self
._IsBisectModeUsingMetric
():
2134 metric_values
+= self
.ParseMetricValuesFromOutput(metric
, output
)
2135 # If we're bisecting on a metric (ie, changes in the mean or
2136 # standard deviation) and no metric values are produced, bail out.
2137 if not metric_values
:
2139 elif self
._IsBisectModeReturnCode
():
2140 metric_values
.append(return_code
)
# Stop repeating runs once the per-test time budget is exhausted.
2142 elapsed_minutes
= (time
.time() - start_time
) / 60.0
2143 if elapsed_minutes
>= self
.opts
.max_time_minutes
:
2146 if len(metric_values
) == 0:
2147 err_text
= 'Metric %s was not found in the test output.' % metric
2148 # TODO(qyearsley): Consider also getting and displaying a list of metrics
2149 # that were found in the output here.
2150 return (err_text
, failure_code
, output_of_all_runs
)
2152 # If we're bisecting on return codes, we're really just looking for zero vs
2154 if self
._IsBisectModeReturnCode
():
2155 # If any of the return codes is non-zero, output 1.
2156 overall_return_code
= 0 if (
2157 all(current_value
== 0 for current_value
in metric_values
)) else 1
# The surrounding 'values = {...}' dict literal lines are elided here.
2160 'mean': overall_return_code
,
2163 'values': metric_values
,
2166 print 'Results of performance test: Command returned with %d' % (
2167 overall_return_code
)
2170 # Need to get the average value if there were multiple values.
2171 truncated_mean
= CalculateTruncatedMean(metric_values
,
2172 self
.opts
.truncate_percent
)
2173 standard_err
= CalculateStandardError(metric_values
)
2174 standard_dev
= CalculateStandardDeviation(metric_values
)
2176 if self
._IsBisectModeStandardDeviation
():
2177 metric_values
= [standard_dev
]
# The surrounding 'values = {...}' dict literal lines are elided here.
2180 'mean': truncated_mean
,
2181 'std_err': standard_err
,
2182 'std_dev': standard_dev
,
2183 'values': metric_values
,
2186 print 'Results of performance test: %12f %12f' % (
2187 truncated_mean
, standard_err
)
2189 return (values
, success_code
, output_of_all_runs
)
2191 def FindAllRevisionsToSync(self
, revision
, depot
):
2192 """Finds all dependant revisions and depots that need to be synced for a
2193 given revision. This is only useful in the git workflow, as an svn depot
2194 may be split into multiple mirrors.
2196 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2197 skia/include. To sync skia/src properly, one has to find the proper
2198 revisions in skia/gyp and skia/include.
2201 revision: The revision to sync to.
2202 depot: The depot in use at the moment (probably skia).
2205 A list of [depot, revision] pairs that need to be synced.
2207 revisions_to_sync
= [[depot
, revision
]]
2209 is_base
= ((depot
== 'chromium') or (depot
== 'cros') or
2210 (depot
== 'android-chrome'))
2212 # Some SVN depots were split into multiple git depots, so we need to
2213 # figure out for each mirror which git revision to grab. There's no
2214 # guarantee that the SVN revision will exist for each of the dependant
2215 # depots, so we have to grep the git logs and grab the next earlier one.
2217 DEPOT_DEPS_NAME
[depot
]['depends'] and\
2218 self
.source_control
.IsGit():
2219 svn_rev
= self
.source_control
.SVNFindRev(revision
)
2221 for d
in DEPOT_DEPS_NAME
[depot
]['depends']:
2222 self
.ChangeToDepotWorkingDirectory(d
)
2224 dependant_rev
= self
.source_control
.ResolveToRevision(svn_rev
, d
, -1000)
2227 revisions_to_sync
.append([d
, dependant_rev
])
2229 num_resolved
= len(revisions_to_sync
)
2230 num_needed
= len(DEPOT_DEPS_NAME
[depot
]['depends'])
2232 self
.ChangeToDepotWorkingDirectory(depot
)
2234 if not ((num_resolved
- 1) == num_needed
):
2237 return revisions_to_sync
2239 def PerformPreBuildCleanup(self
):
2240 """Performs necessary cleanup between runs."""
2241 print 'Cleaning up between runs.'
2244 # Having these pyc files around between runs can confuse the
2245 # perf tests and cause them to crash.
2246 for (path
, _
, files
) in os
.walk(self
.src_cwd
):
2247 for cur_file
in files
:
2248 if cur_file
.endswith('.pyc'):
2249 path_to_file
= os
.path
.join(path
, cur_file
)
2250 os
.remove(path_to_file
)
2252 def PerformWebkitDirectoryCleanup(self
, revision
):
2253 """If the script is switching between Blink and WebKit during bisect,
2254 its faster to just delete the directory rather than leave it up to git
2260 if not self
.source_control
.CheckoutFileAtRevision(
2261 bisect_utils
.FILE_DEPS_GIT
, revision
, cwd
=self
.src_cwd
):
2265 os
.chdir(self
.src_cwd
)
2267 is_blink
= bisect_utils
.IsDepsFileBlink()
2271 if not self
.source_control
.RevertFileToHead(
2272 bisect_utils
.FILE_DEPS_GIT
):
2275 if self
.was_blink
!= is_blink
:
2276 self
.was_blink
= is_blink
2277 return bisect_utils
.RemoveThirdPartyWebkitDirectory()
2280 def PerformCrosChrootCleanup(self
):
2281 """Deletes the chroot.
2287 self
.ChangeToDepotWorkingDirectory('cros')
2288 cmd
= [CROS_SDK_PATH
, '--delete']
2289 return_code
= RunProcess(cmd
)
2291 return not return_code
2293 def CreateCrosChroot(self
):
2294 """Creates a new chroot.
2300 self
.ChangeToDepotWorkingDirectory('cros')
2301 cmd
= [CROS_SDK_PATH
, '--create']
2302 return_code
= RunProcess(cmd
)
2304 return not return_code
2306 def PerformPreSyncCleanup(self
, revision
, depot
):
2307 """Performs any necessary cleanup before syncing.
2312 if depot
== 'chromium':
2313 if not bisect_utils
.RemoveThirdPartyLibjingleDirectory():
2315 return self
.PerformWebkitDirectoryCleanup(revision
)
2316 elif depot
== 'cros':
2317 return self
.PerformCrosChrootCleanup()
2320 def RunPostSync(self
, depot
):
2321 """Performs any work after syncing.
2326 if self
.opts
.target_platform
== 'android':
2327 if not bisect_utils
.SetupAndroidBuildEnvironment(self
.opts
,
2328 path_to_src
=self
.src_cwd
):
2332 return self
.CreateCrosChroot()
2334 return self
.RunGClientHooks()
2337 def ShouldSkipRevision(self
, depot
, revision
):
2338 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2339 is git based those changes would have no effect.
2342 depot: The depot being bisected.
2343 revision: Current revision we're synced to.
2346 True if we should skip building/testing this revision.
2348 if depot
== 'chromium':
2349 if self
.source_control
.IsGit():
2350 cmd
= ['diff-tree', '--no-commit-id', '--name-only', '-r', revision
]
2351 output
= CheckRunGit(cmd
)
2353 files
= output
.splitlines()
2355 if len(files
) == 1 and files
[0] == 'DEPS':
2360 def SyncBuildAndRunRevision(self
, revision
, depot
, command_to_run
, metric
,
2362 """Performs a full sync/build/run of the specified revision.
2365 revision: The revision to sync to.
2366 depot: The depot that's being used at the moment (src, webkit, etc.)
2367 command_to_run: The command to execute the performance test.
2368 metric: The performance metric being tested.
2371 On success, a tuple containing the results of the performance test.
2372 Otherwise, a tuple with the error message.
2375 if depot
== 'chromium' or depot
== 'android-chrome':
2376 sync_client
= 'gclient'
2377 elif depot
== 'cros':
2378 sync_client
= 'repo'
2380 revisions_to_sync
= self
.FindAllRevisionsToSync(revision
, depot
)
2382 if not revisions_to_sync
:
2383 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL
)
2385 if not self
.PerformPreSyncCleanup(revision
, depot
):
2386 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL
)
2390 if not self
.opts
.debug_ignore_sync
:
2391 for r
in revisions_to_sync
:
2392 self
.ChangeToDepotWorkingDirectory(r
[0])
2395 self
.PerformPreBuildCleanup()
2397 # If you're using gclient to sync, you need to specify the depot you
2398 # want so that all the dependencies sync properly as well.
2399 # ie. gclient sync src@<SHA1>
2400 current_revision
= r
[1]
2401 if sync_client
== 'gclient':
2402 current_revision
= '%s@%s' % (DEPOT_DEPS_NAME
[depot
]['src'],
2404 if not self
.source_control
.SyncToRevision(current_revision
,
2411 success
= self
.RunPostSync(depot
)
2413 if skippable
and self
.ShouldSkipRevision(depot
, revision
):
2414 return ('Skipped revision: [%s]' % str(revision
),
2415 BUILD_RESULT_SKIPPED
)
2417 start_build_time
= time
.time()
2418 if self
.BuildCurrentRevision(depot
, revision
):
2419 after_build_time
= time
.time()
2420 results
= self
.RunPerformanceTestAndParseResults(command_to_run
,
2422 # Restore build output directory once the tests are done, to avoid
2424 if self
.IsDownloadable(depot
) and revision
:
2425 self
.BackupOrRestoreOutputdirectory(restore
=True)
2428 external_revisions
= self
.Get3rdPartyRevisionsFromCurrentRevision(
2431 if not external_revisions
is None:
2432 return (results
[0], results
[1], external_revisions
,
2433 time
.time() - after_build_time
, after_build_time
-
2436 return ('Failed to parse DEPS file for external revisions.',
2441 return ('Failed to build revision: [%s]' % (str(revision
, )),
2444 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL
)
2446 return ('Failed to sync revision: [%s]' % (str(revision
, )),
2449 def _CheckIfRunPassed(self
, current_value
, known_good_value
, known_bad_value
):
2450 """Given known good and bad values, decide if the current_value passed
2454 current_value: The value of the metric being checked.
2455 known_bad_value: The reference value for a "failed" run.
2456 known_good_value: The reference value for a "passed" run.
2459 True if the current_value is closer to the known_good_value than the
2462 if self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
2463 dist_to_good_value
= abs(current_value
['std_dev'] -
2464 known_good_value
['std_dev'])
2465 dist_to_bad_value
= abs(current_value
['std_dev'] -
2466 known_bad_value
['std_dev'])
2468 dist_to_good_value
= abs(current_value
['mean'] - known_good_value
['mean'])
2469 dist_to_bad_value
= abs(current_value
['mean'] - known_bad_value
['mean'])
2471 return dist_to_good_value
< dist_to_bad_value
2473 def _GetDepotDirectory(self
, depot_name
):
2474 if depot_name
== 'chromium':
2476 elif depot_name
== 'cros':
2477 return self
.cros_cwd
2478 elif depot_name
in DEPOT_NAMES
:
2479 return self
.depot_cwd
[depot_name
]
2481 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2482 ' was added without proper support?' % depot_name
2484 def ChangeToDepotWorkingDirectory(self
, depot_name
):
2485 """Given a depot, changes to the appropriate working directory.
2488 depot_name: The name of the depot (see DEPOT_NAMES).
2490 os
.chdir(self
._GetDepotDirectory
(depot_name
))
2492 def _FillInV8BleedingEdgeInfo(self
, min_revision_data
, max_revision_data
):
2493 r1
= self
._GetNearestV
8BleedingEdgeFromTrunk
(min_revision_data
['revision'],
2494 search_forward
=True)
2495 r2
= self
._GetNearestV
8BleedingEdgeFromTrunk
(max_revision_data
['revision'],
2496 search_forward
=False)
2497 min_revision_data
['external']['v8_bleeding_edge'] = r1
2498 max_revision_data
['external']['v8_bleeding_edge'] = r2
2500 if (not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
2501 min_revision_data
['revision']) or
2502 not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
2503 max_revision_data
['revision'])):
2504 self
.warnings
.append('Trunk revisions in V8 did not map directly to '
2505 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2506 'did map directly to bleeding_edge revisions, but results might not '
2509 def _FindNextDepotToBisect(self
, current_depot
, current_revision
,
2510 min_revision_data
, max_revision_data
):
2511 """Given the state of the bisect, decides which depot the script should
2512 dive into next (if any).
2515 current_depot: Current depot being bisected.
2516 current_revision: Current revision synced to.
2517 min_revision_data: Data about the earliest revision in the bisect range.
2518 max_revision_data: Data about the latest revision in the bisect range.
2521 The depot to bisect next, or None.
2523 external_depot
= None
2524 for next_depot
in DEPOT_NAMES
:
2525 if DEPOT_DEPS_NAME
[next_depot
].has_key('platform'):
2526 if DEPOT_DEPS_NAME
[next_depot
]['platform'] != os
.name
:
2529 if not (DEPOT_DEPS_NAME
[next_depot
]["recurse"] and
2530 min_revision_data
['depot'] in DEPOT_DEPS_NAME
[next_depot
]['from']):
2533 if current_depot
== 'v8':
2534 # We grab the bleeding_edge info here rather than earlier because we
2535 # finally have the revision range. From that we can search forwards and
2536 # backwards to try to match trunk revisions to bleeding_edge.
2537 self
._FillInV
8BleedingEdgeInfo
(min_revision_data
, max_revision_data
)
2539 if (min_revision_data
['external'][next_depot
] ==
2540 max_revision_data
['external'][next_depot
]):
2543 if (min_revision_data
['external'][next_depot
] and
2544 max_revision_data
['external'][next_depot
]):
2545 external_depot
= next_depot
2548 return external_depot
2550 def PrepareToBisectOnDepot(self
,
2556 """Changes to the appropriate directory and gathers a list of revisions
2557 to bisect between |start_revision| and |end_revision|.
2560 current_depot: The depot we want to bisect.
2561 end_revision: End of the revision range.
2562 start_revision: Start of the revision range.
2563 previous_depot: The depot we were previously bisecting.
2564 previous_revision: The last revision we synced to on |previous_depot|.
2567 A list containing the revisions between |start_revision| and
2568 |end_revision| inclusive.
2570 # Change into working directory of external library to run
2571 # subsequent commands.
2572 self
.ChangeToDepotWorkingDirectory(current_depot
)
2574 # V8 (and possibly others) is merged in periodically. Bisecting
2575 # this directory directly won't give much good info.
2576 if DEPOT_DEPS_NAME
[current_depot
].has_key('custom_deps'):
2577 config_path
= os
.path
.join(self
.src_cwd
, '..')
2578 if bisect_utils
.RunGClientAndCreateConfig(self
.opts
,
2579 DEPOT_DEPS_NAME
[current_depot
]['custom_deps'], cwd
=config_path
):
2581 if bisect_utils
.RunGClient(
2582 ['sync', '--revision', previous_revision
], cwd
=self
.src_cwd
):
2585 if current_depot
== 'v8_bleeding_edge':
2586 self
.ChangeToDepotWorkingDirectory('chromium')
2588 shutil
.move('v8', 'v8.bak')
2589 shutil
.move('v8_bleeding_edge', 'v8')
2591 self
.cleanup_commands
.append(['mv', 'v8', 'v8_bleeding_edge'])
2592 self
.cleanup_commands
.append(['mv', 'v8.bak', 'v8'])
2594 self
.depot_cwd
['v8_bleeding_edge'] = os
.path
.join(self
.src_cwd
, 'v8')
2595 self
.depot_cwd
['v8'] = os
.path
.join(self
.src_cwd
, 'v8.bak')
2597 self
.ChangeToDepotWorkingDirectory(current_depot
)
2599 depot_revision_list
= self
.GetRevisionList(current_depot
,
2603 self
.ChangeToDepotWorkingDirectory('chromium')
2605 return depot_revision_list
2607 def GatherReferenceValues(self
, good_rev
, bad_rev
, cmd
, metric
, target_depot
):
2608 """Gathers reference values by running the performance tests on the
2609 known good and bad revisions.
2612 good_rev: The last known good revision where the performance regression
2613 has not occurred yet.
2614 bad_rev: A revision where the performance regression has already occurred.
2615 cmd: The command to execute the performance test.
2616 metric: The metric being tested for regression.
2619 A tuple with the results of building and running each revision.
2621 bad_run_results
= self
.SyncBuildAndRunRevision(bad_rev
,
2626 good_run_results
= None
2628 if not bad_run_results
[1]:
2629 good_run_results
= self
.SyncBuildAndRunRevision(good_rev
,
2634 return (bad_run_results
, good_run_results
)
2636 def AddRevisionsIntoRevisionData(self
, revisions
, depot
, sort
, revision_data
):
2637 """Adds new revisions to the revision_data dict and initializes them.
2640 revisions: List of revisions to add.
2641 depot: Depot that's currently in use (src, webkit, etc...)
2642 sort: Sorting key for displaying revisions.
2643 revision_data: A dict to add the new revisions into. Existing revisions
2644 will have their sort keys offset.
2647 num_depot_revisions
= len(revisions
)
2649 for _
, v
in revision_data
.iteritems():
2650 if v
['sort'] > sort
:
2651 v
['sort'] += num_depot_revisions
2653 for i
in xrange(num_depot_revisions
):
2656 revision_data
[r
] = {'revision' : r
,
2662 'sort' : i
+ sort
+ 1}
2664 def PrintRevisionsToBisectMessage(self
, revision_list
, depot
):
2665 if self
.opts
.output_buildbot_annotations
:
2666 step_name
= 'Bisection Range: [%s - %s]' % (
2667 revision_list
[len(revision_list
)-1], revision_list
[0])
2668 bisect_utils
.OutputAnnotationStepStart(step_name
)
2671 print 'Revisions to bisect on [%s]:' % depot
2672 for revision_id
in revision_list
:
2673 print ' -> %s' % (revision_id
, )
2676 if self
.opts
.output_buildbot_annotations
:
2677 bisect_utils
.OutputAnnotationStepClosed()
2679 def NudgeRevisionsIfDEPSChange(self
, bad_revision
, good_revision
):
2680 """Checks to see if changes to DEPS file occurred, and that the revision
2681 range also includes the change to .DEPS.git. If it doesn't, attempts to
2682 expand the revision range to include it.
2685 bad_rev: First known bad revision.
2686 good_revision: Last known good revision.
2689 A tuple with the new bad and good revisions.
2691 if self
.source_control
.IsGit() and self
.opts
.target_platform
== 'chromium':
2692 changes_to_deps
= self
.source_control
.QueryFileRevisionHistory(
2693 'DEPS', good_revision
, bad_revision
)
2696 # DEPS file was changed, search from the oldest change to DEPS file to
2697 # bad_revision to see if there are matching .DEPS.git changes.
2698 oldest_deps_change
= changes_to_deps
[-1]
2699 changes_to_gitdeps
= self
.source_control
.QueryFileRevisionHistory(
2700 bisect_utils
.FILE_DEPS_GIT
, oldest_deps_change
, bad_revision
)
2702 if len(changes_to_deps
) != len(changes_to_gitdeps
):
2703 # Grab the timestamp of the last DEPS change
2704 cmd
= ['log', '--format=%ct', '-1', changes_to_deps
[0]]
2705 output
= CheckRunGit(cmd
)
2706 commit_time
= int(output
)
2708 # Try looking for a commit that touches the .DEPS.git file in the
2709 # next 15 minutes after the DEPS file change.
2710 cmd
= ['log', '--format=%H', '-1',
2711 '--before=%d' % (commit_time
+ 900), '--after=%d' % commit_time
,
2712 'origin/master', bisect_utils
.FILE_DEPS_GIT
]
2713 output
= CheckRunGit(cmd
)
2714 output
= output
.strip()
2716 self
.warnings
.append('Detected change to DEPS and modified '
2717 'revision range to include change to .DEPS.git')
2718 return (output
, good_revision
)
2720 self
.warnings
.append('Detected change to DEPS but couldn\'t find '
2721 'matching change to .DEPS.git')
2722 return (bad_revision
, good_revision
)
2724 def CheckIfRevisionsInProperOrder(self
,
2728 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2731 good_revision: Number/tag of the known good revision.
2732 bad_revision: Number/tag of the known bad revision.
2735 True if the revisions are in the proper order (good earlier than bad).
2737 if self
.source_control
.IsGit() and target_depot
!= 'cros':
2738 cmd
= ['log', '--format=%ct', '-1', good_revision
]
2739 cwd
= self
._GetDepotDirectory
(target_depot
)
2741 output
= CheckRunGit(cmd
, cwd
=cwd
)
2742 good_commit_time
= int(output
)
2744 cmd
= ['log', '--format=%ct', '-1', bad_revision
]
2745 output
= CheckRunGit(cmd
, cwd
=cwd
)
2746 bad_commit_time
= int(output
)
2748 return good_commit_time
<= bad_commit_time
2750 # Cros/svn use integers
2751 return int(good_revision
) <= int(bad_revision
)
2753 def Run(self
, command_to_run
, bad_revision_in
, good_revision_in
, metric
):
2754 """Given known good and bad revisions, run a binary search on all
2755 intermediate revisions to determine the CL where the performance regression
2759 command_to_run: Specify the command to execute the performance test.
2760 good_revision: Number/tag of the known good revision.
2761 bad_revision: Number/tag of the known bad revision.
2762 metric: The performance metric to monitor.
2765 A dict with 2 members, 'revision_data' and 'error'. On success,
2766 'revision_data' will contain a dict mapping revision ids to
2767 data about that revision. Each piece of revision data consists of a
2768 dict with the following keys:
2770 'passed': Represents whether the performance test was successful at
2771 that revision. Possible values include: 1 (passed), 0 (failed),
2772 '?' (skipped), 'F' (build failed).
2773 'depot': The depot that this revision is from (ie. WebKit)
2774 'external': If the revision is a 'src' revision, 'external' contains
2775 the revisions of each of the external libraries.
2776 'sort': A sort value for sorting the dict in order of commits.
2793 If an error occurred, the 'error' field will contain the message and
2794 'revision_data' will be empty.
2796 results
= {'revision_data' : {},
2799 # Choose depot to bisect first
2800 target_depot
= 'chromium'
2801 if self
.opts
.target_platform
== 'cros':
2802 target_depot
= 'cros'
2803 elif self
.opts
.target_platform
== 'android-chrome':
2804 target_depot
= 'android-chrome'
2807 self
.ChangeToDepotWorkingDirectory(target_depot
)
2809 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2810 bad_revision
= self
.source_control
.ResolveToRevision(bad_revision_in
,
2812 good_revision
= self
.source_control
.ResolveToRevision(good_revision_in
,
2818 if bad_revision
is None:
2819 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in
,)
2822 if good_revision
is None:
2823 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in
,)
2826 # Check that they didn't accidentally swap good and bad revisions.
2827 if not self
.CheckIfRevisionsInProperOrder(
2828 target_depot
, good_revision
, bad_revision
):
2829 results
['error'] = 'bad_revision < good_revision, did you swap these '\
2833 (bad_revision
, good_revision
) = self
.NudgeRevisionsIfDEPSChange(
2834 bad_revision
, good_revision
)
2836 if self
.opts
.output_buildbot_annotations
:
2837 bisect_utils
.OutputAnnotationStepStart('Gathering Revisions')
2839 print 'Gathering revision range for bisection.'
2840 # Retrieve a list of revisions to do bisection on.
2841 src_revision_list
= self
.GetRevisionList(target_depot
,
2845 if self
.opts
.output_buildbot_annotations
:
2846 bisect_utils
.OutputAnnotationStepClosed()
2848 if src_revision_list
:
2849 # revision_data will store information about a revision such as the
2850 # depot it came from, the webkit/V8 revision at that time,
2851 # performance timing, build state, etc...
2852 revision_data
= results
['revision_data']
2854 # revision_list is the list we're binary searching through at the moment.
2859 for current_revision_id
in src_revision_list
:
2862 revision_data
[current_revision_id
] = {'value' : None,
2864 'depot' : target_depot
,
2868 'sort' : sort_key_ids
}
2869 revision_list
.append(current_revision_id
)
2872 max_revision
= len(revision_list
) - 1
2874 self
.PrintRevisionsToBisectMessage(revision_list
, target_depot
)
2876 if self
.opts
.output_buildbot_annotations
:
2877 bisect_utils
.OutputAnnotationStepStart('Gathering Reference Values')
2879 print 'Gathering reference values for bisection.'
2881 # Perform the performance tests on the good and bad revisions, to get
2883 (bad_results
, good_results
) = self
.GatherReferenceValues(good_revision
,
2889 if self
.opts
.output_buildbot_annotations
:
2890 bisect_utils
.OutputAnnotationStepClosed()
2893 results
['error'] = ('An error occurred while building and running '
2894 'the \'bad\' reference value. The bisect cannot continue without '
2895 'a working \'bad\' revision to start from.\n\nError: %s' %
2900 results
['error'] = ('An error occurred while building and running '
2901 'the \'good\' reference value. The bisect cannot continue without '
2902 'a working \'good\' revision to start from.\n\nError: %s' %
2907 # We need these reference values to determine if later runs should be
2908 # classified as pass or fail.
2909 known_bad_value
= bad_results
[0]
2910 known_good_value
= good_results
[0]
2912 # Can just mark the good and bad revisions explicitly here since we
2913 # already know the results.
2914 bad_revision_data
= revision_data
[revision_list
[0]]
2915 bad_revision_data
['external'] = bad_results
[2]
2916 bad_revision_data
['perf_time'] = bad_results
[3]
2917 bad_revision_data
['build_time'] = bad_results
[4]
2918 bad_revision_data
['passed'] = False
2919 bad_revision_data
['value'] = known_bad_value
2921 good_revision_data
= revision_data
[revision_list
[max_revision
]]
2922 good_revision_data
['external'] = good_results
[2]
2923 good_revision_data
['perf_time'] = good_results
[3]
2924 good_revision_data
['build_time'] = good_results
[4]
2925 good_revision_data
['passed'] = True
2926 good_revision_data
['value'] = known_good_value
2928 next_revision_depot
= target_depot
2931 if not revision_list
:
2934 min_revision_data
= revision_data
[revision_list
[min_revision
]]
2935 max_revision_data
= revision_data
[revision_list
[max_revision
]]
2937 if max_revision
- min_revision
<= 1:
2938 current_depot
= min_revision_data
['depot']
2939 if min_revision_data
['passed'] == '?':
2940 next_revision_index
= min_revision
2941 elif max_revision_data
['passed'] == '?':
2942 next_revision_index
= max_revision
2943 elif current_depot
in ['android-chrome', 'cros', 'chromium', 'v8']:
2944 previous_revision
= revision_list
[min_revision
]
2945 # If there were changes to any of the external libraries we track,
2946 # should bisect the changes there as well.
2947 external_depot
= self
._FindNextDepotToBisect
(current_depot
,
2948 previous_revision
, min_revision_data
, max_revision_data
)
2950 # If there was no change in any of the external depots, the search
2952 if not external_depot
:
2953 if current_depot
== 'v8':
2954 self
.warnings
.append('Unfortunately, V8 bisection couldn\'t '
2955 'continue any further. The script can only bisect into '
2956 'V8\'s bleeding_edge repository if both the current and '
2957 'previous revisions in trunk map directly to revisions in '
2961 earliest_revision
= max_revision_data
['external'][external_depot
]
2962 latest_revision
= min_revision_data
['external'][external_depot
]
2964 new_revision_list
= self
.PrepareToBisectOnDepot(external_depot
,
2967 next_revision_depot
,
2970 if not new_revision_list
:
2971 results
['error'] = 'An error occurred attempting to retrieve'\
2972 ' revision range: [%s..%s]' % \
2973 (earliest_revision
, latest_revision
)
2976 self
.AddRevisionsIntoRevisionData(new_revision_list
,
2978 min_revision_data
['sort'],
2981 # Reset the bisection and perform it on the newly inserted
2983 revision_list
= new_revision_list
2985 max_revision
= len(revision_list
) - 1
2986 sort_key_ids
+= len(revision_list
)
2988 print 'Regression in metric:%s appears to be the result of changes'\
2989 ' in [%s].' % (metric
, external_depot
)
2991 self
.PrintRevisionsToBisectMessage(revision_list
, external_depot
)
2997 next_revision_index
= int((max_revision
- min_revision
) / 2) +\
3000 next_revision_id
= revision_list
[next_revision_index
]
3001 next_revision_data
= revision_data
[next_revision_id
]
3002 next_revision_depot
= next_revision_data
['depot']
3004 self
.ChangeToDepotWorkingDirectory(next_revision_depot
)
3006 if self
.opts
.output_buildbot_annotations
:
3007 step_name
= 'Working on [%s]' % next_revision_id
3008 bisect_utils
.OutputAnnotationStepStart(step_name
)
3010 print 'Working on revision: [%s]' % next_revision_id
3012 run_results
= self
.SyncBuildAndRunRevision(next_revision_id
,
3013 next_revision_depot
,
3015 metric
, skippable
=True)
3017 # If the build is successful, check whether or not the metric
3019 if not run_results
[1]:
3020 if len(run_results
) > 2:
3021 next_revision_data
['external'] = run_results
[2]
3022 next_revision_data
['perf_time'] = run_results
[3]
3023 next_revision_data
['build_time'] = run_results
[4]
3025 passed_regression
= self
._CheckIfRunPassed
(run_results
[0],
3029 next_revision_data
['passed'] = passed_regression
3030 next_revision_data
['value'] = run_results
[0]
3032 if passed_regression
:
3033 max_revision
= next_revision_index
3035 min_revision
= next_revision_index
3037 if run_results
[1] == BUILD_RESULT_SKIPPED
:
3038 next_revision_data
['passed'] = 'Skipped'
3039 elif run_results
[1] == BUILD_RESULT_FAIL
:
3040 next_revision_data
['passed'] = 'Build Failed'
3042 print run_results
[0]
3044 # If the build is broken, remove it and redo search.
3045 revision_list
.pop(next_revision_index
)
3049 if self
.opts
.output_buildbot_annotations
:
3050 self
._PrintPartialResults
(results
)
3051 bisect_utils
.OutputAnnotationStepClosed()
3053 # Weren't able to sync and retrieve the revision range.
3054 results
['error'] = 'An error occurred attempting to retrieve revision '\
3055 'range: [%s..%s]' % (good_revision
, bad_revision
)
3059 def _PrintPartialResults(self
, results_dict
):
3060 revision_data
= results_dict
['revision_data']
3061 revision_data_sorted
= sorted(revision_data
.iteritems(),
3062 key
= lambda x
: x
[1]['sort'])
3063 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
3065 self
._PrintTestedCommitsTable
(revision_data_sorted
,
3066 results_dict
['first_working_revision'],
3067 results_dict
['last_broken_revision'],
3068 100, final_step
=False)
3070 def _PrintConfidence(self
, results_dict
):
3071 # The perf dashboard specifically looks for the string
3072 # "Confidence in Bisection Results: 100%" to decide whether or not
3073 # to cc the author(s). If you change this, please update the perf
3074 # dashboard as well.
3075 print 'Confidence in Bisection Results: %d%%' % results_dict
['confidence']
3077 def _PrintBanner(self
, results_dict
):
3079 print " __o_\___ Aw Snap! We hit a speed bump!"
3080 print "=-O----O-'__.~.___________________________________"
3082 if self
._IsBisectModeReturnCode
():
3083 print ('Bisect reproduced a change in return codes while running the '
3084 'performance test.')
3086 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3087 '%s metric.' % (results_dict
['regression_size'],
3088 results_dict
['regression_std_err'], '/'.join(self
.opts
.metric
)))
3089 self
._PrintConfidence
(results_dict
)
3091 def _PrintFailedBanner(self
, results_dict
):
3093 if self
._IsBisectModeReturnCode
():
3094 print 'Bisect could not reproduce a change in the return code.'
3096 print ('Bisect could not reproduce a change in the '
3097 '%s metric.' % '/'.join(self
.opts
.metric
))
3100 def _GetViewVCLinkFromDepotAndHash(self
, cl
, depot
):
3101 info
= self
.source_control
.QueryRevisionInfo(cl
,
3102 self
._GetDepotDirectory
(depot
))
3103 if depot
and DEPOT_DEPS_NAME
[depot
].has_key('viewvc'):
3105 # Format is "git-svn-id: svn://....@123456 <other data>"
3106 svn_line
= [i
for i
in info
['body'].splitlines() if 'git-svn-id:' in i
]
3107 svn_revision
= svn_line
[0].split('@')
3108 svn_revision
= svn_revision
[1].split(' ')[0]
3109 return DEPOT_DEPS_NAME
[depot
]['viewvc'] + svn_revision
3114 def _PrintRevisionInfo(self
, cl
, info
, depot
=None):
3115 # The perf dashboard specifically looks for the string
3116 # "Author : " to parse out who to cc on a bug. If you change the
3117 # formatting here, please update the perf dashboard as well.
3119 print 'Subject : %s' % info
['subject']
3120 print 'Author : %s' % info
['author']
3121 if not info
['email'].startswith(info
['author']):
3122 print 'Email : %s' % info
['email']
3123 commit_link
= self
._GetViewVCLinkFromDepotAndHash
(cl
, depot
)
3125 print 'Link : %s' % commit_link
3128 print 'Failed to parse svn revision from body:'
3132 print 'Commit : %s' % cl
3133 print 'Date : %s' % info
['date']
3135 def _PrintTableRow(self
, column_widths
, row_data
):
3136 assert len(column_widths
) == len(row_data
)
3139 for i
in xrange(len(column_widths
)):
3140 current_row_data
= row_data
[i
].center(column_widths
[i
], ' ')
3141 text
+= ('%%%ds' % column_widths
[i
]) % current_row_data
def _PrintTestedCommitsHeader(self):
  """Prints the header row of the tested-commits table for the current
  bisect mode (mean, std-dev or return-code)."""
  if self.opts.bisect_mode == BISECT_MODE_MEAN:
    self._PrintTableRow(
        [20, 70, 14, 12, 13],
        ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
  elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
    self._PrintTableRow(
        [20, 70, 14, 12, 13],
        ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
  elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
    # NOTE(review): the width list for this row fell in a gap of the chunk;
    # [20, 70, 14, 13] matches the four column labels — confirm upstream.
    self._PrintTableRow(
        [20, 70, 14, 13],
        ['Depot', 'Commit SHA', 'Return Code', 'State'])
  else:
    # Removed the hand-formatted header print that followed this assert:
    # it was unreachable in normal execution (assert raises first).
    assert False, "Invalid bisect_mode specified."
def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
  """Prints one data row of the tested-commits table.

  Args:
    current_data: Revision data dict with 'depot' and 'value' entries.
    cl_link: Link (or raw hash) identifying the commit.
    state_str: Pre-centered text for the State column.
  """
  if self.opts.bisect_mode in (BISECT_MODE_MEAN, BISECT_MODE_STD_DEV):
    # Both modes print the same two statistics, just in a different column
    # order; compute them once instead of duplicating per branch.
    std_error = '+-%.02f' % current_data['value']['std_err']
    mean = '%.02f' % current_data['value']['mean']
    if self.opts.bisect_mode == BISECT_MODE_MEAN:
      row = [current_data['depot'], cl_link, mean, std_error, state_str]
    else:
      row = [current_data['depot'], cl_link, std_error, mean, state_str]
    self._PrintTableRow([20, 70, 12, 14, 13], row)
  elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
    mean = '%d' % current_data['value']['mean']
    # NOTE(review): width list reconstructed ([20, 70, 14, 13] for the four
    # columns) — the original line fell in a gap of this chunk.
    self._PrintTableRow(
        [20, 70, 14, 13],
        [current_data['depot'], cl_link, mean, state_str])
def _PrintTestedCommitsTable(self, revision_data_sorted,
    first_working_revision, last_broken_revision, confidence,
    final_step=True):
  """Prints the table of every tested commit with its good/bad state.

  NOTE(review): the 'final_step' default and the 'state' bookkeeping were
  reconstructed from an incomplete chunk; confirm against upstream history.
  """
  print('')
  if final_step:
    print('Tested commits:')
  else:
    print('Partial results:')
  self._PrintTestedCommitsHeader()
  state = 0
  for current_id, current_data in revision_data_sorted:
    if current_data['value']:
      if (current_id == last_broken_revision or
          current_id == first_working_revision):
        # If confidence is too low, don't add this empty line since it's
        # used to put focus on a suspected CL.
        if confidence and final_step:
          print('')
        state += 1
        if state == 2 and not final_step:
          # Just want a separation between "bad" and "good" cl's.
          print('')

      state_str = 'Bad'
      if state == 1 and final_step:
        state_str = 'Suspected CL'
      elif state == 2:
        state_str = 'Good'

      # If confidence is too low, don't bother outputting good/bad.
      if not confidence:
        state_str = ''
      state_str = state_str.center(13, ' ')

      cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
          current_data['depot'])
      if not cl_link:
        cl_link = current_id
      self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
def _PrintReproSteps(self):
  """Prints the command line needed to reproduce this bisect run locally."""
  print('')
  print('To reproduce locally:')
  print('$ ' + self.opts.command)
  # Telemetry-based tests support profiling; point the user at it.
  if bisect_utils.IsTelemetryCommand(self.opts.command):
    print('')
    print('Also consider passing --profiler=list to see available profilers.')
def _PrintOtherRegressions(self, other_regressions, revision_data):
  """Prints any additional suspected regression ranges with confidence."""
  print('')
  print('Other regressions may have occurred:')
  print(' %8s %70s %10s' % ('Depot'.center(8, ' '),
      'Range'.center(70, ' '), 'Confidence'.center(10, ' ')))
  for current_id, previous_id, confidence in other_regressions:
    current_data = revision_data[current_id]
    previous_data = revision_data[previous_id]
    current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
        current_data['depot'])
    previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
        previous_data['depot'])
    # If we can't map it to a viewable URL, at least show the original hash.
    if not current_link:
      current_link = current_id
    if not previous_link:
      previous_link = previous_id
    print(' %8s %70s %s' % (
        current_data['depot'], current_link,
        ('%d%%' % confidence).center(10, ' ')))
    print(' %8s %70s' % (
        previous_data['depot'], previous_link))
    print('')
3258 def _PrintStepTime(self
, revision_data_sorted
):
3259 step_perf_time_avg
= 0.0
3260 step_build_time_avg
= 0.0
3262 for _
, current_data
in revision_data_sorted
:
3263 if current_data
['value']:
3264 step_perf_time_avg
+= current_data
['perf_time']
3265 step_build_time_avg
+= current_data
['build_time']
3268 step_perf_time_avg
= step_perf_time_avg
/ step_count
3269 step_build_time_avg
= step_build_time_avg
/ step_count
3271 print 'Average build time : %s' % datetime
.timedelta(
3272 seconds
=int(step_build_time_avg
))
3273 print 'Average test time : %s' % datetime
.timedelta(
3274 seconds
=int(step_perf_time_avg
))
3276 def _PrintWarnings(self
):
3277 if not self
.warnings
:
3281 for w
in set(self
.warnings
):
def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
  """Scans the tested revisions for additional suspected regression points.

  Args:
    revision_data_sorted: Sorted list of (revision, data dict) pairs.
    bad_greater_than_good: True when the overall regression increased the
        metric (bad mean > good mean).

  Returns:
    A list of [current_id, previous_id, confidence] candidates.
  """
  candidates = []
  prev_samples = []
  prev_id = None
  for rev_id, rev_data in revision_data_sorted:
    samples = rev_data['value']
    if samples:
      samples = samples['values']
      if prev_samples:
        confidence = CalculateConfidence(prev_samples, [samples])
        mean_of_prev_runs = CalculateMean(sum(prev_samples, []))
        mean_of_current_runs = CalculateMean(samples)
        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)
        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          candidates.append([rev_id, prev_id, confidence])
      prev_samples.append(samples)
      prev_id = rev_id
  return candidates
def _GetResultsDict(self, revision_data, revision_data_sorted):
  """Computes the final bisect results: regression range, culprit commits,
  regression size/noise and an overall confidence value.

  NOTE(review): parts of this routine (initial defaults, the cros commit
  parsing loop, the warning block placement, and the returned dict's tail)
  were reconstructed from an incomplete source chunk; confirm against
  upstream history.
  """
  # Find range where it possibly broke.
  first_working_revision = None
  first_working_revision_index = -1
  last_broken_revision = None
  last_broken_revision_index = -1

  for i in xrange(len(revision_data_sorted)):
    k, v = revision_data_sorted[i]
    if v['passed'] == 1:
      if not first_working_revision:
        first_working_revision = k
        first_working_revision_index = i
    if not v['passed']:
      last_broken_revision = k
      last_broken_revision_index = i

  culprit_revisions = []
  other_regressions = []
  regression_size = 0
  regression_std_err = 0
  confidence = 0

  if last_broken_revision != None and first_working_revision != None:
    broken_means = []
    for i in xrange(0, last_broken_revision_index + 1):
      if revision_data_sorted[i][1]['value']:
        broken_means.append(revision_data_sorted[i][1]['value']['values'])

    working_means = []
    for i in xrange(first_working_revision_index, len(revision_data_sorted)):
      if revision_data_sorted[i][1]['value']:
        working_means.append(revision_data_sorted[i][1]['value']['values'])

    # Flatten the lists to calculate mean of all values.
    working_mean = sum(working_means, [])
    broken_mean = sum(broken_means, [])

    # Calculate the approximate size of the regression
    mean_of_bad_runs = CalculateMean(broken_mean)
    mean_of_good_runs = CalculateMean(working_mean)

    regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
                                                    mean_of_bad_runs)
    if math.isnan(regression_size):
      regression_size = 'zero-to-nonzero'

    regression_std_err = math.fabs(CalculatePooledStandardError(
        [working_mean, broken_mean]) /
        max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

    # Give a "confidence" in the bisect. At the moment we use how distinct the
    # values are before and after the last broken revision, and how noisy the
    # overall graph is.
    confidence = CalculateConfidence(working_means, broken_means)

    culprit_revisions = []
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(
        revision_data[last_broken_revision]['depot'])

    if revision_data[last_broken_revision]['depot'] == 'cros':
      # Want to get a list of all the commits and what depots they belong
      # to so that we can grab info about each.
      cmd = ['repo', 'forall', '-c',
          'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
          last_broken_revision, first_working_revision + 1)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      changes = []
      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)
      last_depot = None
      for l in output.split('\n'):
        if l:
          # Output will be in form:
          # /path_to_depot
          # /path_to_other_depot
          # <SHA1>
          # etc.
          if l[0] == '/':
            last_depot = l
          else:
            contents = l.split(' ')
            if len(contents) > 1:
              changes.append([last_depot, contents[0]])
      for c in changes:
        info = self.source_control.QueryRevisionInfo(c[1])
        culprit_revisions.append((c[1], info, None))
    else:
      for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
        k, v = revision_data_sorted[i]
        if k == first_working_revision:
          break
        self.ChangeToDepotWorkingDirectory(v['depot'])
        info = self.source_control.QueryRevisionInfo(k)
        culprit_revisions.append((k, info, v['depot']))
    os.chdir(cwd)

    # Check for any other possible regression ranges
    other_regressions = self._FindOtherRegressions(revision_data_sorted,
        mean_of_bad_runs > mean_of_good_runs)

    # Check for warnings:
    if len(culprit_revisions) > 1:
      self.warnings.append('Due to build errors, regression range could '
                           'not be narrowed down to a single commit.')
    if self.opts.repeat_test_count == 1:
      self.warnings.append('Tests were only set to run once. This may '
                           'be insufficient to get meaningful results.')
    if confidence < 100:
      if confidence:
        self.warnings.append(
            'Confidence is less than 100%. There could be other candidates for '
            'this regression. Try bisecting again with increased repeat_count '
            'or on a sub-metric that shows the regression more clearly.')
      else:
        self.warnings.append(
            'Confidence is 0%. Try bisecting again on another platform, with '
            'increased repeat_count or on a sub-metric that shows the regression '
            'more clearly.')

  return {
      'first_working_revision': first_working_revision,
      'last_broken_revision': last_broken_revision,
      'culprit_revisions': culprit_revisions,
      'other_regressions': other_regressions,
      'regression_size': regression_size,
      'regression_std_err': regression_std_err,
      'confidence': confidence,
      'revision_data': revision_data,
  }
def FormatAndPrintResults(self, bisect_results):
  """Prints the results from a bisection run in a readable format.

  Args:
    bisect_results: The results from a bisection test run.
  """
  revision_data = bisect_results['revision_data']
  revision_data_sorted = sorted(revision_data.iteritems(),
                                key=lambda x: x[1]['sort'])
  results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

  print('')
  print('Full results of bisection:')
  for current_id, current_data in revision_data_sorted:
    build_status = current_data['passed']
    # 'passed' may be a bool (tested) or some other marker; map bools to
    # human-readable strings.
    if type(build_status) is bool:
      if build_status:
        build_status = 'Good'
      else:
        build_status = 'Bad'
    print(' %20s %40s %s' % (current_data['depot'],
        current_id, build_status))
  print('')

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
    # The perf dashboard scrapes the "results" step in order to comment on
    # bugs. If you change this, please update the perf dashboard as well.
    bisect_utils.OutputAnnotationStepStart('Results')

  if results_dict['culprit_revisions'] and results_dict['confidence']:
    self._PrintBanner(results_dict)
    for culprit in results_dict['culprit_revisions']:
      cl, info, depot = culprit
      self._PrintRevisionInfo(cl, info, depot)
    self._PrintReproSteps()
    if results_dict['other_regressions']:
      self._PrintOtherRegressions(results_dict['other_regressions'],
                                  revision_data)
  else:
    self._PrintFailedBanner(results_dict)
    self._PrintReproSteps()

  self._PrintTestedCommitsTable(revision_data_sorted,
                                results_dict['first_working_revision'],
                                results_dict['last_broken_revision'],
                                results_dict['confidence'])
  self._PrintStepTime(revision_data_sorted)
  self._PrintWarnings()

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  if output.strip() != 'true':
    return None
  return GitSourceControl(opts)
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # 'except X as e' replaces the py2-only 'except X, e' comma form used
  # elsewhere in this file; it is valid on Python 2.6+ and Python 3.
  except OSError as e:
    # A directory that vanished concurrently (ENOENT) is fine; any other
    # OS error means the cleanup failed.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.

  Returns:
    True when both the out/ and build/ trees were cleared successfully.
  """
  out_dir = os.path.join('out', build_type)
  build_dir = os.path.join('build', build_type)
  if RmTreeAndMkDir(out_dir):
    if RmTreeAndMkDir(build_dir):
      return True
  return False
class BisectOptions(object):
  """Options to be used when running bisection."""

  def __init__(self):
    super(BisectOptions, self).__init__()

    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    # NOTE(review): 'command' and 'metric' fell in a gap of this chunk but
    # are read by ParseCommandLine below — confirm upstream.
    self.command = None
    self.metric = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.target_build_type = 'Release'
    self.builder_host = None
    self.builder_port = None
    self.bisect_mode = BISECT_MODE_MEAN

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    group = optparse.OptionGroup(parser, 'Bisect options')
    # NOTE(review): several add_option keyword lines (type=, default=, dest=)
    # fell in gaps of this chunk and were reconstructed — confirm upstream.
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    group.add_option('--bisect_mode',
                     type='choice',
                     choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
                              BISECT_MODE_RETURN_CODE],
                     default=BISECT_MODE_MEAN,
                     help='The bisect mode. Choices are to bisect on the '
                     'difference in mean, std_dev, or return_code.')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     type='str',
                     help=('Name of Google Storage bucket to upload or '
                           'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     help=('The target build architecture. Choices are "ia32" '
                           '(default), "x64" or "arm".'))
    group.add_option('--target_build_type',
                     type='choice',
                     choices=['Release', 'Debug'],
                     default='Release',
                     help='The target build type. Choices are "Release" '
                     '(default), or "Debug".')
    group.add_option('--builder_host',
                     dest='builder_host',
                     type='str',
                     help=('Host address of server to produce build by posting'
                           ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     type='int',
                     help=('HTTP port of the server to produce build by posting'
                           ' try job request.'))
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
    return parser

  def ParseCommandLine(self):
    """Parses the command line for bisect options."""
    parser = self._CreateCommandLineParser()
    (opts, _) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        if not cloud_storage.List(opts.gs_bucket):
          raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
        if not opts.builder_host:
          raise RuntimeError('Must specify try server hostname, when '
                             'gs_bucket is used: --builder_host')
        if not opts.builder_port:
          raise RuntimeError('Must specify try server port number, when '
                             'gs_bucket is used: --builder_port')
      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print('Sudo is required to build cros:')
        print('')
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if (len(metric_values) != 2 and
          opts.bisect_mode != BISECT_MODE_RETURN_CODE):
        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

      opts.metric = metric_values
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy every parsed option onto this instance, ensuring no unknown
      # attribute slips through.
      for k, v in opts.__dict__.iteritems():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError as e:
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    opts = BisectOptions()
    for k, v in values.iteritems():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    opts.metric = metric_values
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
def main():
  """Entry point: parses options, sets up the checkout, and runs the bisect.

  NOTE(review): the enclosing 'def main():' header fell in a gap of this
  chunk and was reconstructed; confirm against upstream history.
  """
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
      os.chdir(os.path.join(os.getcwd(), 'src'))
      if not RemoveBuildFiles(opts.target_build_type):
        raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)
    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
                         "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")

    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      bisect_test.PerformCleanup()
  except RuntimeError as e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print('Error: %s' % e.message)
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
3902 if __name__
== '__main__':