2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
54 sys
.path
.append(os
.path
.join(os
.path
.dirname(__file__
), 'telemetry'))
57 import post_perf_builder_job
as bisect_builder
58 from telemetry
.page
import cloud_storage
# NOTE(review): this region is garbled by extraction -- the leading integers
# are original-file line numbers fused into the text, statements are split
# across lines, and several entries (including the 'chromium', 'angle' and
# 'v8' headers and closing braces) are elided from this view. It appears to
# define DEPOT_DEPS_NAME, the per-repository bisect configuration table keyed
# by depot name. TODO: restore verbatim from upstream history before running.
60 # The additional repositories that might need to be bisected.
61 # If the repository has any dependant repositories (such as skia/src needs
62 # skia/include and skia/gyp to be updated), specify them in the 'depends'
63 # so that they're synced appropriately.
65 # src: path to the working directory.
66 # recurse: True if this repositry will get bisected.
67 # depends: A list of other repositories that are actually part of the same
69 # svn: Needed for git workflow to resolve hashes to svn revisions.
70 # from: Parent depot that must be bisected before this is bisected.
71 # deps_var: Key name in vars varible in DEPS file that has revision information.
77 "from" : ['cros', 'android-chrome'],
78 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
79 'deps_var': 'chromium_rev'
82 "src" : "src/third_party/WebKit",
85 "from" : ['chromium'],
86 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
87 'deps_var': 'webkit_revision'
90 "src" : "src/third_party/angle",
91 "src_old" : "src/third_party/angle_dx11",
94 "from" : ['chromium'],
96 'deps_var': 'angle_revision'
102 "from" : ['chromium'],
103 "custom_deps": bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
104 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
105 'deps_var': 'v8_revision'
107 'v8_bleeding_edge' : {
108 "src" : "src/v8_bleeding_edge",
111 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
113 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
114 'deps_var': 'v8_revision'
117 "src" : "src/third_party/skia/src",
119 "svn" : "http://skia.googlecode.com/svn/trunk/src",
120 "depends" : ['skia/include', 'skia/gyp'],
121 "from" : ['chromium'],
122 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
123 'deps_var': 'skia_revision'
126 "src" : "src/third_party/skia/include",
128 "svn" : "http://skia.googlecode.com/svn/trunk/include",
130 "from" : ['chromium'],
131 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
135 "src" : "src/third_party/skia/gyp",
137 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
139 "from" : ['chromium'],
140 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
# NOTE(review): garbled extraction of the module-level constants. Several
# continuation lines are elided (e.g. the key filenames joined onto the
# CROS_*_KEY_PATH paths, and the body/terminator of the DEPS_SHA_PATCH
# triple-quoted template). Leading integers are fused original line numbers.
145 DEPOT_NAMES
= DEPOT_DEPS_NAME
.keys()
# Path to the cros_sdk helper, relative to the chromium 'src' directory.
146 CROS_SDK_PATH
= os
.path
.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
147 CROS_VERSION_PATTERN
= 'new version number from %s'
148 CROS_CHROMEOS_PATTERN
= 'chromeos-base/chromeos-chrome'
149 CROS_TEST_KEY_PATH
= os
.path
.join('..', 'cros', 'chromite', 'ssh_keys',
151 CROS_SCRIPT_KEY_PATH
= os
.path
.join('..', 'cros', 'src', 'scripts',
152 'mod_for_test_scripts', 'ssh_keys',
# Build result codes returned by the build steps.
155 BUILD_RESULT_SUCCEED
= 0
156 BUILD_RESULT_FAIL
= 1
157 BUILD_RESULT_SKIPPED
= 2
159 # Maximum time in seconds to wait after posting build request to tryserver.
160 # TODO: Change these values based on the actual time taken by buildbots on
162 MAX_MAC_BUILD_TIME
= 14400
163 MAX_WIN_BUILD_TIME
= 14400
164 MAX_LINUX_BUILD_TIME
= 14400
166 # Patch template to add a new file, DEPS.sha under src folder.
167 # This file contains SHA1 value of the DEPS changes made while bisecting
168 # dependency repositories. This patch send along with DEPS patch to tryserver.
169 # When a build requested is posted with a patch, bisect builders on tryserver,
170 # once build is produced, it reads SHA value from this file and appends it
171 # to build archive filename.
172 DEPS_SHA_PATCH
= """diff --git src/DEPS.sha src/DEPS.sha
180 # The possible values of the --bisect_mode flag, which determines what to
181 # use when classifying a revision as "good" or "bad".
182 BISECT_MODE_MEAN
= 'mean'
183 BISECT_MODE_STD_DEV
= 'std_dev'
184 BISECT_MODE_RETURN_CODE
= 'return_code'
# NOTE(review): garbled -- the right-hand side of the dict() merge is cut off
# (presumably `depot_info.items())`). The function appears to merge the given
# mapping into the global DEPOT_DEPS_NAME and refresh DEPOT_NAMES to match.
# TODO: restore the elided argument from upstream.
187 def _AddAdditionalDepotInfo(depot_info
):
188 """Adds additional depot info to the global depot variables."""
189 global DEPOT_DEPS_NAME
191 DEPOT_DEPS_NAME
= dict(DEPOT_DEPS_NAME
.items() +
193 DEPOT_NAMES
= DEPOT_DEPS_NAME
.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight the
      # two boundary values by the remaining fractional weight.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # Matches the documented contract (and the old reduce()-based behavior,
    # which raised TypeError on an empty sequence).
    raise TypeError('The data set is empty after discarding values.')

  # sum() over explicit floats replaces the original reduce(); reduce is not
  # a builtin in Python 3, and sum() is the idiomatic equivalent.
  truncated_mean = sum(float(v) for v in data_set) / kept_weight

  return truncated_mean
def CalculateMean(values):
  """Returns the arithmetic mean of a list of values."""
  # A plain mean is just a truncated mean that discards nothing.
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the
  quotient of the difference between the closest values across the good and
  bad groups and the sum of the standard deviations of the good and bad
  groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Distance between the two groups: the smallest gap between the per-list
  # means of the good group and of the bad group.
  good_means = [CalculateMean(values) for values in good_results_lists]
  bad_means = [CalculateMean(values) for values in bad_results_lists]
  dist_between_groups = min(
      math.fabs(max(bad_means) - min(good_means)),
      math.fabs(min(bad_means) - max(good_means)))

  # Noise estimate: the sum of the standard deviations of the flattened
  # good and bad groups.
  stddev_good = CalculateStandardDeviation(sum(good_results_lists, []))
  stddev_bad = CalculateStandardDeviation(sum(bad_results_lists, []))
  stddev_sum = stddev_good + stddev_bad

  # Scale to a percentage clamped to [0, 100]; the 0.0001 floor guards
  # against division by zero when the results are completely noiseless.
  confidence = dist_between_groups / max(0.0001, stddev_sum)
  return int(min(1.0, max(confidence, 0.0)) * 100.0)
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values.

  Args:
    values: A list of numeric values.

  Returns:
    The sample standard deviation as a float. Returns 0.0 for fewer than
    two values, since a single sample has no spread and the Bessel-corrected
    denominator (len - 1) would otherwise divide by zero.
  """
  if len(values) < 2:
    return 0.0

  mean = sum(float(x) for x in values) / len(values)
  differences_from_mean = [float(x) - mean for x in values]
  squared_differences = [float(x * x) for x in differences_from_mean]
  # Sample (Bessel-corrected) variance, hence the n - 1 denominator.
  variance = sum(squared_differences) / (len(values) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  if before == after:
    return 0.0
  if before == 0:
    # The change relative to a zero baseline is not defined.
    return float('nan')
  difference = after - before
  # float() guards against integer floor division under Python 2.
  return math.fabs(float(difference) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across several sets of samples.

  Args:
    work_sets: A list of lists of numeric values.

  Returns:
    The pooled sample standard error as a float, or 0.0 when the sets
    contain too few values for the estimate to be defined.
  """
  # Accumulators must start at zero; the original code incremented them
  # without initialization, which raised NameError.
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of the mean of a list of values.

  Args:
    values: A list of numeric values.

  Returns:
    The standard error as a float; 0.0 when there are fewer than two
    values, since the sample standard deviation is undefined there and
    len(values) == 0 would divide by zero.
  """
  if len(values) <= 1:
    return 0.0

  std_dev = CalculateStandardDeviation(values)

  return std_dev / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False
def IsWindowsHost():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # cygwin reports its own platform string, so check it explicitly in
  # addition to the native 'win32'/'win64' prefixes.
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  # A 32-bit Python running under WoW64 reports the emulated architecture in
  # PROCESSOR_ARCHITECTURE; the real one is in PROCESSOR_ARCHITEW6432. When
  # the latter is unset we are not in WoW64, so the former is correct.
  # .get() avoids the KeyError the direct lookups raised when the variables
  # are absent (e.g. on non-Windows hosts).
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE'))
  return platform in ['AMD64', 'I64']
def IsLinuxHost():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMacHost():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns the SHA1 hex digest of the given contents."""
  digest = hashlib.sha1(contents)
  return digest.hexdigest()
# NOTE(review): garbled -- the nested PlatformName() helper's def line and its
# per-platform return branches are elided, as is the early return when no
# build_revision is given. Builds the cloud-storage archive name
# 'full-build-<platform>_<revision>[_<patch_sha>].zip'. TODO: restore from
# upstream before use.
433 def GetZipFileName(build_revision
=None, target_arch
='ia32', patch_sha
=None):
434 """Gets the archive file name for the given revision."""
436 """Return a string to be used in paths for the platform."""
438 # Build archive for x64 is still stored with 'win32'suffix
439 # (chromium_utils.PlatformName()).
440 if Is64BitWindows() and target_arch
== 'x64':
444 # Android builds too are archived with full-build-linux* prefix.
448 raise NotImplementedError('Unknown platform "%s".' % sys
.platform
)
450 base_name
= 'full-build-%s' % PlatformName()
451 if not build_revision
:
# Archives built from a patched tree carry the patch SHA in their name.
454 build_revision
= '%s_%s' % (build_revision
, patch_sha
)
455 return '%s_%s.zip' % (base_name
, build_revision
)
# NOTE(review): garbled -- the Windows/Mac branches of GetGSRootFolderName and
# the surrounding platform checks are elided. Computes the
# '<builder folder>/<zip name>' path used to download a build from Google
# Cloud Storage. TODO: restore elided branches from upstream.
458 def GetRemoteBuildPath(build_revision
, target_platform
='chromium',
459 target_arch
='ia32', patch_sha
=None):
460 """Compute the url to download the build from."""
461 def GetGSRootFolderName(target_platform
):
462 """Gets Google Cloud Storage root folder names"""
464 if Is64BitWindows() and target_arch
== 'x64':
465 return 'Win x64 Builder'
468 if target_platform
== 'android':
469 return 'android_perf_rel'
470 return 'Linux Builder'
473 raise NotImplementedError('Unsupported Platform "%s".' % sys
.platform
)
475 base_filename
= GetZipFileName(
476 build_revision
, target_arch
, patch_sha
)
477 builder_folder
= GetGSRootFolderName(target_platform
)
478 return '%s/%s' % (builder_folder
, base_filename
)
# NOTE(review): garbled -- the enclosing try statement, the success return of
# target_file, and the final return None are elided. Downloads
# gs://<bucket>/<source_path> into destination_path via telemetry's
# cloud_storage module, removing any partial file on error. Python 2 print
# statements preserved byte-identically. TODO: restore from upstream.
481 def FetchFromCloudStorage(bucket_name
, source_path
, destination_path
):
482 """Fetches file(s) from the Google Cloud Storage.
485 bucket_name: Google Storage bucket name.
486 source_path: Source file path.
487 destination_path: Destination file path.
490 Downloaded file path if exisits, otherwise None.
492 target_file
= os
.path
.join(destination_path
, os
.path
.basename(source_path
))
494 if cloud_storage
.Exists(bucket_name
, source_path
):
495 print 'Fetching file from gs//%s/%s ...' % (bucket_name
, source_path
)
496 cloud_storage
.Get(bucket_name
, source_path
, destination_path
)
497 if os
.path
.exists(target_file
):
500 print ('File gs://%s/%s not found in cloud storage.' % (
501 bucket_name
, source_path
))
# Best-effort cleanup: a partially-downloaded file must not be reused.
502 except Exception as e
:
503 print 'Something went wrong while fetching file from cloud: %s' % e
504 if os
.path
.exists(target_file
):
505 os
.remove(target_file
)
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    path: Path components, joined with os.path.join.

  Raises:
    OSError: The directory could not be created for any reason other than
        already existing.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is fine; re-raise anything else.
    if e.errno != errno.EEXIST:
      raise
# NOTE(review): garbled -- the chdir into output_dir, the condition tail of
# the Mac/Linux branch, the result check, and the zipfile fallback's directory
# skip are elided. Extracts |filename| into |output_dir| using the platform
# unzip/7z tool where possible, falling back to Python's zipfile module (with
# permission-bit restoration from external_attr). Python 2 print and the 16L
# long literal preserved byte-identically. TODO: restore from upstream.
521 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
522 def ExtractZip(filename
, output_dir
, verbose
=True):
523 """ Extract the zip archive in the output directory."""
524 MaybeMakeDirectory(output_dir
)
526 # On Linux and Mac, we use the unzip command as it will
527 # handle links and file bits (executable), which is much
528 # easier then trying to do that with ZipInfo options.
530 # The Mac Version of unzip unfortunately does not support Zip64, whereas
531 # the python module does, so we have to fallback to the python zip module
532 # on Mac if the filesize is greater than 4GB.
534 # On Windows, try to use 7z if it is installed, otherwise fall back to python
535 # zip module and pray we don't have files larger than 512MB to unzip.
537 if ((IsMacHost() and os
.path
.getsize(filename
) < 4 * 1024 * 1024 * 1024)
539 unzip_cmd
= ['unzip', '-o']
540 elif IsWindowsHost() and os
.path
.exists('C:\\Program Files\\7-Zip\\7z.exe'):
541 unzip_cmd
= ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
544 # Make sure path is absolute before changing directories.
545 filepath
= os
.path
.abspath(filename
)
546 saved_dir
= os
.getcwd()
548 command
= unzip_cmd
+ [filepath
]
549 result
= RunProcess(command
)
552 raise IOError('unzip failed: %s => %s' % (str(command
), result
))
# Python zipfile fallback path (no external unzip tool available).
554 assert IsWindowsHost() or IsMacHost()
555 zf
= zipfile
.ZipFile(filename
)
556 for name
in zf
.namelist():
558 print 'Extracting %s' % name
559 zf
.extract(name
, output_dir
)
561 # Restore permission bits.
562 os
.chmod(os
.path
.join(output_dir
, name
),
563 zf
.getinfo(name
).external_attr
>> 16L)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # Windows needs shell=True so that PATH is searched for the executable.
  use_shell = IsWindowsHost()
  return subprocess.call(command, shell=use_shell)
# NOTE(review): garbled -- the lines that chdir into |cwd| before launching
# the process (and presumably guard the chdir back) are elided, so the
# save/restore pairing cannot be confirmed from this view. Runs the command
# via subprocess.Popen with stdout captured and returns (output, returncode).
# TODO: restore the elided cwd handling from upstream.
582 def RunProcessAndRetrieveOutput(command
, cwd
=None):
583 """Runs an arbitrary command, returning its output and return code.
585 Since output is collected via communicate(), there will be no output until
586 the call terminates. If you need output while the program runs (ie. so
587 that the buildbot doesn't terminate the script), consider RunProcess().
590 command: A list containing the command and args to execute.
591 cwd: A directory to change to while running the command. The command can be
592 relative to this directory. If this is None, the command will be run in
593 the current directory.
596 A tuple of the output and return code.
599 original_cwd
= os
.getcwd()
602 # On Windows, use shell=True to get PATH interpretation.
603 shell
= IsWindowsHost()
604 proc
= subprocess
.Popen(command
, shell
=shell
, stdout
=subprocess
.PIPE
)
605 (output
, _
) = proc
.communicate()
608 os
.chdir(original_cwd
)
610 return (output
, proc
.returncode
)
def RunGit(command, cwd=None):
  """Runs a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  git_command = ['git'] + command

  return RunProcessAndRetrieveOutput(git_command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Runs a git subcommand, asserting that its return code is zero.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    The output of the git command as a string.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  # Callers (e.g. GetRevisionList) use the result directly as git's stdout
  # text, so return just the output, not the (output, return_code) tuple.
  return output
# NOTE(review): garbled -- the else branches, parts of the GYP_DEFINES string
# (including its tail and the use_goma handling implied by the parameter) are
# elided. Configures GYP_GENERATORS/GYP_DEFINES environment variables for the
# requested build system ('ninja' or 'make'); raises RuntimeError otherwise.
# TODO: restore the elided lines from upstream.
645 def SetBuildSystemDefault(build_system
, use_goma
):
646 """Sets up any environment variables needed to build with the specified build
650 build_system: A string specifying build system. Currently only 'ninja' or
651 'make' are supported."""
652 if build_system
== 'ninja':
653 gyp_var
= os
.getenv('GYP_GENERATORS')
# Preserve any generators the caller already configured, appending ninja.
655 if not gyp_var
or not 'ninja' in gyp_var
:
657 os
.environ
['GYP_GENERATORS'] = gyp_var
+ ',ninja'
659 os
.environ
['GYP_GENERATORS'] = 'ninja'
662 os
.environ
['GYP_DEFINES'] = 'component=shared_library '\
663 'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
666 elif build_system
== 'make':
667 os
.environ
['GYP_GENERATORS'] = 'make'
669 raise RuntimeError('%s build not supported.' % build_system
)
672 os
.environ
['GYP_DEFINES'] = '%s %s' % (os
.getenv('GYP_DEFINES', ''),
# NOTE(review): garbled -- the docstring, the guard around the -j flag, and
# the line appending |targets| to the command are elided. Invokes make with
# BUILDTYPE set and returns True on a zero exit code. TODO: restore from
# upstream.
676 def BuildWithMake(threads
, targets
, build_type
='Release'):
677 cmd
= ['make', 'BUILDTYPE=%s' % build_type
]
680 cmd
.append('-j%d' % threads
)
684 return_code
= RunProcess(cmd
)
686 return not return_code
# NOTE(review): garbled -- the docstring, the guard around the -j flag, and
# the line appending |targets| are elided. Invokes ninja against
# out/<build_type> and returns True on a zero exit code. TODO: restore from
# upstream.
689 def BuildWithNinja(threads
, targets
, build_type
='Release'):
690 cmd
= ['ninja', '-C', os
.path
.join('out', build_type
)]
693 cmd
.append('-j%d' % threads
)
697 return_code
= RunProcess(cmd
)
699 return not return_code
# NOTE(review): garbled -- the docstring and the loop header over |targets|
# (the `for t in targets:` that feeds the '/Project' extension) are elided.
# Builds chrome.sln with devenv.com from VS100COMNTOOLS and returns True on a
# zero exit code. TODO: restore from upstream.
702 def BuildWithVisualStudio(targets
, build_type
='Release'):
703 path_to_devenv
= os
.path
.abspath(
704 os
.path
.join(os
.environ
['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
705 path_to_sln
= os
.path
.join(os
.getcwd(), 'chrome', 'chrome.sln')
706 cmd
= [path_to_devenv
, '/build', build_type
, path_to_sln
]
709 cmd
.extend(['/Project', t
])
711 return_code
= RunProcess(cmd
)
713 return not return_code
def WriteStringToFile(text, file_name):
  """Writes the given content to a file, replacing any existing content.

  Args:
    text: The bytes to write (the file is opened in binary mode).
    file_name: Path of the file to write.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Returns the entire contents of the given file.

  Args:
    file_name: Path of the file to read.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths.

  Only the '--- ' / '+++ ' patch header lines are rewritten; backslashes in
  the diff content itself are left untouched.
  """
  diff_lines = diff_text.split('\n')
  for index, line in enumerate(diff_lines):
    if line.startswith('--- ') or line.startswith('+++ '):
      diff_lines[index] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
# NOTE(review): garbled -- the FromOpts def line (and its @staticmethod
# decorator), the RuntimeError raise for a missing Visual Studio, the return
# of |builder| from FromOpts, and the IsMacHost/IsWindowsHost branch headers
# in GetBuildOutputDirectory are all elided from this view. Base class used
# by the bisect script to build targets; FromOpts appears to act as a factory
# selecting Cros/Android/AndroidChrome/Desktop builders by target_platform.
# TODO: restore elided lines from upstream before use.
744 class Builder(object):
745 """Builder is used by the bisect script to build relevant targets and deploy.
747 def __init__(self
, opts
):
748 """Performs setup for building with target build system.
751 opts: Options parsed from command line.
# Windows defaults to msvs and requires VS100COMNTOOLS to locate devenv.
754 if not opts
.build_preference
:
755 opts
.build_preference
= 'msvs'
757 if opts
.build_preference
== 'msvs':
758 if not os
.getenv('VS100COMNTOOLS'):
760 'Path to visual studio could not be determined.')
762 SetBuildSystemDefault(opts
.build_preference
, opts
.use_goma
)
# Non-Windows: infer ninja vs make from GYP_GENERATORS if unspecified.
764 if not opts
.build_preference
:
765 if 'ninja' in os
.getenv('GYP_GENERATORS'):
766 opts
.build_preference
= 'ninja'
768 opts
.build_preference
= 'make'
770 SetBuildSystemDefault(opts
.build_preference
, opts
.use_goma
)
772 if not bisect_utils
.SetupPlatformBuildEnvironment(opts
):
773 raise RuntimeError('Failed to set platform environment.')
# Factory: pick the concrete builder for the bisect target platform.
778 if opts
.target_platform
== 'cros':
779 builder
= CrosBuilder(opts
)
780 elif opts
.target_platform
== 'android':
781 builder
= AndroidBuilder(opts
)
782 elif opts
.target_platform
== 'android-chrome':
783 builder
= AndroidChromeBuilder(opts
)
785 builder
= DesktopBuilder(opts
)
788 def Build(self
, depot
, opts
):
789 raise NotImplementedError()
791 def GetBuildOutputDirectory(self
, opts
, src_dir
=None):
792 """Returns the path to the build directory, relative to the checkout root.
794 Assumes that the current working directory is the checkout root.
796 src_dir
= src_dir
or 'src'
797 if opts
.build_preference
== 'ninja' or IsLinuxHost():
798 return os
.path
.join(src_dir
, 'out')
800 return os
.path
.join(src_dir
, 'xcodebuild')
802 return os
.path
.join(src_dir
, 'build')
803 raise NotImplementedError('Unexpected platform %s' % sys
.platform
)
# NOTE(review): garbled -- the assignment of |threads| and the final return
# of |build_success| are elided from this view. Dispatches to
# make/ninja/msvs according to opts.build_preference. TODO: restore from
# upstream.
806 class DesktopBuilder(Builder
):
807 """DesktopBuilder is used to build Chromium on linux/mac/windows."""
808 def __init__(self
, opts
):
809 super(DesktopBuilder
, self
).__init
__(opts
)
811 def Build(self
, depot
, opts
):
812 """Builds chromium_builder_perf target using options passed into
816 depot: Current depot being bisected.
817 opts: The options parsed from the command line.
820 True if build was successful.
822 targets
= ['chromium_builder_perf']
828 build_success
= False
829 if opts
.build_preference
== 'make':
830 build_success
= BuildWithMake(threads
, targets
, opts
.target_build_type
)
831 elif opts
.build_preference
== 'ninja':
832 build_success
= BuildWithNinja(threads
, targets
, opts
.target_build_type
)
833 elif opts
.build_preference
== 'msvs':
834 assert IsWindowsHost(), 'msvs is only supported on Windows.'
835 build_success
= BuildWithVisualStudio(targets
, opts
.target_build_type
)
837 assert False, 'No build system defined.'
# NOTE(review): garbled -- the |threads| assignment, the else branch header,
# and the return of |build_success| are elided. Android builds only support
# ninja (other preferences hit the assert). TODO: restore from upstream.
841 class AndroidBuilder(Builder
):
842 """AndroidBuilder is used to build on android."""
843 def __init__(self
, opts
):
844 super(AndroidBuilder
, self
).__init
__(opts
)
# Targets built for Android bisects; extended by AndroidChromeBuilder.
846 def _GetTargets(self
):
847 return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']
849 def Build(self
, depot
, opts
):
850 """Builds the android content shell and other necessary tools using options
851 passed into the script.
854 depot: Current depot being bisected.
855 opts: The options parsed from the command line.
858 True if build was successful.
864 build_success
= False
865 if opts
.build_preference
== 'ninja':
866 build_success
= BuildWithNinja(
867 threads
, self
._GetTargets
(), opts
.target_build_type
)
869 assert False, 'No build system defined.'
874 class AndroidChromeBuilder(AndroidBuilder
):
875 """AndroidChromeBuilder is used to build on android's chrome."""
876 def __init__(self
, opts
):
877 super(AndroidChromeBuilder
, self
).__init
__(opts
)
# Extends the base Android target list with the chrome_apk target.
879 def _GetTargets(self
):
880 return AndroidBuilder
._GetTargets
(self
) + ['chrome_apk']
# NOTE(review): garbled -- docstring tails, Returns sections, several cmd
# construction lines (e.g. the guards around --chrome_root and
# CHROME_ORIGIN), and the failure-path returns of Build are elided from this
# view. Builds and images ChromeOS via cros_sdk. TODO: restore from upstream.
883 class CrosBuilder(Builder
):
884 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
886 def __init__(self
, opts
):
887 super(CrosBuilder
, self
).__init
__(opts
)
889 def ImageToTarget(self
, opts
):
890 """Installs latest image to target specified by opts.cros_remote_ip.
893 opts: Program options containing cros_board and cros_remote_ip.
899 # Keys will most likely be set to 0640 after wiping the chroot.
900 os
.chmod(CROS_SCRIPT_KEY_PATH
, 0600)
901 os
.chmod(CROS_TEST_KEY_PATH
, 0600)
902 cmd
= [CROS_SDK_PATH
, '--', './bin/cros_image_to_target.py',
903 '--remote=%s' % opts
.cros_remote_ip
,
904 '--board=%s' % opts
.cros_board
, '--test', '--verbose']
906 return_code
= RunProcess(cmd
)
907 return not return_code
911 def BuildPackages(self
, opts
, depot
):
912 """Builds packages for cros.
915 opts: Program options containing cros_board.
916 depot: The depot being bisected.
921 cmd
= [CROS_SDK_PATH
]
924 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
925 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
930 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
932 cmd
+= ['BUILDTYPE=%s' % opts
.target_build_type
, './build_packages',
933 '--board=%s' % opts
.cros_board
]
934 return_code
= RunProcess(cmd
)
936 return not return_code
938 def BuildImage(self
, opts
, depot
):
939 """Builds test image for cros.
942 opts: Program options containing cros_board.
943 depot: The depot being bisected.
948 cmd
= [CROS_SDK_PATH
]
951 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
952 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
957 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
959 cmd
+= ['BUILDTYPE=%s' % opts
.target_build_type
, '--', './build_image',
960 '--board=%s' % opts
.cros_board
, 'test']
962 return_code
= RunProcess(cmd
)
964 return not return_code
# Build pipeline: packages, then image, then deploy to the remote target.
966 def Build(self
, depot
, opts
):
967 """Builds targets using options passed into the script.
970 depot: Current depot being bisected.
971 opts: The options parsed from the command line.
974 True if build was successful.
976 if self
.BuildPackages(opts
, depot
):
977 if self
.BuildImage(opts
, depot
):
978 return self
.ImageToTarget(opts
)
# NOTE(review): garbled -- the __init__ def line (above the super() call) and
# docstring tails are elided from this view. Abstract wrapper over the
# underlying source control; syncing delegates to bisect_utils helpers.
# TODO: restore elided lines from upstream.
982 class SourceControl(object):
983 """SourceControl is an abstraction over the underlying source control
984 system used for chromium. For now only git is supported, but in the
985 future, the svn workflow could be added as well."""
987 super(SourceControl
, self
).__init
__()
989 def SyncToRevisionWithGClient(self
, revision
):
990 """Uses gclient to sync to the specified revision.
992 ie. gclient sync --revision <revision>
995 revision: The git SHA1 or svn CL (depending on workflow).
998 The return code of the call.
1000 return bisect_utils
.RunGClient(['sync', '--verbose', '--reset', '--force',
1001 '--delete_unversioned_trees', '--nohooks', '--revision', revision
])
1003 def SyncToRevisionWithRepo(self
, timestamp
):
1004 """Uses repo to sync all the underlying git depots to the specified
1008 timestamp: The unix timestamp to sync to.
1011 The return code of the call.
1013 return bisect_utils
.RunRepoSyncAtTimestamp(timestamp
)
# NOTE(review): garbled -- across these methods many interior lines are
# elided: branch conditions in SyncToRevision, the search-direction guard and
# loop tails in ResolveToRevision, returns of results/None, the example dict
# in QueryRevisionInfo, and assorted docstring sections. Code tokens below
# are preserved byte-identically; restore elided lines from upstream before
# running. Git-backed implementation of SourceControl used by the bisect.
1016 class GitSourceControl(SourceControl
):
1017 """GitSourceControl is used to query the underlying source control. """
1018 def __init__(self
, opts
):
1019 super(GitSourceControl
, self
).__init
__()
# Returns the first-parent hash list between two revisions, inclusive.
1025 def GetRevisionList(self
, revision_range_end
, revision_range_start
, cwd
=None):
1026 """Retrieves a list of revisions between |revision_range_start| and
1027 |revision_range_end|.
1030 revision_range_end: The SHA1 for the end of the range.
1031 revision_range_start: The SHA1 for the beginning of the range.
1034 A list of the revisions between |revision_range_start| and
1035 |revision_range_end| (inclusive).
1037 revision_range
= '%s..%s' % (revision_range_start
, revision_range_end
)
1038 cmd
= ['log', '--format=%H', '-10000', '--first-parent', revision_range
]
1039 log_output
= CheckRunGit(cmd
, cwd
=cwd
)
1041 revision_hash_list
= log_output
.split()
1042 revision_hash_list
.append(revision_range_start
)
1044 return revision_hash_list
# Syncs via plain git checkout, gclient, or repo depending on sync_client.
1046 def SyncToRevision(self
, revision
, sync_client
=None):
1047 """Syncs to the specified revision.
1050 revision: The revision to sync to.
1051 use_gclient: Specifies whether or not we should sync using gclient or
1052 just use source control directly.
1059 results
= RunGit(['checkout', revision
])[1]
1060 elif sync_client
== 'gclient':
1061 results
= self
.SyncToRevisionWithGClient(revision
)
1062 elif sync_client
== 'repo':
1063 results
= self
.SyncToRevisionWithRepo(revision
)
# Maps a numeric svn revision to a git SHA1 via git-svn-id grep.
1067 def ResolveToRevision(self
, revision_to_check
, depot
, search
, cwd
=None):
1068 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1071 revision_to_check: The user supplied revision string that may need to be
1072 resolved to a git SHA1.
1073 depot: The depot the revision_to_check is from.
1074 search: The number of changelists to try if the first fails to resolve
1075 to a git hash. If the value is negative, the function will search
1076 backwards chronologically, otherwise it will search forward.
1079 A string containing a git SHA1 hash, otherwise None.
1081 # Android-chrome is git only, so no need to resolve this to anything else.
1082 if depot
== 'android-chrome':
1083 return revision_to_check
1086 if not IsStringInt(revision_to_check
):
1087 return revision_to_check
1089 depot_svn
= 'svn://svn.chromium.org/chrome/trunk/src'
1091 if depot
!= 'chromium':
1092 depot_svn
= DEPOT_DEPS_NAME
[depot
]['svn']
1094 svn_revision
= int(revision_to_check
)
1098 search_range
= xrange(svn_revision
, svn_revision
+ search
, 1)
1100 search_range
= xrange(svn_revision
, svn_revision
+ search
, -1)
1102 for i
in search_range
:
1103 svn_pattern
= 'git-svn-id: %s@%d' % (depot_svn
, i
)
1104 cmd
= ['log', '--format=%H', '-1', '--grep', svn_pattern
,
1107 (log_output
, return_code
) = RunGit(cmd
, cwd
=cwd
)
1109 assert not return_code
, 'An error occurred while running'\
1110 ' "git %s"' % ' '.join(cmd
)
1113 log_output
= log_output
.strip()
1116 git_revision
= log_output
# cros path: numeric revisions pass through; otherwise grep the overlay
# log for the version-bump commit and use its commit timestamp.
1122 if IsStringInt(revision_to_check
):
1123 return int(revision_to_check
)
1126 os
.chdir(os
.path
.join(os
.getcwd(), 'src', 'third_party',
1127 'chromiumos-overlay'))
1128 pattern
= CROS_VERSION_PATTERN
% revision_to_check
1129 cmd
= ['log', '--format=%ct', '-1', '--grep', pattern
]
1133 log_output
= CheckRunGit(cmd
, cwd
=cwd
)
1135 git_revision
= log_output
1136 git_revision
= int(log_output
.strip())
1141 def IsInProperBranch(self
):
1142 """Confirms they're in the master branch for performing the bisection.
1143 This is needed or gclient will fail to sync properly.
1146 True if the current branch on src is 'master'
1148 cmd
= ['rev-parse', '--abbrev-ref', 'HEAD']
1149 log_output
= CheckRunGit(cmd
)
1150 log_output
= log_output
.strip()
1152 return log_output
== "master"
1154 def SVNFindRev(self
, revision
, cwd
=None):
1155 """Maps directly to the 'git svn find-rev' command.
1158 revision: The git SHA1 to use.
1161 An integer changelist #, otherwise None.
1164 cmd
= ['svn', 'find-rev', revision
]
1166 output
= CheckRunGit(cmd
, cwd
)
1167 svn_revision
= output
.strip()
1169 if IsStringInt(svn_revision
):
1170 return int(svn_revision
)
# Collects author/email/subject/date/body via one git log call per field.
1174 def QueryRevisionInfo(self
, revision
, cwd
=None):
1175 """Gathers information on a particular revision, such as author's name,
1176 email, subject, and date.
1179 revision: Revision you want to gather information on.
1181 A dict in the following format:
1192 formats
= ['%cN', '%cE', '%s', '%cD', '%b']
1193 targets
= ['author', 'email', 'subject', 'date', 'body']
1195 for i
in xrange(len(formats
)):
1196 cmd
= ['log', '--format=%s' % formats
[i
], '-1', revision
]
1197 output
= CheckRunGit(cmd
, cwd
=cwd
)
1198 commit_info
[targets
[i
]] = output
.rstrip()
1202 def CheckoutFileAtRevision(self
, file_name
, revision
, cwd
=None):
1203 """Performs a checkout on a file at the given revision.
1208 return not RunGit(['checkout', revision
, file_name
], cwd
=cwd
)[1]
1210 def RevertFileToHead(self
, file_name
):
1211 """Unstages a file and returns it to HEAD.
1216 # Reset doesn't seem to return 0 on success.
1217 RunGit(['reset', 'HEAD', file_name
])
1219 return not RunGit(['checkout', bisect_utils
.FILE_DEPS_GIT
])[1]
1221 def QueryFileRevisionHistory(self
, filename
, revision_start
, revision_end
):
1222 """Returns a list of commits that modified this file.
1225 filename: Name of file.
1226 revision_start: Start of revision range.
1227 revision_end: End of revision range.
1230 Returns a list of commits that touched this file.
1232 cmd
= ['log', '--format=%H', '%s~1..%s' % (revision_start
, revision_end
),
1234 output
= CheckRunGit(cmd
)
1236 return [o
for o
in output
.split('\n') if o
]
1239 class BisectPerformanceMetrics(object):
1240 """This class contains functionality to perform a bisection of a range of
1241 revisions to narrow down where performance regressions may have occurred.
1243 The main entry-point is the Run method.
1246 def __init__(self
, source_control
, opts
):
1247 super(BisectPerformanceMetrics
, self
).__init
__()
1250 self
.source_control
= source_control
1251 self
.src_cwd
= os
.getcwd()
1252 self
.cros_cwd
= os
.path
.join(os
.getcwd(), '..', 'cros')
1254 self
.cleanup_commands
= []
1256 self
.builder
= Builder
.FromOpts(opts
)
1258 # This always starts true since the script grabs latest first.
1259 self
.was_blink
= True
1261 for d
in DEPOT_NAMES
:
1262 # The working directory of each depot is just the path to the depot, but
1263 # since we're already in 'src', we can skip that part.
1265 self
.depot_cwd
[d
] = os
.path
.join(
1266 self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
1268 def PerformCleanup(self
):
1269 """Performs cleanup when script is finished."""
1270 os
.chdir(self
.src_cwd
)
1271 for c
in self
.cleanup_commands
:
1273 shutil
.move(c
[1], c
[2])
1275 assert False, 'Invalid cleanup command.'
1277 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
1278 """Retrieves a list of all the commits between the bad revision and
1279 last known good revision."""
1281 revision_work_list
= []
1284 revision_range_start
= good_revision
1285 revision_range_end
= bad_revision
1288 self
.ChangeToDepotWorkingDirectory('cros')
1290 # Print the commit timestamps for every commit in the revision time
1291 # range. We'll sort them and bisect by that. There is a remote chance that
1292 # 2 (or more) commits will share the exact same timestamp, but it's
1293 # probably safe to ignore that case.
1294 cmd
= ['repo', 'forall', '-c',
1295 'git log --format=%%ct --before=%d --after=%d' % (
1296 revision_range_end
, revision_range_start
)]
1297 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1299 assert not return_code
, 'An error occurred while running'\
1300 ' "%s"' % ' '.join(cmd
)
1304 revision_work_list
= list(set(
1305 [int(o
) for o
in output
.split('\n') if IsStringInt(o
)]))
1306 revision_work_list
= sorted(revision_work_list
, reverse
=True)
1308 cwd
= self
._GetDepotDirectory
(depot
)
1309 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
1310 good_revision
, cwd
=cwd
)
1312 return revision_work_list
1314 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self
, revision
):
1315 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1317 if IsStringInt(svn_revision
):
1318 # V8 is tricky to bisect, in that there are only a few instances when
1319 # we can dive into bleeding_edge and get back a meaningful result.
1320 # Try to detect a V8 "business as usual" case, which is when:
1321 # 1. trunk revision N has description "Version X.Y.Z"
1322 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1323 # trunk. Now working on X.Y.(Z+1)."
1325 # As of 01/24/2014, V8 trunk descriptions are formatted:
1326 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1327 # So we can just try parsing that out first and fall back to the old way.
1328 v8_dir
= self
._GetDepotDirectory
('v8')
1329 v8_bleeding_edge_dir
= self
._GetDepotDirectory
('v8_bleeding_edge')
1331 revision_info
= self
.source_control
.QueryRevisionInfo(revision
,
1334 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
1336 regex_results
= version_re
.search(revision_info
['subject'])
1341 # Look for "based on bleeding_edge" and parse out revision
1342 if 'based on bleeding_edge' in revision_info
['subject']:
1344 bleeding_edge_revision
= revision_info
['subject'].split(
1345 'bleeding_edge revision r')[1]
1346 bleeding_edge_revision
= int(bleeding_edge_revision
.split(')')[0])
1347 git_revision
= self
.source_control
.ResolveToRevision(
1348 bleeding_edge_revision
, 'v8_bleeding_edge', 1,
1349 cwd
=v8_bleeding_edge_dir
)
1351 except (IndexError, ValueError):
1354 if not git_revision
:
1355 # Wasn't successful, try the old way of looking for "Prepare push to"
1356 git_revision
= self
.source_control
.ResolveToRevision(
1357 int(svn_revision
) - 1, 'v8_bleeding_edge', -1,
1358 cwd
=v8_bleeding_edge_dir
)
1361 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
,
1362 cwd
=v8_bleeding_edge_dir
)
1364 if 'Prepare push to trunk' in revision_info
['subject']:
1368 def _GetNearestV8BleedingEdgeFromTrunk(self
, revision
, search_forward
=True):
1369 cwd
= self
._GetDepotDirectory
('v8')
1370 cmd
= ['log', '--format=%ct', '-1', revision
]
1371 output
= CheckRunGit(cmd
, cwd
=cwd
)
1372 commit_time
= int(output
)
1376 cmd
= ['log', '--format=%H', '-10', '--after=%d' % commit_time
,
1378 output
= CheckRunGit(cmd
, cwd
=cwd
)
1379 output
= output
.split()
1381 commits
= reversed(commits
)
1383 cmd
= ['log', '--format=%H', '-10', '--before=%d' % commit_time
,
1385 output
= CheckRunGit(cmd
, cwd
=cwd
)
1386 output
= output
.split()
1389 bleeding_edge_revision
= None
1392 bleeding_edge_revision
= self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(c
)
1393 if bleeding_edge_revision
:
1396 return bleeding_edge_revision
1398 def _ParseRevisionsFromDEPSFileManually(self
, deps_file_contents
):
1399 """Manually parses the vars section of the DEPS file to determine
1400 chromium/blink/etc... revisions.
1403 A dict in the format {depot:revision} if successful, otherwise None.
1405 # We'll parse the "vars" section of the DEPS file.
1406 rxp
= re
.compile('vars = {(?P<vars_body>[^}]+)', re
.MULTILINE
)
1407 re_results
= rxp
.search(deps_file_contents
)
1413 # We should be left with a series of entries in the vars component of
1414 # the DEPS file with the following format:
1415 # 'depot_name': 'revision',
1416 vars_body
= re_results
.group('vars_body')
1417 rxp
= re
.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1419 re_results
= rxp
.findall(vars_body
)
1421 return dict(re_results
)
1423 def _ParseRevisionsFromDEPSFile(self
, depot
):
1424 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1425 be needed if the bisect recurses into those depots later.
1428 depot: Depot being bisected.
1431 A dict in the format {depot:revision} if successful, otherwise None.
1434 deps_data
= {'Var': lambda _
: deps_data
["vars"][_
],
1435 'From': lambda *args
: None
1437 execfile(bisect_utils
.FILE_DEPS_GIT
, {}, deps_data
)
1438 deps_data
= deps_data
['deps']
1440 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1442 for depot_name
, depot_data
in DEPOT_DEPS_NAME
.iteritems():
1443 if (depot_data
.get('platform') and
1444 depot_data
.get('platform') != os
.name
):
1447 if (depot_data
.get('recurse') and depot
in depot_data
.get('from')):
1448 depot_data_src
= depot_data
.get('src') or depot_data
.get('src_old')
1449 src_dir
= deps_data
.get(depot_data_src
)
1451 self
.depot_cwd
[depot_name
] = os
.path
.join(self
.src_cwd
,
1453 re_results
= rxp
.search(src_dir
)
1455 results
[depot_name
] = re_results
.group('revision')
1457 warning_text
= ('Couldn\'t parse revision for %s while bisecting '
1458 '%s' % (depot_name
, depot
))
1459 if not warning_text
in self
.warnings
:
1460 self
.warnings
.append(warning_text
)
1462 results
[depot_name
] = None
1465 deps_file_contents
= ReadStringFromFile(bisect_utils
.FILE_DEPS_GIT
)
1466 parse_results
= self
._ParseRevisionsFromDEPSFileManually
(
1469 for depot_name
, depot_revision
in parse_results
.iteritems():
1470 depot_revision
= depot_revision
.strip('@')
1471 print depot_name
, depot_revision
1472 for current_name
, current_data
in DEPOT_DEPS_NAME
.iteritems():
1473 if (current_data
.has_key('deps_var') and
1474 current_data
['deps_var'] == depot_name
):
1475 src_name
= current_name
1476 results
[src_name
] = depot_revision
1480 def Get3rdPartyRevisionsFromCurrentRevision(self
, depot
, revision
):
1481 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1484 A dict in the format {depot:revision} if successful, otherwise None.
1487 self
.ChangeToDepotWorkingDirectory(depot
)
1491 if depot
== 'chromium' or depot
== 'android-chrome':
1492 results
= self
._ParseRevisionsFromDEPSFile
(depot
)
1494 elif depot
== 'cros':
1495 cmd
= [CROS_SDK_PATH
, '--', 'portageq-%s' % self
.opts
.cros_board
,
1496 'best_visible', '/build/%s' % self
.opts
.cros_board
, 'ebuild',
1497 CROS_CHROMEOS_PATTERN
]
1498 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1500 assert not return_code
, 'An error occurred while running' \
1501 ' "%s"' % ' '.join(cmd
)
1503 if len(output
) > CROS_CHROMEOS_PATTERN
:
1504 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1507 output
= output
.split('_')[0]
1510 contents
= output
.split('.')
1512 version
= contents
[2]
1514 if contents
[3] != '0':
1515 warningText
= 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1516 (version
, contents
[3], version
)
1517 if not warningText
in self
.warnings
:
1518 self
.warnings
.append(warningText
)
1521 self
.ChangeToDepotWorkingDirectory('chromium')
1522 return_code
= CheckRunGit(['log', '-1', '--format=%H',
1523 '--author=chrome-release@google.com', '--grep=to %s' % version
,
1527 results
['chromium'] = output
.strip()
1529 # We can't try to map the trunk revision to bleeding edge yet, because
1530 # we don't know which direction to try to search in. Have to wait until
1531 # the bisect has narrowed the results down to 2 v8 rolls.
1532 results
['v8_bleeding_edge'] = None
1536 def BackupOrRestoreOutputdirectory(self
, restore
=False, build_type
='Release'):
1537 """Backs up or restores build output directory based on restore argument.
1540 restore: Indicates whether to restore or backup. Default is False(Backup)
1541 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1544 Path to backup or restored location as string. otherwise None if it fails.
1546 build_dir
= os
.path
.abspath(
1547 self
.builder
.GetBuildOutputDirectory(self
.opts
, self
.src_cwd
))
1548 source_dir
= os
.path
.join(build_dir
, build_type
)
1549 destination_dir
= os
.path
.join(build_dir
, '%s.bak' % build_type
)
1551 source_dir
, destination_dir
= destination_dir
, source_dir
1552 if os
.path
.exists(source_dir
):
1553 RmTreeAndMkDir(destination_dir
, skip_makedir
=True)
1554 shutil
.move(source_dir
, destination_dir
)
1555 return destination_dir
1558 def GetBuildArchiveForRevision(self
, revision
, gs_bucket
, target_arch
,
1559 patch_sha
, out_dir
):
1560 """Checks and downloads build archive for a given revision.
1562 Checks for build archive with Git hash or SVN revision. If either of the
1563 file exists, then downloads the archive file.
1566 revision: A Git hash revision.
1567 gs_bucket: Cloud storage bucket name
1568 target_arch: 32 or 64 bit build target
1569 patch: A DEPS patch (used while bisecting 3rd party repositories).
1570 out_dir: Build output directory where downloaded file is stored.
1573 Downloaded archive file path if exists, otherwise None.
1575 # Source archive file path on cloud storage using Git revision.
1576 source_file
= GetRemoteBuildPath(
1577 revision
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1578 downloaded_archive
= FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1579 if not downloaded_archive
:
1580 # Get SVN revision for the given SHA.
1581 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1583 # Source archive file path on cloud storage using SVN revision.
1584 source_file
= GetRemoteBuildPath(
1585 svn_revision
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1586 return FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1587 return downloaded_archive
1589 def DownloadCurrentBuild(self
, revision
, build_type
='Release', patch
=None):
1590 """Downloads the build archive for the given revision.
1593 revision: The Git revision to download or build.
1594 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1595 patch: A DEPS patch (used while bisecting 3rd party repositories).
1598 True if download succeeds, otherwise False.
1602 # Get the SHA of the DEPS changes patch.
1603 patch_sha
= GetSHA1HexDigest(patch
)
1605 # Update the DEPS changes patch with a patch to create a new file named
1606 # 'DEPS.sha' and add patch_sha evaluated above to it.
1607 patch
= '%s\n%s' % (patch
, DEPS_SHA_PATCH
% {'deps_sha': patch_sha
})
1609 # Get Build output directory
1610 abs_build_dir
= os
.path
.abspath(
1611 self
.builder
.GetBuildOutputDirectory(self
.opts
, self
.src_cwd
))
1613 fetch_build_func
= lambda: self
.GetBuildArchiveForRevision(
1614 revision
, self
.opts
.gs_bucket
, self
.opts
.target_arch
,
1615 patch_sha
, abs_build_dir
)
1617 # Downloaded archive file path, downloads build archive for given revision.
1618 downloaded_file
= fetch_build_func()
1620 # When build archive doesn't exists, post a build request to tryserver
1621 # and wait for the build to be produced.
1622 if not downloaded_file
:
1623 downloaded_file
= self
.PostBuildRequestAndWait(
1624 revision
, fetch_build
=fetch_build_func
, patch
=patch
)
1625 if not downloaded_file
:
1628 # Generic name for the archive, created when archive file is extracted.
1629 output_dir
= os
.path
.join(
1630 abs_build_dir
, GetZipFileName(target_arch
=self
.opts
.target_arch
))
1631 # Unzip build archive directory.
1633 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
1634 ExtractZip(downloaded_file
, abs_build_dir
)
1635 if os
.path
.exists(output_dir
):
1636 self
.BackupOrRestoreOutputdirectory(restore
=False)
1637 # Build output directory based on target(e.g. out/Release, out/Debug).
1638 target_build_output_dir
= os
.path
.join(abs_build_dir
, build_type
)
1639 print 'Moving build from %s to %s' % (
1640 output_dir
, target_build_output_dir
)
1641 shutil
.move(output_dir
, target_build_output_dir
)
1643 raise IOError('Missing extracted folder %s ' % output_dir
)
1644 except Exception as e
:
1645 print 'Somewthing went wrong while extracting archive file: %s' % e
1646 self
.BackupOrRestoreOutputdirectory(restore
=True)
1647 # Cleanup any leftovers from unzipping.
1648 if os
.path
.exists(output_dir
):
1649 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
1651 # Delete downloaded archive
1652 if os
.path
.exists(downloaded_file
):
1653 os
.remove(downloaded_file
)
1656 def WaitUntilBuildIsReady(self
, fetch_build
, bot_name
, builder_host
,
1657 builder_port
, build_request_id
, max_timeout
):
1658 """Waits until build is produced by bisect builder on tryserver.
1661 fetch_build: Function to check and download build from cloud storage.
1662 bot_name: Builder bot name on tryserver.
1663 builder_host Tryserver hostname.
1664 builder_port: Tryserver port.
1665 build_request_id: A unique ID of the build request posted to tryserver.
1666 max_timeout: Maximum time to wait for the build.
1669 Downloaded archive file path if exists, otherwise None.
1671 # Build number on the tryserver.
1673 # Interval to check build on cloud storage.
1675 # Interval to check build status on tryserver.
1676 status_check_interval
= 600
1677 last_status_check
= time
.time()
1678 start_time
= time
.time()
1680 # Checks for build on gs://chrome-perf and download if exists.
1683 return (res
, 'Build successfully found')
1684 elapsed_status_check
= time
.time() - last_status_check
1685 # To avoid overloading tryserver with status check requests, we check
1686 # build status for every 10 mins.
1687 if elapsed_status_check
> status_check_interval
:
1688 last_status_check
= time
.time()
1690 # Get the build number on tryserver for the current build.
1691 build_num
= bisect_builder
.GetBuildNumFromBuilder(
1692 build_request_id
, bot_name
, builder_host
, builder_port
)
1693 # Check the status of build using the build number.
1694 # Note: Build is treated as PENDING if build number is not found
1695 # on the the tryserver.
1696 build_status
, status_link
= bisect_builder
.GetBuildStatus(
1697 build_num
, bot_name
, builder_host
, builder_port
)
1698 if build_status
== bisect_builder
.FAILED
:
1699 return (None, 'Failed to produce build, log: %s' % status_link
)
1700 elapsed_time
= time
.time() - start_time
1701 if elapsed_time
> max_timeout
:
1702 return (None, 'Timed out: %ss without build' % max_timeout
)
1704 print 'Time elapsed: %ss without build.' % elapsed_time
1705 time
.sleep(poll_interval
)
1707 def PostBuildRequestAndWait(self
, revision
, fetch_build
, patch
=None):
1708 """POSTs the build request job to the tryserver instance.
1710 A try job build request is posted to tryserver.chromium.perf master,
1711 and waits for the binaries to be produced and archived on cloud storage.
1712 Once the build is ready and stored onto cloud, build archive is downloaded
1713 into the output folder.
1716 revision: A Git hash revision.
1717 fetch_build: Function to check and download build from cloud storage.
1718 patch: A DEPS patch (used while bisecting 3rd party repositories).
1721 Downloaded archive file path when requested build exists and download is
1722 successful, otherwise None.
1724 # Get SVN revision for the given SHA.
1725 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1726 if not svn_revision
:
1728 'Failed to determine SVN revision for %s' % revision
)
1730 def GetBuilderNameAndBuildTime(target_platform
, target_arch
='ia32'):
1731 """Gets builder bot name and buildtime in seconds based on platform."""
1732 # Bot names should match the one listed in tryserver.chromium's
1733 # master.cfg which produces builds for bisect.
1735 if Is64BitWindows() and target_arch
== 'x64':
1736 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME
)
1737 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME
)
1739 if target_platform
== 'android':
1740 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME
)
1741 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME
)
1743 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME
)
1744 raise NotImplementedError('Unsupported Platform "%s".' % sys
.platform
)
1748 bot_name
, build_timeout
= GetBuilderNameAndBuildTime(
1749 self
.opts
.target_platform
, self
.opts
.target_arch
)
1750 builder_host
= self
.opts
.builder_host
1751 builder_port
= self
.opts
.builder_port
1752 # Create a unique ID for each build request posted to tryserver builders.
1753 # This ID is added to "Reason" property in build's json.
1754 build_request_id
= GetSHA1HexDigest(
1755 '%s-%s-%s' % (svn_revision
, patch
, time
.time()))
1757 # Creates a try job description.
1758 job_args
= {'host': builder_host
,
1759 'port': builder_port
,
1760 'revision': 'src@%s' % svn_revision
,
1762 'name': build_request_id
1764 # Update patch information if supplied.
1766 job_args
['patch'] = patch
1767 # Posts job to build the revision on the server.
1768 if bisect_builder
.PostTryJob(job_args
):
1769 target_file
, error_msg
= self
.WaitUntilBuildIsReady(fetch_build
,
1776 print '%s [revision: %s]' % (error_msg
, svn_revision
)
1779 print 'Failed to post build request for revision: [%s]' % svn_revision
1782 def IsDownloadable(self
, depot
):
1783 """Checks if build is downloadable based on target platform and depot."""
1784 if (self
.opts
.target_platform
in ['chromium', 'android'] and
1785 self
.opts
.gs_bucket
):
1786 return (depot
== 'chromium' or
1787 'chromium' in DEPOT_DEPS_NAME
[depot
]['from'] or
1788 'v8' in DEPOT_DEPS_NAME
[depot
]['from'])
1791 def UpdateDeps(self
, revision
, depot
, deps_file
):
1792 """Updates DEPS file with new revision of dependency repository.
1794 This method search DEPS for a particular pattern in which depot revision
1795 is specified (e.g "webkit_revision": "123456"). If a match is found then
1796 it resolves the given git hash to SVN revision and replace it in DEPS file.
1799 revision: A git hash revision of the dependency repository.
1800 depot: Current depot being bisected.
1801 deps_file: Path to DEPS file.
1804 True if DEPS file is modified successfully, otherwise False.
1806 if not os
.path
.exists(deps_file
):
1809 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
1810 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1812 print 'DEPS update not supported for Depot: %s', depot
1815 # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1816 # contains "angle_revision" key that holds git hash instead of SVN revision.
1817 # And sometime "angle_revision" key is not specified in "vars" variable,
1818 # in such cases check "deps" dictionary variable that matches
1819 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1820 if depot
== 'angle':
1821 return self
.UpdateDEPSForAngle(revision
, depot
, deps_file
)
1824 deps_contents
= ReadStringFromFile(deps_file
)
1825 # Check whether the depot and revision pattern in DEPS file vars
1826 # e.g. for webkit the format is "webkit_revision": "12345".
1827 deps_revision
= re
.compile(r
'(?<="%s": ")([0-9]+)(?=")' % deps_var
,
1829 match
= re
.search(deps_revision
, deps_contents
)
1831 svn_revision
= self
.source_control
.SVNFindRev(
1832 revision
, self
._GetDepotDirectory
(depot
))
1833 if not svn_revision
:
1834 print 'Could not determine SVN revision for %s' % revision
1836 # Update the revision information for the given depot
1837 new_data
= re
.sub(deps_revision
, str(svn_revision
), deps_contents
)
1839 # For v8_bleeding_edge revisions change V8 branch in order
1840 # to fetch bleeding edge revision.
1841 if depot
== 'v8_bleeding_edge':
1842 new_data
= self
.UpdateV8Branch(new_data
)
1845 # Write changes to DEPS file
1846 WriteStringToFile(new_data
, deps_file
)
1849 print 'Something went wrong while updating DEPS file. [%s]' % e
1852 def UpdateV8Branch(self
, deps_content
):
1853 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1855 Check for "v8_branch" in DEPS file if exists update its value
1856 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1857 variable from DEPS revision 254916, therefore check for "src/v8":
1858 <v8 source path> in DEPS in order to support prior DEPS revisions
1862 deps_content: DEPS file contents to be modified.
1865 Modified DEPS file contents as a string.
1867 new_branch
= r
'branches/bleeding_edge'
1868 v8_branch_pattern
= re
.compile(r
'(?<="v8_branch": ")(.*)(?=")')
1869 if re
.search(v8_branch_pattern
, deps_content
):
1870 deps_content
= re
.sub(v8_branch_pattern
, new_branch
, deps_content
)
1872 # Replaces the branch assigned to "src/v8" key in DEPS file.
1873 # Format of "src/v8" in DEPS:
1875 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1876 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1877 v8_src_pattern
= re
.compile(
1878 r
'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re
.MULTILINE
)
1879 if re
.search(v8_src_pattern
, deps_content
):
1880 deps_content
= re
.sub(v8_src_pattern
, new_branch
, deps_content
)
1883 def UpdateDEPSForAngle(self
, revision
, depot
, deps_file
):
1884 """Updates DEPS file with new revision for Angle repository.
1886 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1887 variable contains "angle_revision" key that holds git hash instead of
1890 And sometimes "angle_revision" key is not specified in "vars" variable,
1891 in such cases check "deps" dictionary variable that matches
1892 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1894 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
1896 deps_contents
= ReadStringFromFile(deps_file
)
1897 # Check whether the depot and revision pattern in DEPS file vars variable
1898 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1899 angle_rev_pattern
= re
.compile(r
'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1900 deps_var
, re
.MULTILINE
)
1901 match
= re
.search(angle_rev_pattern
% deps_var
, deps_contents
)
1903 # Update the revision information for the given depot
1904 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1906 # Check whether the depot and revision pattern in DEPS file deps
1908 # "src/third_party/angle": Var("chromium_git") +
1909 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1910 angle_rev_pattern
= re
.compile(
1911 r
'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re
.MULTILINE
)
1912 match
= re
.search(angle_rev_pattern
, deps_contents
)
1914 print 'Could not find angle revision information in DEPS file.'
1916 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1917 # Write changes to DEPS file
1918 WriteStringToFile(new_data
, deps_file
)
1921 print 'Something went wrong while updating DEPS file, %s' % e
1924 def CreateDEPSPatch(self
, depot
, revision
):
1925 """Modifies DEPS and returns diff as text.
1928 depot: Current depot being bisected.
1929 revision: A git hash revision of the dependency repository.
1932 A tuple with git hash of chromium revision and DEPS patch text.
1934 deps_file_path
= os
.path
.join(self
.src_cwd
, bisect_utils
.FILE_DEPS
)
1935 if not os
.path
.exists(deps_file_path
):
1936 raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path
)
1937 # Get current chromium revision (git hash).
1938 chromium_sha
= CheckRunGit(['rev-parse', 'HEAD']).strip()
1939 if not chromium_sha
:
1940 raise RuntimeError('Failed to determine Chromium revision for %s' %
1942 if ('chromium' in DEPOT_DEPS_NAME
[depot
]['from'] or
1943 'v8' in DEPOT_DEPS_NAME
[depot
]['from']):
1944 # Checkout DEPS file for the current chromium revision.
1945 if self
.source_control
.CheckoutFileAtRevision(bisect_utils
.FILE_DEPS
,
1948 if self
.UpdateDeps(revision
, depot
, deps_file_path
):
1949 diff_command
= ['diff',
1950 '--src-prefix=src/',
1951 '--dst-prefix=src/',
1953 bisect_utils
.FILE_DEPS
]
1954 diff_text
= CheckRunGit(diff_command
, cwd
=self
.src_cwd
)
1955 return (chromium_sha
, ChangeBackslashToSlashInPatch(diff_text
))
1957 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1960 raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1964 def BuildCurrentRevision(self
, depot
, revision
=None):
1965 """Builds chrome and performance_ui_tests on the current revision.
1968 True if the build was successful.
1970 if self
.opts
.debug_ignore_build
:
1973 os
.chdir(self
.src_cwd
)
1974 # Fetch build archive for the given revision from the cloud storage when
1975 # the storage bucket is passed.
1976 if self
.IsDownloadable(depot
) and revision
:
1978 if depot
!= 'chromium':
1979 # Create a DEPS patch with new revision for dependency repository.
1980 (revision
, deps_patch
) = self
.CreateDEPSPatch(depot
, revision
)
1981 if self
.DownloadCurrentBuild(revision
, patch
=deps_patch
):
1984 # Reverts the changes to DEPS file.
1985 self
.source_control
.CheckoutFileAtRevision(bisect_utils
.FILE_DEPS
,
1991 # These codes are executed when bisect bots builds binaries locally.
1992 build_success
= self
.builder
.Build(depot
, self
.opts
)
1994 return build_success
1996 def RunGClientHooks(self
):
1997 """Runs gclient with runhooks command.
2000 True if gclient reports no errors.
2003 if self
.opts
.debug_ignore_build
:
2006 return not bisect_utils
.RunGClient(['runhooks'], cwd
=self
.src_cwd
)
2008 def TryParseHistogramValuesFromOutput(self
, metric
, text
):
2009 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
2012 metric: The metric as a list of [<trace>, <value>] strings.
2013 text: The text to parse the metric values from.
2016 A list of floating point numbers found.
2018 metric_formatted
= 'HISTOGRAM %s: %s= ' % (metric
[0], metric
[1])
2020 text_lines
= text
.split('\n')
2023 for current_line
in text_lines
:
2024 if metric_formatted
in current_line
:
2025 current_line
= current_line
[len(metric_formatted
):]
2028 histogram_values
= eval(current_line
)
2030 for b
in histogram_values
['buckets']:
2031 average_for_bucket
= float(b
['high'] + b
['low']) * 0.5
2032 # Extends the list with N-elements with the average for that bucket.
2033 values_list
.extend([average_for_bucket
] * b
['count'])
2039 def TryParseResultValuesFromOutput(self
, metric
, text
):
2040 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
2043 metric: The metric as a list of [<trace>, <value>] strings.
2044 text: The text to parse the metric values from.
2047 A list of floating point numbers found.
2049 # Format is: RESULT <graph>: <trace>= <value> <units>
2050 metric_re
= re
.escape('RESULT %s: %s=' % (metric
[0], metric
[1]))
2052 # The log will be parsed looking for format:
2053 # <*>RESULT <graph_name>: <trace_name>= <value>
2054 single_result_re
= re
.compile(
2055 metric_re
+ '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
2057 # The log will be parsed looking for format:
2058 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
2059 multi_results_re
= re
.compile(
2060 metric_re
+ '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
2062 # The log will be parsed looking for format:
2063 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
2064 mean_stddev_re
= re
.compile(
2066 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
2068 text_lines
= text
.split('\n')
2070 for current_line
in text_lines
:
2071 # Parse the output from the performance test for the metric we're
2073 single_result_match
= single_result_re
.search(current_line
)
2074 multi_results_match
= multi_results_re
.search(current_line
)
2075 mean_stddev_match
= mean_stddev_re
.search(current_line
)
2076 if (not single_result_match
is None and
2077 single_result_match
.group('VALUE')):
2078 values_list
+= [single_result_match
.group('VALUE')]
2079 elif (not multi_results_match
is None and
2080 multi_results_match
.group('VALUES')):
2081 metric_values
= multi_results_match
.group('VALUES')
2082 values_list
+= metric_values
.split(',')
2083 elif (not mean_stddev_match
is None and
2084 mean_stddev_match
.group('MEAN')):
2085 values_list
+= [mean_stddev_match
.group('MEAN')]
2087 values_list
= [float(v
) for v
in values_list
if IsStringFloat(v
)]
2089 # If the metric is times/t, we need to sum the timings in order to get
2090 # similar regression results as the try-bots.
2091 metrics_to_sum
= [['times', 't'], ['times', 'page_load_time'],
2092 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
2094 if metric
in metrics_to_sum
:
2096 values_list
= [reduce(lambda x
, y
: float(x
) + float(y
), values_list
)]
2100 def ParseMetricValuesFromOutput(self
, metric
, text
):
2101 """Parses output from performance_ui_tests and retrieves the results for
2105 metric: The metric as a list of [<trace>, <value>] strings.
2106 text: The text to parse the metric values from.
2109 A list of floating point numbers found.
2111 metric_values
= self
.TryParseResultValuesFromOutput(metric
, text
)
2113 if not metric_values
:
2114 metric_values
= self
.TryParseHistogramValuesFromOutput(metric
, text
)
2116 return metric_values
2118 def _GenerateProfileIfNecessary(self
, command_args
):
2119 """Checks the command line of the performance test for dependencies on
2120 profile generation, and runs tools/perf/generate_profile as necessary.
2123 command_args: Command line being passed to performance test, as a list.
2126 False if profile generation was necessary and failed, otherwise True.
2129 if '--profile-dir' in ' '.join(command_args
):
2130 # If we were using python 2.7+, we could just use the argparse
2131 # module's parse_known_args to grab --profile-dir. Since some of the
2132 # bots still run 2.6, have to grab the arguments manually.
2134 args_to_parse
= ['--profile-dir', '--browser']
2136 for arg_to_parse
in args_to_parse
:
2137 for i
, current_arg
in enumerate(command_args
):
2138 if arg_to_parse
in current_arg
:
2139 current_arg_split
= current_arg
.split('=')
2141 # Check 2 cases, --arg=<val> and --arg <val>
2142 if len(current_arg_split
) == 2:
2143 arg_dict
[arg_to_parse
] = current_arg_split
[1]
2144 elif i
+ 1 < len(command_args
):
2145 arg_dict
[arg_to_parse
] = command_args
[i
+1]
2147 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
2149 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
2150 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
2151 return not RunProcess(['python', path_to_generate
,
2152 '--profile-type-to-generate', profile_type
,
2153 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
def _IsBisectModeUsingMetric(self):
  """Returns True when bisecting on the mean or the standard deviation."""
  mode = self.opts.bisect_mode
  return mode == BISECT_MODE_MEAN or mode == BISECT_MODE_STD_DEV
def _IsBisectModeReturnCode(self):
  """Returns True when bisecting on the test's return code."""
  return self.opts.bisect_mode == BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  """Returns True when bisecting on the standard deviation."""
  return self.opts.bisect_mode == BISECT_MODE_STD_DEV
2166 def RunPerformanceTestAndParseResults(
2167 self
, command_to_run
, metric
, reset_on_first_run
=False,
2168 upload_on_last_run
=False, results_label
=None):
2169 """Runs a performance test on the current revision and parses the results.
2172 command_to_run: The command to be run to execute the performance test.
2173 metric: The metric to parse out from the results of the performance test.
2174 This is the result chart name and trace name, separated by slash.
2175 reset_on_first_run: If True, pass the flag --reset-results on first run.
2176 upload_on_last_run: If True, pass the flag --upload-results on last run.
2177 results_label: A value for the option flag --results-label.
2178 The arguments reset_on_first_run, upload_on_last_run and results_label
2179 are all ignored if the test is not a Telemetry test.
2182 (values dict, 0) if --debug_ignore_perf_test was passed.
2183 (values dict, 0, test output) if the test was run successfully.
2184 (error message, -1) if the test couldn't be run.
2185 (error message, -1, test output) if the test ran but there was an error.
2187 success_code
, failure_code
= 0, -1
2189 if self
.opts
.debug_ignore_perf_test
:
2196 return (fake_results
, success_code
)
2198 # For Windows platform set posix=False, to parse windows paths correctly.
2199 # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
2200 # refer to http://bugs.python.org/issue1724822. By default posix=True.
2201 args
= shlex
.split(command_to_run
, posix
=not IsWindowsHost())
2203 if not self
._GenerateProfileIfNecessary
(args
):
2204 err_text
= 'Failed to generate profile for performance test.'
2205 return (err_text
, failure_code
)
2207 # If running a Telemetry test for Chrome OS, insert the remote IP and
2208 # identity parameters.
2209 is_telemetry
= bisect_utils
.IsTelemetryCommand(command_to_run
)
2210 if self
.opts
.target_platform
== 'cros' and is_telemetry
:
2211 args
.append('--remote=%s' % self
.opts
.cros_remote_ip
)
2212 args
.append('--identity=%s' % CROS_TEST_KEY_PATH
)
2214 start_time
= time
.time()
2217 output_of_all_runs
= ''
2218 for i
in xrange(self
.opts
.repeat_test_count
):
2219 # Can ignore the return code since if the tests fail, it won't return 0.
2220 current_args
= copy
.copy(args
)
2222 if i
== 0 and reset_on_first_run
:
2223 current_args
.append('--reset-results')
2224 elif i
== self
.opts
.repeat_test_count
- 1 and upload_on_last_run
:
2225 current_args
.append('--upload-results')
2227 current_args
.append('--results-label=%s' % results_label
)
2229 (output
, return_code
) = RunProcessAndRetrieveOutput(current_args
,
2232 if e
.errno
== errno
.ENOENT
:
2233 err_text
= ('Something went wrong running the performance test. '
2234 'Please review the command line:\n\n')
2235 if 'src/' in ' '.join(args
):
2236 err_text
+= ('Check that you haven\'t accidentally specified a '
2237 'path with src/ in the command.\n\n')
2238 err_text
+= ' '.join(args
)
2241 return (err_text
, failure_code
)
2244 output_of_all_runs
+= output
2245 if self
.opts
.output_buildbot_annotations
:
2248 if self
._IsBisectModeUsingMetric
():
2249 metric_values
+= self
.ParseMetricValuesFromOutput(metric
, output
)
2250 # If we're bisecting on a metric (ie, changes in the mean or
2251 # standard deviation) and no metric values are produced, bail out.
2252 if not metric_values
:
2254 elif self
._IsBisectModeReturnCode
():
2255 metric_values
.append(return_code
)
2257 elapsed_minutes
= (time
.time() - start_time
) / 60.0
2258 if elapsed_minutes
>= self
.opts
.max_time_minutes
:
2261 if len(metric_values
) == 0:
2262 err_text
= 'Metric %s was not found in the test output.' % metric
2263 # TODO(qyearsley): Consider also getting and displaying a list of metrics
2264 # that were found in the output here.
2265 return (err_text
, failure_code
, output_of_all_runs
)
2267 # If we're bisecting on return codes, we're really just looking for zero vs
2269 if self
._IsBisectModeReturnCode
():
2270 # If any of the return codes is non-zero, output 1.
2271 overall_return_code
= 0 if (
2272 all(current_value
== 0 for current_value
in metric_values
)) else 1
2275 'mean': overall_return_code
,
2278 'values': metric_values
,
2281 print 'Results of performance test: Command returned with %d' % (
2282 overall_return_code
)
2285 # Need to get the average value if there were multiple values.
2286 truncated_mean
= CalculateTruncatedMean(metric_values
,
2287 self
.opts
.truncate_percent
)
2288 standard_err
= CalculateStandardError(metric_values
)
2289 standard_dev
= CalculateStandardDeviation(metric_values
)
2291 if self
._IsBisectModeStandardDeviation
():
2292 metric_values
= [standard_dev
]
2295 'mean': truncated_mean
,
2296 'std_err': standard_err
,
2297 'std_dev': standard_dev
,
2298 'values': metric_values
,
2301 print 'Results of performance test: %12f %12f' % (
2302 truncated_mean
, standard_err
)
2304 return (values
, success_code
, output_of_all_runs
)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced, or None if
    not every dependant depot could be resolved.
  """
  revisions_to_sync = [[depot, revision]]

  is_base = ((depot == 'chromium') or (depot == 'cros') or
      (depot == 'android-chrome'))

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if not is_base and\
     DEPOT_DEPS_NAME[depot]['depends'] and\
     self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

      if dependant_rev:
        revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # The first entry is the depot itself; all dependants must have resolved.
    if not ((num_resolved - 1) == num_needed):
      return None

  return revisions_to_sync
2354 def PerformPreBuildCleanup(self
):
2355 """Performs necessary cleanup between runs."""
2356 print 'Cleaning up between runs.'
2359 # Having these pyc files around between runs can confuse the
2360 # perf tests and cause them to crash.
2361 for (path
, _
, files
) in os
.walk(self
.src_cwd
):
2362 for cur_file
in files
:
2363 if cur_file
.endswith('.pyc'):
2364 path_to_file
= os
.path
.join(path
, cur_file
)
2365 os
.remove(path_to_file
)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Returns:
    True if successful.
  """
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  # Only delete the directory when the DEPS file flipped between the
  # WebKit and Blink repositories since the previous sync.
  if self.was_blink != is_blink:
    self.was_blink = is_blink
    # Removes third_party/Webkit directory.
    return bisect_utils.RemoveThirdPartyDirectory('Webkit')
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--delete']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  # RunProcess returns 0 on success, so invert it for a boolean result.
  return not return_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--create']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  # RunProcess returns 0 on success, so invert it for a boolean result.
  return not return_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Returns:
    True if successful.
  """
  if depot == 'chromium' or depot == 'android-chrome':
    # Removes third_party/libjingle. At some point, libjingle was causing
    # issues syncing when using the git workflow (crbug.com/266324).
    os.chdir(self.src_cwd)
    if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
      return False
    # Removes third_party/skia. At some point, skia was causing
    # issues syncing when using the git workflow (crbug.com/377951).
    if not bisect_utils.RemoveThirdPartyDirectory('skia'):
      return False
    if depot == 'chromium':
      # The fast webkit cleanup doesn't work for android_chrome
      # The switch from Webkit to Blink that this deals with now happened
      # quite a long time ago so this is unlikely to be a problem.
      return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  if depot == 'cros':
    return self.CreateCrosChroot()
  else:
    return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot == 'chromium':
    if self.source_control.IsGit():
      cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
      output = CheckRunGit(cmd)

      files = output.splitlines()

      # A commit touching only DEPS is a roll; nothing to build or test.
      if len(files) == 1 and files[0] == 'DEPS':
        return True

  return False
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                            skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, DEPS-roll-only revisions may be skipped.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  success = True

  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
                                      current_revision)
      if not self.source_control.SyncToRevision(current_revision,
                                                sync_client):
        success = False
        break

  if success:
    success = self.RunPostSync(depot)
    if success:
      if skippable and self.ShouldSkipRevision(depot, revision):
        return ('Skipped revision: [%s]' % str(revision),
                BUILD_RESULT_SKIPPED)

      start_build_time = time.time()
      if self.BuildCurrentRevision(depot, revision):
        after_build_time = time.time()
        results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                         metric)
        # Restore build output directory once the tests are done, to avoid
        # any discrepancies.
        if self.IsDownloadable(depot) and revision:
          self.BackupOrRestoreOutputdirectory(restore=True)

        if results[1] == 0:
          external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
              depot, revision)

          if not external_revisions is None:
            return (results[0], results[1], external_revisions,
                    time.time() - after_build_time, after_build_time -
                    start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                    BUILD_RESULT_FAIL)
        else:
          return results
      else:
        return ('Failed to build revision: [%s]' % (str(revision, )),
                BUILD_RESULT_FAIL)
    else:
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  else:
    return ('Failed to sync revision: [%s]' % (str(revision, )),
            BUILD_RESULT_FAIL)
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
    dist_to_good_value = abs(current_value['std_dev'] -
        known_good_value['std_dev'])
    dist_to_bad_value = abs(current_value['std_dev'] -
        known_bad_value['std_dev'])
  else:
    dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
    dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])

  return dist_to_good_value < dist_to_bad_value
2600 def _GetDepotDirectory(self
, depot_name
):
2601 if depot_name
== 'chromium':
2603 elif depot_name
== 'cros':
2604 return self
.cros_cwd
2605 elif depot_name
in DEPOT_NAMES
:
2606 return self
.depot_cwd
[depot_name
]
2608 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2609 ' was added without proper support?' % depot_name
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
  """Fills in the v8 bleeding_edge revisions mapped from the v8 trunk
  revisions at each end of the bisect range.

  Searches forward from the min revision and backward from the max revision
  so that the bleeding_edge range covers the trunk range.
  """
  r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
      search_forward=True)
  r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
      search_forward=False)
  min_revision_data['external']['v8_bleeding_edge'] = r1
  max_revision_data['external']['v8_bleeding_edge'] = r2

  if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          min_revision_data['revision']) or
      not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          max_revision_data['revision'])):
    self.warnings.append('Trunk revisions in V8 did not map directly to '
        'bleeding_edge. Attempted to expand the range to find V8 rolls which '
        'did map directly to bleeding_edge revisions, but results might not '
        'be completely accurate.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    # Skip depots that are restricted to a platform other than this one.
    if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        continue

    if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    # No revision change in this depot across the range: nothing to bisect.
    if (min_revision_data['external'].get(next_depot) ==
        max_revision_data['external'].get(next_depot)):
      continue

    if (min_revision_data['external'].get(next_depot) and
        max_revision_data['external'].get(next_depot)):
      external_depot = next_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap the merged v8 checkout for the bleeding_edge one, remembering
    # how to undo the swap when the bisect finishes.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot being bisected.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
                                                 target_depot,
                                                 cmd,
                                                 metric)

  good_run_results = None

  # Only bother running the good revision when the bad one succeeded.
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
                                                    target_depot,
                                                    cmd,
                                                    metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift the sort keys of everything after the insertion point so the
  # newly inserted revisions slot in between. (items() works identically
  # to the deprecated iteritems() here.)
  for _, v in revision_data.items():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  for i in range(num_depot_revisions):
    r = revisions[i]
    revision_data[r] = {'revision' : r,
                        'depot' : depot,
                        'value' : None,
                        'perf_time' : 0,
                        'build_time' : 0,
                        'passed' : '?',
                        'sort' : i + sort + 1}
2791 def PrintRevisionsToBisectMessage(self
, revision_list
, depot
):
2792 if self
.opts
.output_buildbot_annotations
:
2793 step_name
= 'Bisection Range: [%s - %s]' % (
2794 revision_list
[len(revision_list
)-1], revision_list
[0])
2795 bisect_utils
.OutputAnnotationStepStart(step_name
)
2798 print 'Revisions to bisect on [%s]:' % depot
2799 for revision_id
in revision_list
:
2800 print ' -> %s' % (revision_id
, )
2803 if self
.opts
.output_buildbot_annotations
:
2804 bisect_utils
.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if self.source_control.IsGit() and target_depot != 'cros':
    # Compare commit timestamps since git hashes aren't ordered.
    cmd = ['log', '--format=%ct', '-1', good_revision]
    cwd = self._GetDepotDirectory(target_depot)

    output = CheckRunGit(cmd, cwd=cwd)
    good_commit_time = int(output)

    cmd = ['log', '--format=%ct', '-1', bad_revision]
    output = CheckRunGit(cmd, cwd=cwd)
    bad_commit_time = int(output)

    return good_commit_time <= bad_commit_time
  else:
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)
2880 def Run(self
, command_to_run
, bad_revision_in
, good_revision_in
, metric
):
2881 """Given known good and bad revisions, run a binary search on all
2882 intermediate revisions to determine the CL where the performance regression
2886 command_to_run: Specify the command to execute the performance test.
2887 good_revision: Number/tag of the known good revision.
2888 bad_revision: Number/tag of the known bad revision.
2889 metric: The performance metric to monitor.
2892 A dict with 2 members, 'revision_data' and 'error'. On success,
2893 'revision_data' will contain a dict mapping revision ids to
2894 data about that revision. Each piece of revision data consists of a
2895 dict with the following keys:
2897 'passed': Represents whether the performance test was successful at
2898 that revision. Possible values include: 1 (passed), 0 (failed),
2899 '?' (skipped), 'F' (build failed).
2900 'depot': The depot that this revision is from (ie. WebKit)
2901 'external': If the revision is a 'src' revision, 'external' contains
2902 the revisions of each of the external libraries.
2903 'sort': A sort value for sorting the dict in order of commits.
2920 If an error occurred, the 'error' field will contain the message and
2921 'revision_data' will be empty.
2923 results
= {'revision_data' : {},
2926 # Choose depot to bisect first
2927 target_depot
= 'chromium'
2928 if self
.opts
.target_platform
== 'cros':
2929 target_depot
= 'cros'
2930 elif self
.opts
.target_platform
== 'android-chrome':
2931 target_depot
= 'android-chrome'
2934 self
.ChangeToDepotWorkingDirectory(target_depot
)
2936 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2937 bad_revision
= self
.source_control
.ResolveToRevision(bad_revision_in
,
2939 good_revision
= self
.source_control
.ResolveToRevision(good_revision_in
,
2945 if bad_revision
is None:
2946 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in
,)
2949 if good_revision
is None:
2950 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in
,)
2953 # Check that they didn't accidentally swap good and bad revisions.
2954 if not self
.CheckIfRevisionsInProperOrder(
2955 target_depot
, good_revision
, bad_revision
):
2956 results
['error'] = 'bad_revision < good_revision, did you swap these '\
2960 (bad_revision
, good_revision
) = self
.NudgeRevisionsIfDEPSChange(
2961 bad_revision
, good_revision
)
2963 if self
.opts
.output_buildbot_annotations
:
2964 bisect_utils
.OutputAnnotationStepStart('Gathering Revisions')
2966 print 'Gathering revision range for bisection.'
2967 # Retrieve a list of revisions to do bisection on.
2968 src_revision_list
= self
.GetRevisionList(target_depot
,
2972 if self
.opts
.output_buildbot_annotations
:
2973 bisect_utils
.OutputAnnotationStepClosed()
2975 if src_revision_list
:
2976 # revision_data will store information about a revision such as the
2977 # depot it came from, the webkit/V8 revision at that time,
2978 # performance timing, build state, etc...
2979 revision_data
= results
['revision_data']
2981 # revision_list is the list we're binary searching through at the moment.
2986 for current_revision_id
in src_revision_list
:
2989 revision_data
[current_revision_id
] = {'value' : None,
2991 'depot' : target_depot
,
2995 'sort' : sort_key_ids
}
2996 revision_list
.append(current_revision_id
)
2999 max_revision
= len(revision_list
) - 1
3001 self
.PrintRevisionsToBisectMessage(revision_list
, target_depot
)
3003 if self
.opts
.output_buildbot_annotations
:
3004 bisect_utils
.OutputAnnotationStepStart('Gathering Reference Values')
3006 print 'Gathering reference values for bisection.'
3008 # Perform the performance tests on the good and bad revisions, to get
3010 (bad_results
, good_results
) = self
.GatherReferenceValues(good_revision
,
3016 if self
.opts
.output_buildbot_annotations
:
3017 bisect_utils
.OutputAnnotationStepClosed()
3020 results
['error'] = ('An error occurred while building and running '
3021 'the \'bad\' reference value. The bisect cannot continue without '
3022 'a working \'bad\' revision to start from.\n\nError: %s' %
3027 results
['error'] = ('An error occurred while building and running '
3028 'the \'good\' reference value. The bisect cannot continue without '
3029 'a working \'good\' revision to start from.\n\nError: %s' %
3034 # We need these reference values to determine if later runs should be
3035 # classified as pass or fail.
3036 known_bad_value
= bad_results
[0]
3037 known_good_value
= good_results
[0]
3039 # Can just mark the good and bad revisions explicitly here since we
3040 # already know the results.
3041 bad_revision_data
= revision_data
[revision_list
[0]]
3042 bad_revision_data
['external'] = bad_results
[2]
3043 bad_revision_data
['perf_time'] = bad_results
[3]
3044 bad_revision_data
['build_time'] = bad_results
[4]
3045 bad_revision_data
['passed'] = False
3046 bad_revision_data
['value'] = known_bad_value
3048 good_revision_data
= revision_data
[revision_list
[max_revision
]]
3049 good_revision_data
['external'] = good_results
[2]
3050 good_revision_data
['perf_time'] = good_results
[3]
3051 good_revision_data
['build_time'] = good_results
[4]
3052 good_revision_data
['passed'] = True
3053 good_revision_data
['value'] = known_good_value
3055 next_revision_depot
= target_depot
3058 if not revision_list
:
3061 min_revision_data
= revision_data
[revision_list
[min_revision
]]
3062 max_revision_data
= revision_data
[revision_list
[max_revision
]]
3064 if max_revision
- min_revision
<= 1:
3065 current_depot
= min_revision_data
['depot']
3066 if min_revision_data
['passed'] == '?':
3067 next_revision_index
= min_revision
3068 elif max_revision_data
['passed'] == '?':
3069 next_revision_index
= max_revision
3070 elif current_depot
in ['android-chrome', 'cros', 'chromium', 'v8']:
3071 previous_revision
= revision_list
[min_revision
]
3072 # If there were changes to any of the external libraries we track,
3073 # should bisect the changes there as well.
3074 external_depot
= self
._FindNextDepotToBisect
(current_depot
,
3075 previous_revision
, min_revision_data
, max_revision_data
)
3077 # If there was no change in any of the external depots, the search
3079 if not external_depot
:
3080 if current_depot
== 'v8':
3081 self
.warnings
.append('Unfortunately, V8 bisection couldn\'t '
3082 'continue any further. The script can only bisect into '
3083 'V8\'s bleeding_edge repository if both the current and '
3084 'previous revisions in trunk map directly to revisions in '
3088 earliest_revision
= max_revision_data
['external'][external_depot
]
3089 latest_revision
= min_revision_data
['external'][external_depot
]
3091 new_revision_list
= self
.PrepareToBisectOnDepot(external_depot
,
3094 next_revision_depot
,
3097 if not new_revision_list
:
3098 results
['error'] = 'An error occurred attempting to retrieve'\
3099 ' revision range: [%s..%s]' % \
3100 (earliest_revision
, latest_revision
)
3103 self
.AddRevisionsIntoRevisionData(new_revision_list
,
3105 min_revision_data
['sort'],
3108 # Reset the bisection and perform it on the newly inserted
3110 revision_list
= new_revision_list
3112 max_revision
= len(revision_list
) - 1
3113 sort_key_ids
+= len(revision_list
)
3115 print 'Regression in metric:%s appears to be the result of changes'\
3116 ' in [%s].' % (metric
, external_depot
)
3118 self
.PrintRevisionsToBisectMessage(revision_list
, external_depot
)
3124 next_revision_index
= int((max_revision
- min_revision
) / 2) +\
3127 next_revision_id
= revision_list
[next_revision_index
]
3128 next_revision_data
= revision_data
[next_revision_id
]
3129 next_revision_depot
= next_revision_data
['depot']
3131 self
.ChangeToDepotWorkingDirectory(next_revision_depot
)
3133 if self
.opts
.output_buildbot_annotations
:
3134 step_name
= 'Working on [%s]' % next_revision_id
3135 bisect_utils
.OutputAnnotationStepStart(step_name
)
3137 print 'Working on revision: [%s]' % next_revision_id
3139 run_results
= self
.SyncBuildAndRunRevision(next_revision_id
,
3140 next_revision_depot
,
3142 metric
, skippable
=True)
3144 # If the build is successful, check whether or not the metric
3146 if not run_results
[1]:
3147 if len(run_results
) > 2:
3148 next_revision_data
['external'] = run_results
[2]
3149 next_revision_data
['perf_time'] = run_results
[3]
3150 next_revision_data
['build_time'] = run_results
[4]
3152 passed_regression
= self
._CheckIfRunPassed
(run_results
[0],
3156 next_revision_data
['passed'] = passed_regression
3157 next_revision_data
['value'] = run_results
[0]
3159 if passed_regression
:
3160 max_revision
= next_revision_index
3162 min_revision
= next_revision_index
3164 if run_results
[1] == BUILD_RESULT_SKIPPED
:
3165 next_revision_data
['passed'] = 'Skipped'
3166 elif run_results
[1] == BUILD_RESULT_FAIL
:
3167 next_revision_data
['passed'] = 'Build Failed'
3169 print run_results
[0]
3171 # If the build is broken, remove it and redo search.
3172 revision_list
.pop(next_revision_index
)
3176 if self
.opts
.output_buildbot_annotations
:
3177 self
._PrintPartialResults
(results
)
3178 bisect_utils
.OutputAnnotationStepClosed()
3180 # Weren't able to sync and retrieve the revision range.
3181 results
['error'] = 'An error occurred attempting to retrieve revision '\
3182 'range: [%s..%s]' % (good_revision
, bad_revision
)
  def _PrintPartialResults(self, results_dict):
    """Prints a partial-progress table of the commits tested so far.

    Args:
      results_dict: Dict containing a 'revision_data' mapping of
          revision id -> per-revision bisect data.
    """
    revision_data = results_dict['revision_data']
    # Order revisions by their 'sort' key so the table reads oldest-to-newest.
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    # confidence is forced to 100 and final_step=False so the table renders
    # in its "partial results" form.
    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  100, final_step=False)
  def _PrintConfidence(self, results_dict):
    """Prints the confidence percentage of the bisection result.

    Args:
      results_dict: Dict containing a 'confidence' value (formatted as %d).
    """
    # The perf dashboard specifically looks for the string
    # "Confidence in Bisection Results: 100%" to decide whether or not
    # to cc the author(s). If you change this, please update the perf
    # dashboard as well.
    print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3204 def _PrintBanner(self
, results_dict
):
3206 print " __o_\___ Aw Snap! We hit a speed bump!"
3207 print "=-O----O-'__.~.___________________________________"
3209 if self
._IsBisectModeReturnCode
():
3210 print ('Bisect reproduced a change in return codes while running the '
3211 'performance test.')
3213 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3214 '%s metric.' % (results_dict
['regression_size'],
3215 results_dict
['regression_std_err'], '/'.join(self
.opts
.metric
)))
3216 self
._PrintConfidence
(results_dict
)
3218 def _PrintFailedBanner(self
, results_dict
):
3220 if self
._IsBisectModeReturnCode
():
3221 print 'Bisect could not reproduce a change in the return code.'
3223 print ('Bisect could not reproduce a change in the '
3224 '%s metric.' % '/'.join(self
.opts
.metric
))
3227 def _GetViewVCLinkFromDepotAndHash(self
, cl
, depot
):
3228 info
= self
.source_control
.QueryRevisionInfo(cl
,
3229 self
._GetDepotDirectory
(depot
))
3230 if depot
and DEPOT_DEPS_NAME
[depot
].has_key('viewvc'):
3232 # Format is "git-svn-id: svn://....@123456 <other data>"
3233 svn_line
= [i
for i
in info
['body'].splitlines() if 'git-svn-id:' in i
]
3234 svn_revision
= svn_line
[0].split('@')
3235 svn_revision
= svn_revision
[1].split(' ')[0]
3236 return DEPOT_DEPS_NAME
[depot
]['viewvc'] + svn_revision
3241 def _PrintRevisionInfo(self
, cl
, info
, depot
=None):
3242 # The perf dashboard specifically looks for the string
3243 # "Author : " to parse out who to cc on a bug. If you change the
3244 # formatting here, please update the perf dashboard as well.
3246 print 'Subject : %s' % info
['subject']
3247 print 'Author : %s' % info
['author']
3248 if not info
['email'].startswith(info
['author']):
3249 print 'Email : %s' % info
['email']
3250 commit_link
= self
._GetViewVCLinkFromDepotAndHash
(cl
, depot
)
3252 print 'Link : %s' % commit_link
3255 print 'Failed to parse svn revision from body:'
3259 print 'Commit : %s' % cl
3260 print 'Date : %s' % info
['date']
3262 def _PrintTableRow(self
, column_widths
, row_data
):
3263 assert len(column_widths
) == len(row_data
)
3266 for i
in xrange(len(column_widths
)):
3267 current_row_data
= row_data
[i
].center(column_widths
[i
], ' ')
3268 text
+= ('%%%ds' % column_widths
[i
]) % current_row_data
3271 def _PrintTestedCommitsHeader(self
):
3272 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
3273 self
._PrintTableRow
(
3274 [20, 70, 14, 12, 13],
3275 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3276 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
3277 self
._PrintTableRow
(
3278 [20, 70, 14, 12, 13],
3279 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3280 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
3281 self
._PrintTableRow
(
3283 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3285 assert False, "Invalid bisect_mode specified."
3286 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3287 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3288 'State'.center(13, ' '))
3290 def _PrintTestedCommitsEntry(self
, current_data
, cl_link
, state_str
):
3291 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
3292 std_error
= '+-%.02f' % current_data
['value']['std_err']
3293 mean
= '%.02f' % current_data
['value']['mean']
3294 self
._PrintTableRow
(
3295 [20, 70, 12, 14, 13],
3296 [current_data
['depot'], cl_link
, mean
, std_error
, state_str
])
3297 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
3298 std_error
= '+-%.02f' % current_data
['value']['std_err']
3299 mean
= '%.02f' % current_data
['value']['mean']
3300 self
._PrintTableRow
(
3301 [20, 70, 12, 14, 13],
3302 [current_data
['depot'], cl_link
, std_error
, mean
, state_str
])
3303 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
3304 mean
= '%d' % current_data
['value']['mean']
3305 self
._PrintTableRow
(
3307 [current_data
['depot'], cl_link
, mean
, state_str
])
3309 def _PrintTestedCommitsTable(self
, revision_data_sorted
,
3310 first_working_revision
, last_broken_revision
, confidence
,
3314 print 'Tested commits:'
3316 print 'Partial results:'
3317 self
._PrintTestedCommitsHeader
()
3319 for current_id
, current_data
in revision_data_sorted
:
3320 if current_data
['value']:
3321 if (current_id
== last_broken_revision
or
3322 current_id
== first_working_revision
):
3323 # If confidence is too low, don't add this empty line since it's
3324 # used to put focus on a suspected CL.
3325 if confidence
and final_step
:
3328 if state
== 2 and not final_step
:
3329 # Just want a separation between "bad" and "good" cl's.
3333 if state
== 1 and final_step
:
3334 state_str
= 'Suspected CL'
3338 # If confidence is too low, don't bother outputting good/bad.
3341 state_str
= state_str
.center(13, ' ')
3343 cl_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
3344 current_data
['depot'])
3346 cl_link
= current_id
3347 self
._PrintTestedCommitsEntry
(current_data
, cl_link
, state_str
)
3349 def _PrintReproSteps(self
):
3351 print 'To reproduce locally:'
3352 print '$ ' + self
.opts
.command
3353 if bisect_utils
.IsTelemetryCommand(self
.opts
.command
):
3355 print 'Also consider passing --profiler=list to see available profilers.'
3357 def _PrintOtherRegressions(self
, other_regressions
, revision_data
):
3359 print 'Other regressions may have occurred:'
3360 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3361 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3362 for regression
in other_regressions
:
3363 current_id
, previous_id
, confidence
= regression
3364 current_data
= revision_data
[current_id
]
3365 previous_data
= revision_data
[previous_id
]
3367 current_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
3368 current_data
['depot'])
3369 previous_link
= self
._GetViewVCLinkFromDepotAndHash
(previous_id
,
3370 previous_data
['depot'])
3372 # If we can't map it to a viewable URL, at least show the original hash.
3373 if not current_link
:
3374 current_link
= current_id
3375 if not previous_link
:
3376 previous_link
= previous_id
3378 print ' %8s %70s %s' % (
3379 current_data
['depot'], current_link
,
3380 ('%d%%' % confidence
).center(10, ' '))
3381 print ' %8s %70s' % (
3382 previous_data
['depot'], previous_link
)
3385 def _PrintStepTime(self
, revision_data_sorted
):
3386 step_perf_time_avg
= 0.0
3387 step_build_time_avg
= 0.0
3389 for _
, current_data
in revision_data_sorted
:
3390 if current_data
['value']:
3391 step_perf_time_avg
+= current_data
['perf_time']
3392 step_build_time_avg
+= current_data
['build_time']
3395 step_perf_time_avg
= step_perf_time_avg
/ step_count
3396 step_build_time_avg
= step_build_time_avg
/ step_count
3398 print 'Average build time : %s' % datetime
.timedelta(
3399 seconds
=int(step_build_time_avg
))
3400 print 'Average test time : %s' % datetime
.timedelta(
3401 seconds
=int(step_perf_time_avg
))
3403 def _PrintWarnings(self
):
3404 if not self
.warnings
:
3408 for w
in set(self
.warnings
):
3411 def _FindOtherRegressions(self
, revision_data_sorted
, bad_greater_than_good
):
3412 other_regressions
= []
3413 previous_values
= []
3415 for current_id
, current_data
in revision_data_sorted
:
3416 current_values
= current_data
['value']
3418 current_values
= current_values
['values']
3420 confidence
= CalculateConfidence(previous_values
, [current_values
])
3421 mean_of_prev_runs
= CalculateMean(sum(previous_values
, []))
3422 mean_of_current_runs
= CalculateMean(current_values
)
3424 # Check that the potential regression is in the same direction as
3425 # the overall regression. If the mean of the previous runs < the
3426 # mean of the current runs, this local regression is in same
3428 prev_less_than_current
= mean_of_prev_runs
< mean_of_current_runs
3429 is_same_direction
= (prev_less_than_current
if
3430 bad_greater_than_good
else not prev_less_than_current
)
3432 # Only report potential regressions with high confidence.
3433 if is_same_direction
and confidence
> 50:
3434 other_regressions
.append([current_id
, previous_id
, confidence
])
3435 previous_values
.append(current_values
)
3436 previous_id
= current_id
3437 return other_regressions
3440 def _GetResultsDict(self
, revision_data
, revision_data_sorted
):
3441 # Find range where it possibly broke.
3442 first_working_revision
= None
3443 first_working_revision_index
= -1
3444 last_broken_revision
= None
3445 last_broken_revision_index
= -1
3447 for i
in xrange(len(revision_data_sorted
)):
3448 k
, v
= revision_data_sorted
[i
]
3449 if v
['passed'] == 1:
3450 if not first_working_revision
:
3451 first_working_revision
= k
3452 first_working_revision_index
= i
3455 last_broken_revision
= k
3456 last_broken_revision_index
= i
3458 if last_broken_revision
!= None and first_working_revision
!= None:
3460 for i
in xrange(0, last_broken_revision_index
+ 1):
3461 if revision_data_sorted
[i
][1]['value']:
3462 broken_means
.append(revision_data_sorted
[i
][1]['value']['values'])
3465 for i
in xrange(first_working_revision_index
, len(revision_data_sorted
)):
3466 if revision_data_sorted
[i
][1]['value']:
3467 working_means
.append(revision_data_sorted
[i
][1]['value']['values'])
3469 # Flatten the lists to calculate mean of all values.
3470 working_mean
= sum(working_means
, [])
3471 broken_mean
= sum(broken_means
, [])
3473 # Calculate the approximate size of the regression
3474 mean_of_bad_runs
= CalculateMean(broken_mean
)
3475 mean_of_good_runs
= CalculateMean(working_mean
)
3477 regression_size
= 100 * CalculateRelativeChange(mean_of_good_runs
,
3479 if math
.isnan(regression_size
):
3480 regression_size
= 'zero-to-nonzero'
3482 regression_std_err
= math
.fabs(CalculatePooledStandardError(
3483 [working_mean
, broken_mean
]) /
3484 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0
3486 # Give a "confidence" in the bisect. At the moment we use how distinct the
3487 # values are before and after the last broken revision, and how noisy the
3489 confidence
= CalculateConfidence(working_means
, broken_means
)
3491 culprit_revisions
= []
3494 self
.ChangeToDepotWorkingDirectory(
3495 revision_data
[last_broken_revision
]['depot'])
3497 if revision_data
[last_broken_revision
]['depot'] == 'cros':
3498 # Want to get a list of all the commits and what depots they belong
3499 # to so that we can grab info about each.
3500 cmd
= ['repo', 'forall', '-c',
3501 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3502 last_broken_revision
, first_working_revision
+ 1)]
3503 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
3506 assert not return_code
, 'An error occurred while running'\
3507 ' "%s"' % ' '.join(cmd
)
3510 for l
in output
.split('\n'):
3512 # Output will be in form:
3514 # /path_to_other_depot
3522 contents
= l
.split(' ')
3523 if len(contents
) > 1:
3524 changes
.append([last_depot
, contents
[0]])
3527 info
= self
.source_control
.QueryRevisionInfo(c
[1])
3528 culprit_revisions
.append((c
[1], info
, None))
3530 for i
in xrange(last_broken_revision_index
, len(revision_data_sorted
)):
3531 k
, v
= revision_data_sorted
[i
]
3532 if k
== first_working_revision
:
3534 self
.ChangeToDepotWorkingDirectory(v
['depot'])
3535 info
= self
.source_control
.QueryRevisionInfo(k
)
3536 culprit_revisions
.append((k
, info
, v
['depot']))
3539 # Check for any other possible regression ranges
3540 other_regressions
= self
._FindOtherRegressions
(revision_data_sorted
,
3541 mean_of_bad_runs
> mean_of_good_runs
)
3544 'first_working_revision': first_working_revision
,
3545 'last_broken_revision': last_broken_revision
,
3546 'culprit_revisions': culprit_revisions
,
3547 'other_regressions': other_regressions
,
3548 'regression_size': regression_size
,
3549 'regression_std_err': regression_std_err
,
3550 'confidence': confidence
,
3553 def _CheckForWarnings(self
, results_dict
):
3554 if len(results_dict
['culprit_revisions']) > 1:
3555 self
.warnings
.append('Due to build errors, regression range could '
3556 'not be narrowed down to a single commit.')
3557 if self
.opts
.repeat_test_count
== 1:
3558 self
.warnings
.append('Tests were only set to run once. This may '
3559 'be insufficient to get meaningful results.')
3560 if results_dict
['confidence'] < 100:
3561 if results_dict
['confidence']:
3562 self
.warnings
.append(
3563 'Confidence is less than 100%. There could be other candidates '
3564 'for this regression. Try bisecting again with increased '
3565 'repeat_count or on a sub-metric that shows the regression more '
3568 self
.warnings
.append(
3569 'Confidence is 0%. Try bisecting again on another platform, with '
3570 'increased repeat_count or on a sub-metric that shows the '
3571 'regression more clearly.')
3573 def FormatAndPrintResults(self
, bisect_results
):
3574 """Prints the results from a bisection run in a readable format.
3577 bisect_results: The results from a bisection test run.
3579 revision_data
= bisect_results
['revision_data']
3580 revision_data_sorted
= sorted(revision_data
.iteritems(),
3581 key
= lambda x
: x
[1]['sort'])
3582 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
3584 self
._CheckForWarnings
(results_dict
)
3586 if self
.opts
.output_buildbot_annotations
:
3587 bisect_utils
.OutputAnnotationStepStart('Build Status Per Revision')
3590 print 'Full results of bisection:'
3591 for current_id
, current_data
in revision_data_sorted
:
3592 build_status
= current_data
['passed']
3594 if type(build_status
) is bool:
3596 build_status
= 'Good'
3598 build_status
= 'Bad'
3600 print ' %20s %40s %s' % (current_data
['depot'],
3601 current_id
, build_status
)
3604 if self
.opts
.output_buildbot_annotations
:
3605 bisect_utils
.OutputAnnotationStepClosed()
3606 # The perf dashboard scrapes the "results" step in order to comment on
3607 # bugs. If you change this, please update the perf dashboard as well.
3608 bisect_utils
.OutputAnnotationStepStart('Results')
3610 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
3611 self
._PrintBanner
(results_dict
)
3612 for culprit
in results_dict
['culprit_revisions']:
3613 cl
, info
, depot
= culprit
3614 self
._PrintRevisionInfo
(cl
, info
, depot
)
3615 self
._PrintReproSteps
()
3616 if results_dict
['other_regressions']:
3617 self
._PrintOtherRegressions
(results_dict
['other_regressions'],
3620 self
._PrintFailedBanner
(results_dict
)
3621 self
._PrintReproSteps
()
3623 self
._PrintTestedCommitsTable
(revision_data_sorted
,
3624 results_dict
['first_working_revision'],
3625 results_dict
['last_broken_revision'],
3626 results_dict
['confidence'])
3627 self
._PrintStepTime
(revision_data_sorted
)
3628 self
._PrintWarnings
()
3630 if self
.opts
.output_buildbot_annotations
:
3631 bisect_utils
.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  (output, _) = RunGit(['rev-parse', '--is-inside-work-tree'])

  if output.strip() == 'true':
    return GitSourceControl(opts)

  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line (unused).

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError as e:
    # A tree that vanished out from under us is fine; anything else is a
    # genuine failure.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.

  Args:
    build_type: Build configuration subdirectory name ('Release'/'Debug').

  Returns:
    True if both out/<build_type> and build/<build_type> were cleared.
  """
  if RmTreeAndMkDir(os.path.join('out', build_type)):
    if RmTreeAndMkDir(os.path.join('build', build_type)):
      return True
  return False
3697 class BisectOptions(object):
3698 """Options to be used when running bisection."""
3700 super(BisectOptions
, self
).__init
__()
3702 self
.target_platform
= 'chromium'
3703 self
.build_preference
= None
3704 self
.good_revision
= None
3705 self
.bad_revision
= None
3706 self
.use_goma
= None
3707 self
.cros_board
= None
3708 self
.cros_remote_ip
= None
3709 self
.repeat_test_count
= 20
3710 self
.truncate_percent
= 25
3711 self
.max_time_minutes
= 20
3714 self
.output_buildbot_annotations
= None
3715 self
.no_custom_deps
= False
3716 self
.working_directory
= None
3717 self
.extra_src
= None
3718 self
.debug_ignore_build
= None
3719 self
.debug_ignore_sync
= None
3720 self
.debug_ignore_perf_test
= None
3721 self
.gs_bucket
= None
3722 self
.target_arch
= 'ia32'
3723 self
.target_build_type
= 'Release'
3724 self
.builder_host
= None
3725 self
.builder_port
= None
3726 self
.bisect_mode
= BISECT_MODE_MEAN
3728 def _CreateCommandLineParser(self
):
3729 """Creates a parser with bisect options.
3732 An instance of optparse.OptionParser.
3734 usage
= ('%prog [options] [-- chromium-options]\n'
3735 'Perform binary search on revision history to find a minimal '
3736 'range of revisions where a peformance metric regressed.\n')
3738 parser
= optparse
.OptionParser(usage
=usage
)
3740 group
= optparse
.OptionGroup(parser
, 'Bisect options')
3741 group
.add_option('-c', '--command',
3743 help='A command to execute your performance test at' +
3744 ' each point in the bisection.')
3745 group
.add_option('-b', '--bad_revision',
3747 help='A bad revision to start bisection. ' +
3748 'Must be later than good revision. May be either a git' +
3749 ' or svn revision.')
3750 group
.add_option('-g', '--good_revision',
3752 help='A revision to start bisection where performance' +
3753 ' test is known to pass. Must be earlier than the ' +
3754 'bad revision. May be either a git or svn revision.')
3755 group
.add_option('-m', '--metric',
3757 help='The desired metric to bisect on. For example ' +
3758 '"vm_rss_final_b/vm_rss_f_b"')
3759 group
.add_option('-r', '--repeat_test_count',
3762 help='The number of times to repeat the performance '
3763 'test. Values will be clamped to range [1, 100]. '
3764 'Default value is 20.')
3765 group
.add_option('--max_time_minutes',
3768 help='The maximum time (in minutes) to take running the '
3769 'performance tests. The script will run the performance '
3770 'tests according to --repeat_test_count, so long as it '
3771 'doesn\'t exceed --max_time_minutes. Values will be '
3772 'clamped to range [1, 60].'
3773 'Default value is 20.')
3774 group
.add_option('-t', '--truncate_percent',
3777 help='The highest/lowest % are discarded to form a '
3778 'truncated mean. Values will be clamped to range [0, '
3779 '25]. Default value is 25 (highest/lowest 25% will be '
3781 group
.add_option('--bisect_mode',
3783 choices
=[BISECT_MODE_MEAN
, BISECT_MODE_STD_DEV
,
3784 BISECT_MODE_RETURN_CODE
],
3785 default
=BISECT_MODE_MEAN
,
3786 help='The bisect mode. Choices are to bisect on the '
3787 'difference in mean, std_dev, or return_code.')
3788 parser
.add_option_group(group
)
3790 group
= optparse
.OptionGroup(parser
, 'Build options')
3791 group
.add_option('-w', '--working_directory',
3793 help='Path to the working directory where the script '
3794 'will do an initial checkout of the chromium depot. The '
3795 'files will be placed in a subdirectory "bisect" under '
3796 'working_directory and that will be used to perform the '
3797 'bisection. This parameter is optional, if it is not '
3798 'supplied, the script will work from the current depot.')
3799 group
.add_option('--build_preference',
3801 choices
=['msvs', 'ninja', 'make'],
3802 help='The preferred build system to use. On linux/mac '
3803 'the options are make/ninja. On Windows, the options '
3805 group
.add_option('--target_platform',
3807 choices
=['chromium', 'cros', 'android', 'android-chrome'],
3809 help='The target platform. Choices are "chromium" '
3810 '(current platform), "cros", or "android". If you '
3811 'specify something other than "chromium", you must be '
3812 'properly set up to build that platform.')
3813 group
.add_option('--no_custom_deps',
3814 dest
='no_custom_deps',
3815 action
="store_true",
3817 help='Run the script with custom_deps or not.')
3818 group
.add_option('--extra_src',
3820 help='Path to a script which can be used to modify '
3821 'the bisect script\'s behavior.')
3822 group
.add_option('--cros_board',
3824 help='The cros board type to build.')
3825 group
.add_option('--cros_remote_ip',
3827 help='The remote machine to image to.')
3828 group
.add_option('--use_goma',
3829 action
="store_true",
3830 help='Add a bunch of extra threads for goma, and enable '
3832 group
.add_option('--output_buildbot_annotations',
3833 action
="store_true",
3834 help='Add extra annotation output for buildbot.')
3835 group
.add_option('--gs_bucket',
3839 help=('Name of Google Storage bucket to upload or '
3840 'download build. e.g., chrome-perf'))
3841 group
.add_option('--target_arch',
3843 choices
=['ia32', 'x64', 'arm'],
3846 help=('The target build architecture. Choices are "ia32" '
3847 '(default), "x64" or "arm".'))
3848 group
.add_option('--target_build_type',
3850 choices
=['Release', 'Debug'],
3852 help='The target build type. Choices are "Release" '
3853 '(default), or "Debug".')
3854 group
.add_option('--builder_host',
3855 dest
='builder_host',
3857 help=('Host address of server to produce build by posting'
3858 ' try job request.'))
3859 group
.add_option('--builder_port',
3860 dest
='builder_port',
3862 help=('HTTP port of the server to produce build by posting'
3863 ' try job request.'))
3864 parser
.add_option_group(group
)
3866 group
= optparse
.OptionGroup(parser
, 'Debug options')
3867 group
.add_option('--debug_ignore_build',
3868 action
="store_true",
3869 help='DEBUG: Don\'t perform builds.')
3870 group
.add_option('--debug_ignore_sync',
3871 action
="store_true",
3872 help='DEBUG: Don\'t perform syncs.')
3873 group
.add_option('--debug_ignore_perf_test',
3874 action
="store_true",
3875 help='DEBUG: Don\'t perform performance tests.')
3876 parser
.add_option_group(group
)
3879 def ParseCommandLine(self
):
3880 """Parses the command line for bisect options."""
3881 parser
= self
._CreateCommandLineParser
()
3882 (opts
, _
) = parser
.parse_args()
3885 if not opts
.command
:
3886 raise RuntimeError('missing required parameter: --command')
3888 if not opts
.good_revision
:
3889 raise RuntimeError('missing required parameter: --good_revision')
3891 if not opts
.bad_revision
:
3892 raise RuntimeError('missing required parameter: --bad_revision')
3894 if not opts
.metric
and opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
:
3895 raise RuntimeError('missing required parameter: --metric')
3898 if not cloud_storage
.List(opts
.gs_bucket
):
3899 raise RuntimeError('Invalid Google Storage: gs://%s' % opts
.gs_bucket
)
3900 if not opts
.builder_host
:
3901 raise RuntimeError('Must specify try server hostname, when '
3902 'gs_bucket is used: --builder_host')
3903 if not opts
.builder_port
:
3904 raise RuntimeError('Must specify try server port number, when '
3905 'gs_bucket is used: --builder_port')
3906 if opts
.target_platform
== 'cros':
3907 # Run sudo up front to make sure credentials are cached for later.
3908 print 'Sudo is required to build cros:'
3910 RunProcess(['sudo', 'true'])
3912 if not opts
.cros_board
:
3913 raise RuntimeError('missing required parameter: --cros_board')
3915 if not opts
.cros_remote_ip
:
3916 raise RuntimeError('missing required parameter: --cros_remote_ip')
3918 if not opts
.working_directory
:
3919 raise RuntimeError('missing required parameter: --working_directory')
3921 metric_values
= opts
.metric
.split('/')
3922 if (len(metric_values
) != 2 and
3923 opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
):
3924 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
3926 opts
.metric
= metric_values
3927 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
3928 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
3929 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
3930 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
3932 for k
, v
in opts
.__dict
__.iteritems():
3933 assert hasattr(self
, k
), "Invalid %s attribute in BisectOptions." % k
3935 except RuntimeError, e
:
3936 output_string
= StringIO
.StringIO()
3937 parser
.print_help(file=output_string
)
3938 error_message
= '%s\n\n%s' % (e
.message
, output_string
.getvalue())
3939 output_string
.close()
3940 raise RuntimeError(error_message
)
3943 def FromDict(values
):
3944 """Creates an instance of BisectOptions with the values parsed from a
3948 values: a dict containing options to set.
3951 An instance of BisectOptions.
3953 opts
= BisectOptions()
3954 for k
, v
in values
.iteritems():
3955 assert hasattr(opts
, k
), 'Invalid %s attribute in '\
3956 'BisectOptions.' % k
3959 metric_values
= opts
.metric
.split('/')
3960 if len(metric_values
) != 2:
3961 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
3963 opts
.metric
= metric_values
3964 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
3965 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
3966 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
3967 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
3975 opts
= BisectOptions()
3976 opts
.ParseCommandLine()
3979 extra_src
= bisect_utils
.LoadExtraSrc(opts
.extra_src
)
3981 raise RuntimeError("Invalid or missing --extra_src.")
3982 _AddAdditionalDepotInfo(extra_src
.GetAdditionalDepotInfo())
3984 if opts
.working_directory
:
3985 custom_deps
= bisect_utils
.DEFAULT_GCLIENT_CUSTOM_DEPS
3986 if opts
.no_custom_deps
:
3988 bisect_utils
.CreateBisectDirectoryAndSetupDepot(opts
, custom_deps
)
3990 os
.chdir(os
.path
.join(os
.getcwd(), 'src'))
3992 if not RemoveBuildFiles(opts
.target_build_type
):
3993 raise RuntimeError('Something went wrong removing the build files.')
3995 if not IsPlatformSupported(opts
):
3996 raise RuntimeError("Sorry, this platform isn't supported yet.")
3998 # Check what source control method they're using. Only support git workflow
4000 source_control
= DetermineAndCreateSourceControl(opts
)
4002 if not source_control
:
4003 raise RuntimeError("Sorry, only the git workflow is supported at the "
4006 # gClient sync seems to fail if you're not in master branch.
4007 if (not source_control
.IsInProperBranch() and
4008 not opts
.debug_ignore_sync
and
4009 not opts
.working_directory
):
4010 raise RuntimeError("You must switch to master branch to run bisection.")
4011 bisect_test
= BisectPerformanceMetrics(source_control
, opts
)
4013 bisect_results
= bisect_test
.Run(opts
.command
,
4017 if bisect_results
['error']:
4018 raise RuntimeError(bisect_results
['error'])
4019 bisect_test
.FormatAndPrintResults(bisect_results
)
4022 bisect_test
.PerformCleanup()
4023 except RuntimeError, e
:
4024 if opts
.output_buildbot_annotations
:
4025 # The perf dashboard scrapes the "results" step in order to comment on
4026 # bugs. If you change this, please update the perf dashboard as well.
4027 bisect_utils
.OutputAnnotationStepStart('Results')
4028 print 'Error: %s' % e
.message
4029 if opts
.output_buildbot_annotations
:
4030 bisect_utils
.OutputAnnotationStepClosed()
4033 if __name__
== '__main__':