Escape the ampersand in extension names before showing in menu.
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob674a782df220fea7853c60c400420abcacd398f6
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import hashlib
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
56 import bisect_utils
57 import post_perf_builder_job
58 from telemetry.page import cloud_storage
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in vars variable in DEPS file that has revision
#   information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'src': 'src',
    'recurse': True,
    'depends': None,
    'from': ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'platform': 'nt',
    'deps_var': 'angle_revision'
  },
  'v8': {
    'src': 'src/v8',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'recurse': True,
    'depends': None,
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'from': ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'recurse': True,
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# Path to the cros_sdk wrapper, relative to the bisect working directory.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# git-log grep pattern used to resolve a CrOS version string to a commit.
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# ssh test keys; the chroot one may be wiped, the script one is the fallback.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Build outcome codes returned by the build/download steps.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# tryserver. When a build request is posted with a patch, bisect builders on
# the tryserver read the SHA value from this file once the build is produced
# and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict in the same format as DEPOT_DEPS_NAME, merged on top
        of the existing entries.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # dict(a.items() + b.items()) only works on Python 2; copy-and-update is
  # equivalent (later keys win) and portable.
  merged = dict(DEPOT_DEPS_NAME)
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  # Preserve the documented contract: a data set that is empty after
  # truncation raises TypeError (previously raised implicitly by reduce()).
  if not data_set:
    raise TypeError('Data set was empty after discarding values.')

  # sum() replaces reduce(lambda x, y: float(x) + float(y), ...): same
  # result, works on Python 2 and 3, and iterates at C speed.
  return sum(float(x) for x in data_set) / kept_weight
def CalculateMean(values):
  """Calculates the arithmetic mean of a list of values."""
  # A truncated mean that truncates nothing is the plain arithmetic mean.
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number between in the range [0, 100].
  """
  # Distance between the groups: the smaller of the two gaps between the
  # extreme per-list means of the good and bad groups.
  means_good = [CalculateMean(results) for results in good_results_lists]
  means_bad = [CalculateMean(results) for results in bad_results_lists]
  dist_between_groups = min(
      math.fabs(max(means_bad) - min(means_good)),
      math.fabs(min(means_bad) - max(means_good)))

  # Noise estimate: sum of the standard deviations of all good values and
  # all bad values, each group flattened into a single sample.
  stddev_good = CalculateStandardDeviation(sum(good_results_lists, []))
  stddev_bad = CalculateStandardDeviation(sum(bad_results_lists, []))
  stddev_sum = stddev_good + stddev_bad

  ratio = dist_between_groups / max(0.0001, stddev_sum)
  return int(min(1.0, max(ratio, 0.0)) * 100.0)
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  if len(values) == 1:
    return 0.0

  mean = CalculateMean(values)
  # Sample variance: squared deviations from the mean over (n - 1).
  variance = sum((float(v) - mean) ** 2 for v in values) / (len(values) - 1)
  return math.sqrt(variance)
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  if before == after:
    return 0.0
  if before == 0:
    # Division by zero is undefined; signal with NaN.
    return float('nan')
  return math.fabs((after - before) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across several sets of samples.

  Pools the sample variances of the given sets, each weighted by its degrees
  of freedom, then scales by the square root of the sum of reciprocal sample
  sizes.

  Args:
    work_sets: A list of lists of numeric values.

  Returns:
    The pooled standard error as a float; 0.0 when every set has at most one
    value (no degrees of freedom to pool).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    # (n - 1) * s^2 accumulates each set's variance weighted by its degrees
    # of freedom.
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  if len(values) <= 1:
    return 0.0

  # Standard error of the mean: sample standard deviation over sqrt(n).
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # Cygwin's Python reports 'cygwin' rather than a 'win*' platform string.
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  # PROCESSOR_ARCHITEW6432 is only set for a 32-bit process running under
  # WoW64 on a 64-bit OS; otherwise PROCESSOR_ARCHITECTURE is authoritative.
  # Use .get() so a missing variable (e.g. a non-Windows host) returns False
  # instead of raising KeyError as the unconditional lookup used to.
  platform = os.environ.get(
      'PROCESSOR_ARCHITEW6432', os.environ.get('PROCESSOR_ARCHITECTURE'))
  return platform in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform[:5] == 'linux'
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform[:6] == 'darwin'
def GetSHA1HexDigest(contents):
  """Returns secured hash containing hexadecimal for the given contents."""
  digest = hashlib.sha1(contents)
  return digest.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Revision (or revision_patchsha) baked into the name;
        omitted when None.
    target_arch: Target architecture, e.g. 'ia32' or 'x64'.
    patch_sha: Optional SHA of an applied patch, appended to the revision.

  Returns:
    The archive base name, or '<base>_<revision>[_<sha>].zip'.
  """
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archives for x64 are still stored with the 'win32' suffix
      # (chromium_utils.PlatformName()), so both architectures map to the
      # same name. (The old code had a redundant x64 branch that also
      # returned 'win32'.)
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  if patch_sha:
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  # <builder folder>/<archive name>, relative to the storage bucket root.
  return '%s/%s' % (GetGSRootFolderName(),
                    GetZipFileName(build_revision, target_arch, patch_sha))
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      # Only report success once the file is actually on local disk.
      if os.path.exists(target_file):
        return True
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # NOTE(review): the broad catch makes this a best-effort download; any
    # cloud_storage failure falls through to returning False.
    print 'Something went wrong while fetching file from cloud: %s' % e
    # Remove a possibly partial download so callers never see a bad file.
    if os.path.exists(target_file):
      os.remove(target_file)
  return False
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Components joined into the directory path to create.

  Returns:
    True if the directory now exists; False if creation failed for any
    reason other than the directory already existing.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is fine; anything else is a failure.
    # ("except OSError, e" was Python-2-only syntax; the file already uses
    # the portable "as" form elsewhere.)
    if e.errno != errno.EEXIST:
      return False
  return True
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory.

  Args:
    filename: Path to the zip archive.
    output_dir: Directory to extract into (created if needed).
    verbose: When falling back to the Python zipfile module, print each
        extracted member name.

  Raises:
    IOError: The external unzip command returned a non-zero exit code.
  """
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if IsMac() or IsLinux():
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    # NOTE(review): if RunProcess raises, the chdir is not undone; callers
    # would be left in output_dir.
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    # Pure-Python fallback (Windows without 7-Zip).
    assert IsWindows()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, shell=True is needed so PATH is consulted for the command.
  return subprocess.call(command, shell=IsWindows())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()

  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)
  try:
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Always restore the working directory; the previous version leaked the
    # chdir if Popen/communicate raised.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, asserting that it succeeds.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    The output of the git command.
  """
  output, return_code = RunGit(command, cwd=cwd)

  assert not return_code, ('An error occurred while running'
                           ' "git %s"' % ' '.join(command))
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported."""
  if build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
    return
  if build_system != 'ninja':
    raise RuntimeError('%s build not supported.' % build_system)

  # Append ninja to GYP_GENERATORS unless it is already requested.
  gyp_var = os.getenv('GYP_GENERATORS')
  if not gyp_var or 'ninja' not in gyp_var:
    os.environ['GYP_GENERATORS'] = (gyp_var + ',ninja') if gyp_var else 'ninja'

  if IsWindows():
    os.environ['GYP_DEFINES'] = ('component=shared_library '
                                 'incremental_chrome_dll=1 disable_nacl=1 '
                                 'fastbuild=1 chromium_win_pch=0')
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build of the given targets, returning True on success."""
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  return RunProcess(cmd + targets) == 0
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build in out/<build_type>, returning True on success."""
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  return RunProcess(cmd + targets) == 0
def BuildWithVisualStudio(targets, build_type='Release'):
  """Builds the given targets with devenv.com, returning True on success."""
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]

  # devenv takes one /Project flag per target.
  for target in targets:
    cmd.extend(['/Project', target])

  return RunProcess(cmd) == 0
def WriteStringToFile(text, file_name):
  """Writes text to the named file (binary mode), overwriting any contents.

  Args:
    text: The contents to write.
    file_name: Path of the file to write.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError as e:
    # Include the underlying error so failures are diagnosable (the old
    # message dropped the caught exception entirely).
    raise RuntimeError('Error writing to file [%s]: %s' % (file_name, e))
def ReadStringFromFile(file_name):
  """Returns the full contents of the named file.

  Args:
    file_name: Path of the file to read.

  Returns:
    The file's contents as a string.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError as e:
    # Include the underlying error so failures are diagnosable (the old
    # message dropped the caught exception entirely).
    raise RuntimeError('Error reading file [%s]: %s' % (file_name, e))
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to unix-style paths.

  Only the '--- ' and '+++ ' header lines are rewritten; hunk contents are
  left untouched.

  Args:
    diff_text: A unified-diff string, possibly using Windows separators.

  Returns:
    The converted patch text, or None if diff_text is empty/None.
  """
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i, line in enumerate(diff_lines):
    if line.startswith('--- ') or line.startswith('+++ '):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: Visual Studio could not be located, or the platform
          build environment could not be set up.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # os.getenv may return None when GYP_GENERATORS is unset; the old
        # "'ninja' in os.getenv(...)" raised TypeError in that case.
        if 'ninja' in (os.getenv('GYP_GENERATORS') or ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Returns the Builder subclass matching opts.target_platform."""
    if opts.target_platform == 'cros':
      return CrosBuilder(opts)
    if opts.target_platform == 'android':
      return AndroidBuilder(opts)
    if opts.target_platform == 'android-chrome':
      return AndroidChromeBuilder(opts)
    return DesktopBuilder(opts)

  def Build(self, depot, opts):
    # Subclasses implement the actual build.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    # Subclasses implement the platform-specific output location.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # goma distributes compilation, so a high parallelism level pays off.
    threads = 64 if opts.use_goma else None

    if opts.build_preference == 'make':
      return BuildWithMake(threads, targets, opts.target_build_type)
    if opts.build_preference == 'ninja':
      return BuildWithNinja(threads, targets, opts.target_build_type)
    if opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      return BuildWithVisualStudio(targets, opts.target_build_type)
    assert False, 'No build system defined.'

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Returns the build targets needed for Android bisect runs."""
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = 64 if opts.use_goma else None

    # Android builds are only supported with ninja.
    if opts.build_preference != 'ninja':
      assert False, 'No build system defined.'
    return BuildWithNinja(threads, self._GetTargets(), opts.target_build_type)
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build android's chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Adds chrome_apk to the standard Android target set."""
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      # 0o600 (octal literal valid on Python 2.6+ and 3, unlike the old
      # Python-2-only 0600 form): owner read/write only, as ssh requires.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      # The bound exception was never used; any chmod/exec failure simply
      # means imaging failed.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # Building Chrome from the local checkout requires pointing the SDK
      # at the chrome root and using a local origin.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Packages, image, and imaging must all succeed, in that order.
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    args = ['sync', '--verbose', '--reset', '--force',
            '--delete_unversioned_trees', '--nohooks', '--revision', revision]
    return bisect_utils.RunGClient(args)

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
1000 class GitSourceControl(SourceControl):
1001 """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    """Initializes the git-backed source control wrapper.

    Args:
      opts: Parsed command-line options, retained on the instance.
    """
    super(GitSourceControl, self).__init__()
    self.opts = opts
  def IsGit(self):
    """Returns True: this SourceControl implementation is git-based."""
    return True
1009 def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
1010 """Retrieves a list of revisions between |revision_range_start| and
1011 |revision_range_end|.
1013 Args:
1014 revision_range_end: The SHA1 for the end of the range.
1015 revision_range_start: The SHA1 for the beginning of the range.
1017 Returns:
1018 A list of the revisions between |revision_range_start| and
1019 |revision_range_end| (inclusive).
1021 revision_range = '%s..%s' % (revision_range_start, revision_range_end)
1022 cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
1023 log_output = CheckRunGit(cmd, cwd=cwd)
1025 revision_hash_list = log_output.split()
1026 revision_hash_list.append(revision_range_start)
1028 return revision_hash_list
  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out with git directly, or 'gclient'/'repo'
          to sync using that tool instead.

    Returns:
      True if successful.
    """

    # NOTE(review): an unrecognized sync_client value leaves |results|
    # unbound and raises NameError below.
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    # All three helpers return a falsy value (0) on success.
    return not results
1051 def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
1052 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1054 Args:
1055 revision_to_check: The user supplied revision string that may need to be
1056 resolved to a git SHA1.
1057 depot: The depot the revision_to_check is from.
1058 search: The number of changelists to try if the first fails to resolve
1059 to a git hash. If the value is negative, the function will search
1060 backwards chronologically, otherwise it will search forward.
1062 Returns:
1063 A string containing a git SHA1 hash, otherwise None.
1065 # Android-chrome is git only, so no need to resolve this to anything else.
1066 if depot == 'android-chrome':
1067 return revision_to_check
1069 if depot != 'cros':
1070 if not IsStringInt(revision_to_check):
1071 return revision_to_check
1073 depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
1075 if depot != 'chromium':
1076 depot_svn = DEPOT_DEPS_NAME[depot]['svn']
1078 svn_revision = int(revision_to_check)
1079 git_revision = None
1081 if search > 0:
1082 search_range = xrange(svn_revision, svn_revision + search, 1)
1083 else:
1084 search_range = xrange(svn_revision, svn_revision + search, -1)
1086 for i in search_range:
1087 svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
1088 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
1089 'origin/master']
1091 (log_output, return_code) = RunGit(cmd, cwd=cwd)
1093 assert not return_code, 'An error occurred while running'\
1094 ' "git %s"' % ' '.join(cmd)
1096 if not return_code:
1097 log_output = log_output.strip()
1099 if log_output:
1100 git_revision = log_output
1102 break
1104 return git_revision
1105 else:
1106 if IsStringInt(revision_to_check):
1107 return int(revision_to_check)
1108 else:
1109 cwd = os.getcwd()
1110 os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
1111 'chromiumos-overlay'))
1112 pattern = CROS_VERSION_PATTERN % revision_to_check
1113 cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
1115 git_revision = None
1117 log_output = CheckRunGit(cmd, cwd=cwd)
1118 if log_output:
1119 git_revision = log_output
1120 git_revision = int(log_output.strip())
1121 os.chdir(cwd)
1123 return git_revision
1125 def IsInProperBranch(self):
1126 """Confirms they're in the master branch for performing the bisection.
1127 This is needed or gclient will fail to sync properly.
1129 Returns:
1130 True if the current branch on src is 'master'
1132 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
1133 log_output = CheckRunGit(cmd)
1134 log_output = log_output.strip()
1136 return log_output == "master"
1138 def SVNFindRev(self, revision, cwd=None):
1139 """Maps directly to the 'git svn find-rev' command.
1141 Args:
1142 revision: The git SHA1 to use.
1144 Returns:
1145 An integer changelist #, otherwise None.
1148 cmd = ['svn', 'find-rev', revision]
1150 output = CheckRunGit(cmd, cwd)
1151 svn_revision = output.strip()
1153 if IsStringInt(svn_revision):
1154 return int(svn_revision)
1156 return None
1158 def QueryRevisionInfo(self, revision, cwd=None):
1159 """Gathers information on a particular revision, such as author's name,
1160 email, subject, and date.
1162 Args:
1163 revision: Revision you want to gather information on.
1164 Returns:
1165 A dict in the following format:
1167 'author': %s,
1168 'email': %s,
1169 'date': %s,
1170 'subject': %s,
1171 'body': %s,
1174 commit_info = {}
1176 formats = ['%cN', '%cE', '%s', '%cD', '%b']
1177 targets = ['author', 'email', 'subject', 'date', 'body']
1179 for i in xrange(len(formats)):
1180 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
1181 output = CheckRunGit(cmd, cwd=cwd)
1182 commit_info[targets[i]] = output.rstrip()
1184 return commit_info
1186 def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
1187 """Performs a checkout on a file at the given revision.
1189 Returns:
1190 True if successful.
1192 return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
1194 def RevertFileToHead(self, file_name):
1195 """Unstages a file and returns it to HEAD.
1197 Returns:
1198 True if successful.
1200 # Reset doesn't seem to return 0 on success.
1201 RunGit(['reset', 'HEAD', file_name])
1203 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
1205 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
1206 """Returns a list of commits that modified this file.
1208 Args:
1209 filename: Name of file.
1210 revision_start: Start of revision range.
1211 revision_end: End of revision range.
1213 Returns:
1214 Returns a list of commits that touched this file.
1216 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
1217 filename]
1218 output = CheckRunGit(cmd)
1220 return [o for o in output.split('\n') if o]
1223 class BisectPerformanceMetrics(object):
1224 """This class contains functionality to perform a bisection of a range of
1225 revisions to narrow down where performance regressions may have occurred.
1227 The main entry-point is the Run method.
1230 def __init__(self, source_control, opts):
1231 super(BisectPerformanceMetrics, self).__init__()
1233 self.opts = opts
1234 self.source_control = source_control
1235 self.src_cwd = os.getcwd()
1236 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1237 self.depot_cwd = {}
1238 self.cleanup_commands = []
1239 self.warnings = []
1240 self.builder = Builder.FromOpts(opts)
1242 # This always starts true since the script grabs latest first.
1243 self.was_blink = True
1245 for d in DEPOT_NAMES:
1246 # The working directory of each depot is just the path to the depot, but
1247 # since we're already in 'src', we can skip that part.
1249 self.depot_cwd[d] = os.path.join(
1250 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1252 def PerformCleanup(self):
1253 """Performs cleanup when script is finished."""
1254 os.chdir(self.src_cwd)
1255 for c in self.cleanup_commands:
1256 if c[0] == 'mv':
1257 shutil.move(c[1], c[2])
1258 else:
1259 assert False, 'Invalid cleanup command.'
1261 def GetRevisionList(self, depot, bad_revision, good_revision):
1262 """Retrieves a list of all the commits between the bad revision and
1263 last known good revision."""
1265 revision_work_list = []
1267 if depot == 'cros':
1268 revision_range_start = good_revision
1269 revision_range_end = bad_revision
1271 cwd = os.getcwd()
1272 self.ChangeToDepotWorkingDirectory('cros')
1274 # Print the commit timestamps for every commit in the revision time
1275 # range. We'll sort them and bisect by that. There is a remote chance that
1276 # 2 (or more) commits will share the exact same timestamp, but it's
1277 # probably safe to ignore that case.
1278 cmd = ['repo', 'forall', '-c',
1279 'git log --format=%%ct --before=%d --after=%d' % (
1280 revision_range_end, revision_range_start)]
1281 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1283 assert not return_code, 'An error occurred while running'\
1284 ' "%s"' % ' '.join(cmd)
1286 os.chdir(cwd)
1288 revision_work_list = list(set(
1289 [int(o) for o in output.split('\n') if IsStringInt(o)]))
1290 revision_work_list = sorted(revision_work_list, reverse=True)
1291 else:
1292 cwd = self._GetDepotDirectory(depot)
1293 revision_work_list = self.source_control.GetRevisionList(bad_revision,
1294 good_revision, cwd=cwd)
1296 return revision_work_list
1298 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
1299 svn_revision = self.source_control.SVNFindRev(revision)
1301 if IsStringInt(svn_revision):
1302 # V8 is tricky to bisect, in that there are only a few instances when
1303 # we can dive into bleeding_edge and get back a meaningful result.
1304 # Try to detect a V8 "business as usual" case, which is when:
1305 # 1. trunk revision N has description "Version X.Y.Z"
1306 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1307 # trunk. Now working on X.Y.(Z+1)."
1309 # As of 01/24/2014, V8 trunk descriptions are formatted:
1310 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1311 # So we can just try parsing that out first and fall back to the old way.
1312 v8_dir = self._GetDepotDirectory('v8')
1313 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
1315 revision_info = self.source_control.QueryRevisionInfo(revision,
1316 cwd=v8_dir)
1318 version_re = re.compile("Version (?P<values>[0-9,.]+)")
1320 regex_results = version_re.search(revision_info['subject'])
1322 if regex_results:
1323 git_revision = None
1325 # Look for "based on bleeding_edge" and parse out revision
1326 if 'based on bleeding_edge' in revision_info['subject']:
1327 try:
1328 bleeding_edge_revision = revision_info['subject'].split(
1329 'bleeding_edge revision r')[1]
1330 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1331 git_revision = self.source_control.ResolveToRevision(
1332 bleeding_edge_revision, 'v8_bleeding_edge', 1,
1333 cwd=v8_bleeding_edge_dir)
1334 return git_revision
1335 except (IndexError, ValueError):
1336 pass
1338 if not git_revision:
1339 # Wasn't successful, try the old way of looking for "Prepare push to"
1340 git_revision = self.source_control.ResolveToRevision(
1341 int(svn_revision) - 1, 'v8_bleeding_edge', -1,
1342 cwd=v8_bleeding_edge_dir)
1344 if git_revision:
1345 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1346 cwd=v8_bleeding_edge_dir)
1348 if 'Prepare push to trunk' in revision_info['subject']:
1349 return git_revision
1350 return None
1352 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1353 cwd = self._GetDepotDirectory('v8')
1354 cmd = ['log', '--format=%ct', '-1', revision]
1355 output = CheckRunGit(cmd, cwd=cwd)
1356 commit_time = int(output)
1357 commits = []
1359 if search_forward:
1360 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1361 'origin/master']
1362 output = CheckRunGit(cmd, cwd=cwd)
1363 output = output.split()
1364 commits = output
1365 commits = reversed(commits)
1366 else:
1367 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1368 'origin/master']
1369 output = CheckRunGit(cmd, cwd=cwd)
1370 output = output.split()
1371 commits = output
1373 bleeding_edge_revision = None
1375 for c in commits:
1376 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1377 if bleeding_edge_revision:
1378 break
1380 return bleeding_edge_revision
1382 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1383 """Manually parses the vars section of the DEPS file to determine
1384 chromium/blink/etc... revisions.
1386 Returns:
1387 A dict in the format {depot:revision} if successful, otherwise None.
1389 # We'll parse the "vars" section of the DEPS file.
1390 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1391 re_results = rxp.search(deps_file_contents)
1392 locals = {}
1394 if not re_results:
1395 return None
1397 # We should be left with a series of entries in the vars component of
1398 # the DEPS file with the following format:
1399 # 'depot_name': 'revision',
1400 vars_body = re_results.group('vars_body')
1401 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1402 re.MULTILINE)
1403 re_results = rxp.findall(vars_body)
1405 return dict(re_results)
1407 def _ParseRevisionsFromDEPSFile(self, depot):
1408 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1409 be needed if the bisect recurses into those depots later.
1411 Args:
1412 depot: Depot being bisected.
1414 Returns:
1415 A dict in the format {depot:revision} if successful, otherwise None.
1417 try:
1418 deps_data = {'Var': lambda _: deps_data["vars"][_],
1419 'From': lambda *args: None
1421 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1422 deps_data = deps_data['deps']
1424 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1425 results = {}
1426 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1427 if (depot_data.get('platform') and
1428 depot_data.get('platform') != os.name):
1429 continue
1431 if (depot_data.get('recurse') and depot in depot_data.get('from')):
1432 src_dir = (deps_data.get(depot_data.get('src')) or
1433 deps_data.get(depot_data.get('src_old')))
1434 if src_dir:
1435 self.depot_cwd[depot_name] = os.path.join(self.src_cwd, src_dir[4:])
1436 re_results = rxp.search(deps_data.get(src_dir, ''))
1437 if re_results:
1438 results[depot_name] = re_results.group('revision')
1439 else:
1440 warning_text = ('Couldn\'t parse revision for %s while bisecting '
1441 '%s' % (depot_name, depot))
1442 if not warning_text in self.warnings:
1443 self.warnings.append(warning_text)
1444 else:
1445 results[depot_name] = None
1446 return results
1447 except ImportError:
1448 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1449 parse_results = self._ParseRevisionsFromDEPSFileManually(
1450 deps_file_contents)
1451 results = {}
1452 for depot_name, depot_revision in parse_results.iteritems():
1453 depot_revision = depot_revision.strip('@')
1454 print depot_name, depot_revision
1455 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1456 if (current_data.has_key('deps_var') and
1457 current_data['deps_var'] == depot_name):
1458 src_name = current_name
1459 results[src_name] = depot_revision
1460 break
1461 return results
1463 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1464 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1466 Returns:
1467 A dict in the format {depot:revision} if successful, otherwise None.
1469 cwd = os.getcwd()
1470 self.ChangeToDepotWorkingDirectory(depot)
1472 results = {}
1474 if depot == 'chromium' or depot == 'android-chrome':
1475 results = self._ParseRevisionsFromDEPSFile(depot)
1476 os.chdir(cwd)
1477 elif depot == 'cros':
1478 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1479 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1480 CROS_CHROMEOS_PATTERN]
1481 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1483 assert not return_code, 'An error occurred while running' \
1484 ' "%s"' % ' '.join(cmd)
1486 if len(output) > CROS_CHROMEOS_PATTERN:
1487 output = output[len(CROS_CHROMEOS_PATTERN):]
1489 if len(output) > 1:
1490 output = output.split('_')[0]
1492 if len(output) > 3:
1493 contents = output.split('.')
1495 version = contents[2]
1497 if contents[3] != '0':
1498 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1499 (version, contents[3], version)
1500 if not warningText in self.warnings:
1501 self.warnings.append(warningText)
1503 cwd = os.getcwd()
1504 self.ChangeToDepotWorkingDirectory('chromium')
1505 return_code = CheckRunGit(['log', '-1', '--format=%H',
1506 '--author=chrome-release@google.com', '--grep=to %s' % version,
1507 'origin/master'])
1508 os.chdir(cwd)
1510 results['chromium'] = output.strip()
1511 elif depot == 'v8':
1512 # We can't try to map the trunk revision to bleeding edge yet, because
1513 # we don't know which direction to try to search in. Have to wait until
1514 # the bisect has narrowed the results down to 2 v8 rolls.
1515 results['v8_bleeding_edge'] = None
1517 return results
1519 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1520 """Backs up or restores build output directory based on restore argument.
1522 Args:
1523 restore: Indicates whether to restore or backup. Default is False(Backup)
1524 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1526 Returns:
1527 Path to backup or restored location as string. otherwise None if it fails.
1529 build_dir = os.path.abspath(
1530 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1531 source_dir = os.path.join(build_dir, build_type)
1532 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1533 if restore:
1534 source_dir, destination_dir = destination_dir, source_dir
1535 if os.path.exists(source_dir):
1536 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1537 shutil.move(source_dir, destination_dir)
1538 return destination_dir
1539 return None
1541 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1542 """Downloads the build archive for the given revision.
1544 Args:
1545 revision: The SVN revision to build.
1546 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1548 Returns:
1549 True if download succeeds, otherwise False.
1551 patch_sha = None
1552 if patch:
1553 # Get the SHA of the DEPS changes patch.
1554 patch_sha = GetSHA1HexDigest(patch)
1556 # Update the DEPS changes patch with a patch to create a new file named
1557 # 'DEPS.sha' and add patch_sha evaluated above to it.
1558 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1560 # Source archive file path on cloud storage.
1561 source_file = GetRemoteBuildPath(revision, self.opts.target_arch, patch_sha)
1563 # Get Build output directory
1564 abs_build_dir = os.path.abspath(
1565 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1566 # Downloaded archive file path.
1567 downloaded_file = os.path.join(
1568 abs_build_dir,
1569 GetZipFileName(revision, self.opts.target_arch, patch_sha))
1571 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1572 source_file,
1573 abs_build_dir)
1575 if not fetch_build_func():
1576 if not self.PostBuildRequestAndWait(revision,
1577 condition=fetch_build_func,
1578 patch=patch):
1579 raise RuntimeError('Somewthing went wrong while processing build'
1580 'request for: %s' % revision)
1581 # Generic name for the archive, created when archive file is extracted.
1582 output_dir = os.path.join(
1583 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1584 # Unzip build archive directory.
1585 try:
1586 RmTreeAndMkDir(output_dir, skip_makedir=True)
1587 ExtractZip(downloaded_file, abs_build_dir)
1588 if os.path.exists(output_dir):
1589 self.BackupOrRestoreOutputdirectory(restore=False)
1590 # Build output directory based on target(e.g. out/Release, out/Debug).
1591 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1592 print 'Moving build from %s to %s' % (
1593 output_dir, target_build_output_dir)
1594 shutil.move(output_dir, target_build_output_dir)
1595 return True
1596 raise IOError('Missing extracted folder %s ' % output_dir)
1597 except Exception as e:
1598 print 'Somewthing went wrong while extracting archive file: %s' % e
1599 self.BackupOrRestoreOutputdirectory(restore=True)
1600 # Cleanup any leftovers from unzipping.
1601 if os.path.exists(output_dir):
1602 RmTreeAndMkDir(output_dir, skip_makedir=True)
1603 finally:
1604 # Delete downloaded archive
1605 if os.path.exists(downloaded_file):
1606 os.remove(downloaded_file)
1607 return False
1609 def PostBuildRequestAndWait(self, revision, condition, patch=None):
1610 """POSTs the build request job to the tryserver instance."""
1612 def GetBuilderNameAndBuildTime(target_arch='ia32'):
1613 """Gets builder bot name and buildtime in seconds based on platform."""
1614 # Bot names should match the one listed in tryserver.chromium's
1615 # master.cfg which produces builds for bisect.
1616 if IsWindows():
1617 if Is64BitWindows() and target_arch == 'x64':
1618 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1619 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1620 if IsLinux():
1621 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1622 if IsMac():
1623 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1624 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1625 if not condition:
1626 return False
1628 bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
1630 # Create a unique ID for each build request posted to try server builders.
1631 # This ID is added to "Reason" property in build's json.
1632 # TODO: Use this id to track the build status.
1633 build_request_id = GetSHA1HexDigest('%s-%s' % (revision, patch))
1635 # Creates a try job description.
1636 job_args = {'host': self.opts.builder_host,
1637 'port': self.opts.builder_port,
1638 'revision': 'src@%s' % revision,
1639 'bot': bot_name,
1640 'name': build_request_id
1642 # Update patch information if supplied.
1643 if patch:
1644 job_args['patch'] = patch
1645 # Posts job to build the revision on the server.
1646 if post_perf_builder_job.PostTryJob(job_args):
1647 poll_interval = 60
1648 start_time = time.time()
1649 while True:
1650 res = condition()
1651 if res:
1652 return res
1653 elapsed_time = time.time() - start_time
1654 if elapsed_time > build_timeout:
1655 raise RuntimeError('Timed out while waiting %ds for %s build.' %
1656 (build_timeout, revision))
1657 print ('Time elapsed: %ss, still waiting for %s build' %
1658 (elapsed_time, revision))
1659 time.sleep(poll_interval)
1660 return False
1662 def IsDownloadable(self, depot):
1663 """Checks if build is downloadable based on target platform and depot."""
1664 if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1665 return (depot == 'chromium' or
1666 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1667 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1668 return False
1670 def UpdateDeps(self, revision, depot, deps_file):
1671 """Updates DEPS file with new revision of dependency repository.
1673 This method search DEPS for a particular pattern in which depot revision
1674 is specified (e.g "webkit_revision": "123456"). If a match is found then
1675 it resolves the given git hash to SVN revision and replace it in DEPS file.
1677 Args:
1678 revision: A git hash revision of the dependency repository.
1679 depot: Current depot being bisected.
1680 deps_file: Path to DEPS file.
1682 Returns:
1683 True if DEPS file is modified successfully, otherwise False.
1685 if not os.path.exists(deps_file):
1686 return False
1688 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1689 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1690 if not deps_var:
1691 print 'DEPS update not supported for Depot: %s', depot
1692 return False
1694 # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1695 # contains "angle_revision" key that holds git hash instead of SVN revision.
1696 # And sometime "angle_revision" key is not specified in "vars" variable,
1697 # in such cases check "deps" dictionary variable that matches
1698 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1699 if depot == 'angle':
1700 return self.UpdateDEPSForAngle(revision, depot, deps_file)
1702 try:
1703 deps_contents = ReadStringFromFile(deps_file)
1704 # Check whether the depot and revision pattern in DEPS file vars
1705 # e.g. for webkit the format is "webkit_revision": "12345".
1706 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1707 re.MULTILINE)
1708 match = re.search(deps_revision, deps_contents)
1709 if match:
1710 svn_revision = self.source_control.SVNFindRev(
1711 revision, self._GetDepotDirectory(depot))
1712 if not svn_revision:
1713 print 'Could not determine SVN revision for %s' % revision
1714 return False
1715 # Update the revision information for the given depot
1716 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1718 # For v8_bleeding_edge revisions change V8 branch in order
1719 # to fetch bleeding edge revision.
1720 if depot == 'v8_bleeding_edge':
1721 new_data = self.UpdateV8Branch(new_data)
1722 if not new_data:
1723 return False
1724 # Write changes to DEPS file
1725 WriteStringToFile(new_data, deps_file)
1726 return True
1727 except IOError, e:
1728 print 'Something went wrong while updating DEPS file. [%s]' % e
1729 return False
1731 def UpdateV8Branch(self, deps_content):
1732 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1734 Check for "v8_branch" in DEPS file if exists update its value
1735 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1736 variable from DEPS revision 254916, therefore check for "src/v8":
1737 <v8 source path> in DEPS in order to support prior DEPS revisions
1738 and update it.
1740 Args:
1741 deps_content: DEPS file contents to be modified.
1743 Returns:
1744 Modified DEPS file contents as a string.
1746 new_branch = r'branches/bleeding_edge'
1747 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1748 if re.search(v8_branch_pattern, deps_content):
1749 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1750 else:
1751 # Replaces the branch assigned to "src/v8" key in DEPS file.
1752 # Format of "src/v8" in DEPS:
1753 # "src/v8":
1754 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1755 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1756 v8_src_pattern = re.compile(
1757 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1758 if re.search(v8_src_pattern, deps_content):
1759 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1760 return deps_content
1762 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1763 """Updates DEPS file with new revision for Angle repository.
1765 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1766 variable contains "angle_revision" key that holds git hash instead of
1767 SVN revision.
1769 And sometimes "angle_revision" key is not specified in "vars" variable,
1770 in such cases check "deps" dictionary variable that matches
1771 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1773 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1774 try:
1775 deps_contents = ReadStringFromFile(deps_file)
1776 # Check whether the depot and revision pattern in DEPS file vars variable
1777 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1778 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1779 deps_var, re.MULTILINE)
1780 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1781 if match:
1782 # Update the revision information for the given depot
1783 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1784 else:
1785 # Check whether the depot and revision pattern in DEPS file deps
1786 # variable. e.g.,
1787 # "src/third_party/angle": Var("chromium_git") +
1788 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1789 angle_rev_pattern = re.compile(
1790 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1791 match = re.search(angle_rev_pattern, deps_contents)
1792 if not match:
1793 print 'Could not find angle revision information in DEPS file.'
1794 return False
1795 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1796 # Write changes to DEPS file
1797 WriteStringToFile(new_data, deps_file)
1798 return True
1799 except IOError, e:
1800 print 'Something went wrong while updating DEPS file, %s' % e
1801 return False
1803 def CreateDEPSPatch(self, depot, revision):
1804 """Modifies DEPS and returns diff as text.
1806 Args:
1807 depot: Current depot being bisected.
1808 revision: A git hash revision of the dependency repository.
1810 Returns:
1811 A tuple with git hash of chromium revision and DEPS patch text.
1813 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1814 if not os.path.exists(deps_file_path):
1815 raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
1816 # Get current chromium revision (git hash).
1817 chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
1818 if not chromium_sha:
1819 raise RuntimeError('Failed to determine Chromium revision for %s' %
1820 revision)
1821 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1822 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1823 # Checkout DEPS file for the current chromium revision.
1824 if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1825 chromium_sha,
1826 cwd=self.src_cwd):
1827 if self.UpdateDeps(revision, depot, deps_file_path):
1828 diff_command = ['diff',
1829 '--src-prefix=src/',
1830 '--dst-prefix=src/',
1831 '--no-ext-diff',
1832 bisect_utils.FILE_DEPS]
1833 diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
1834 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1835 else:
1836 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1837 chromium_sha)
1838 else:
1839 raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1840 chromium_sha)
1841 return (None, None)
1843 def BuildCurrentRevision(self, depot, revision=None):
1844 """Builds chrome and performance_ui_tests on the current revision.
1846 Returns:
1847 True if the build was successful.
1849 if self.opts.debug_ignore_build:
1850 return True
1851 cwd = os.getcwd()
1852 os.chdir(self.src_cwd)
1853 # Fetch build archive for the given revision from the cloud storage when
1854 # the storage bucket is passed.
1855 if self.IsDownloadable(depot) and revision:
1856 deps_patch = None
1857 if depot != 'chromium':
1858 # Create a DEPS patch with new revision for dependency repository.
1859 (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
1860 # Get SVN revision for the given SHA, since builds are archived using SVN
1861 # revision.
1862 chromium_revision = self.source_control.SVNFindRev(revision)
1863 if not chromium_revision:
1864 raise RuntimeError(
1865 'Failed to determine SVN revision for %s' % revision)
1866 if self.DownloadCurrentBuild(chromium_revision, patch=deps_patch):
1867 os.chdir(cwd)
1868 if deps_patch:
1869 # Reverts the changes to DEPS file.
1870 self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1871 revision,
1872 cwd=self.src_cwd)
1873 return True
1874 raise RuntimeError('Failed to download build archive for revision %s.\n'
1875 'Unfortunately, bisection couldn\'t continue any '
1876 'further. Please try running script without '
1877 '--gs_bucket flag to produce local builds.' % revision)
1880 build_success = self.builder.Build(depot, self.opts)
1881 os.chdir(cwd)
1882 return build_success
1884 def RunGClientHooks(self):
1885 """Runs gclient with runhooks command.
1887 Returns:
1888 True if gclient reports no errors.
1891 if self.opts.debug_ignore_build:
1892 return True
1894 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1896 def TryParseHistogramValuesFromOutput(self, metric, text):
1897 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1899 Args:
1900 metric: The metric as a list of [<trace>, <value>] strings.
1901 text: The text to parse the metric values from.
1903 Returns:
1904 A list of floating point numbers found.
1906 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1908 text_lines = text.split('\n')
1909 values_list = []
1911 for current_line in text_lines:
1912 if metric_formatted in current_line:
1913 current_line = current_line[len(metric_formatted):]
1915 try:
1916 histogram_values = eval(current_line)
1918 for b in histogram_values['buckets']:
1919 average_for_bucket = float(b['high'] + b['low']) * 0.5
1920 # Extends the list with N-elements with the average for that bucket.
1921 values_list.extend([average_for_bucket] * b['count'])
1922 except:
1923 pass
1925 return values_list
1927 def TryParseResultValuesFromOutput(self, metric, text):
1928 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
1930 Args:
1931 metric: The metric as a list of [<trace>, <value>] strings.
1932 text: The text to parse the metric values from.
1934 Returns:
1935 A list of floating point numbers found.
1937 # Format is: RESULT <graph>: <trace>= <value> <units>
1938 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1940 # The log will be parsed looking for format:
1941 # <*>RESULT <graph_name>: <trace_name>= <value>
1942 single_result_re = re.compile(
1943 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
1945 # The log will be parsed looking for format:
1946 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
1947 multi_results_re = re.compile(
1948 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
1950 # The log will be parsed looking for format:
1951 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
1952 mean_stddev_re = re.compile(
1953 metric_re +
1954 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
1956 text_lines = text.split('\n')
1957 values_list = []
1958 for current_line in text_lines:
1959 # Parse the output from the performance test for the metric we're
1960 # interested in.
1961 single_result_match = single_result_re.search(current_line)
1962 multi_results_match = multi_results_re.search(current_line)
1963 mean_stddev_match = mean_stddev_re.search(current_line)
1964 if (not single_result_match is None and
1965 single_result_match.group('VALUE')):
1966 values_list += [single_result_match.group('VALUE')]
1967 elif (not multi_results_match is None and
1968 multi_results_match.group('VALUES')):
1969 metric_values = multi_results_match.group('VALUES')
1970 values_list += metric_values.split(',')
1971 elif (not mean_stddev_match is None and
1972 mean_stddev_match.group('MEAN')):
1973 values_list += [mean_stddev_match.group('MEAN')]
1975 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1977 # If the metric is times/t, we need to sum the timings in order to get
1978 # similar regression results as the try-bots.
1979 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1980 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1982 if metric in metrics_to_sum:
1983 if values_list:
1984 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1986 return values_list
1988 def ParseMetricValuesFromOutput(self, metric, text):
1989 """Parses output from performance_ui_tests and retrieves the results for
1990 a given metric.
1992 Args:
1993 metric: The metric as a list of [<trace>, <value>] strings.
1994 text: The text to parse the metric values from.
1996 Returns:
1997 A list of floating point numbers found.
1999 metric_values = self.TryParseResultValuesFromOutput(metric, text)
2001 if not metric_values:
2002 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
2004 return metric_values
2006 def _GenerateProfileIfNecessary(self, command_args):
2007 """Checks the command line of the performance test for dependencies on
2008 profile generation, and runs tools/perf/generate_profile as necessary.
2010 Args:
2011 command_args: Command line being passed to performance test, as a list.
2013 Returns:
2014 False if profile generation was necessary and failed, otherwise True.
2017 if '--profile-dir' in ' '.join(command_args):
2018 # If we were using python 2.7+, we could just use the argparse
2019 # module's parse_known_args to grab --profile-dir. Since some of the
2020 # bots still run 2.6, have to grab the arguments manually.
2021 arg_dict = {}
2022 args_to_parse = ['--profile-dir', '--browser']
2024 for arg_to_parse in args_to_parse:
2025 for i, current_arg in enumerate(command_args):
2026 if arg_to_parse in current_arg:
2027 current_arg_split = current_arg.split('=')
2029 # Check 2 cases, --arg=<val> and --arg <val>
2030 if len(current_arg_split) == 2:
2031 arg_dict[arg_to_parse] = current_arg_split[1]
2032 elif i + 1 < len(command_args):
2033 arg_dict[arg_to_parse] = command_args[i+1]
2035 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2037 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2038 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2039 return not RunProcess(['python', path_to_generate,
2040 '--profile-type-to-generate', profile_type,
2041 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2042 return False
2043 return True
2045 def _IsBisectModeUsingMetric(self):
2046 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
2048 def _IsBisectModeReturnCode(self):
2049 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
2051 def _IsBisectModeStandardDeviation(self):
2052 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
          This is the result chart name and trace name, separated by slash.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
          The arguments reset_on_first_run, upload_on_last_run and
          results_label are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    if self.opts.debug_ignore_perf_test:
      # Return canned results so the surrounding bisect flow can be
      # exercised without actually running the performance test.
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For Windows platform set posix=False, to parse windows paths correctly.
    # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
    # refer to http://bugs.python.org/issue1724822. By default posix=True.
    args = shlex.split(command_to_run, posix=not IsWindows())

    if not self._GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    # Repeat the test to collect multiple samples (bounded by both a run
    # count and a wall-clock budget, checked at the bottom of the loop).
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        (output, return_code) = RunProcessAndRetrieveOutput(current_args,
            cwd=self.src_cwd)
      except OSError, e:
        # ENOENT means the test binary itself wasn't found; report a
        # helpful message instead of a raw traceback.
        if e.errno == errno.ENOENT:
          err_text = ('Something went wrong running the performance test. '
              'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if self._IsBisectModeUsingMetric():
        metric_values += self.ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of metrics
      # that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero vs
    # non-zero.
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
          'mean': overall_return_code,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    else:
      # Need to get the average value if there were multiple values.
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_err = CalculateStandardError(metric_values)
      standard_dev = CalculateStandardDeviation(metric_values)

      if self._IsBisectModeStandardDeviation():
        metric_values = [standard_dev]

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
2194 def FindAllRevisionsToSync(self, revision, depot):
2195 """Finds all dependant revisions and depots that need to be synced for a
2196 given revision. This is only useful in the git workflow, as an svn depot
2197 may be split into multiple mirrors.
2199 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2200 skia/include. To sync skia/src properly, one has to find the proper
2201 revisions in skia/gyp and skia/include.
2203 Args:
2204 revision: The revision to sync to.
2205 depot: The depot in use at the moment (probably skia).
2207 Returns:
2208 A list of [depot, revision] pairs that need to be synced.
2210 revisions_to_sync = [[depot, revision]]
2212 is_base = ((depot == 'chromium') or (depot == 'cros') or
2213 (depot == 'android-chrome'))
2215 # Some SVN depots were split into multiple git depots, so we need to
2216 # figure out for each mirror which git revision to grab. There's no
2217 # guarantee that the SVN revision will exist for each of the dependant
2218 # depots, so we have to grep the git logs and grab the next earlier one.
2219 if not is_base and\
2220 DEPOT_DEPS_NAME[depot]['depends'] and\
2221 self.source_control.IsGit():
2222 svn_rev = self.source_control.SVNFindRev(revision)
2224 for d in DEPOT_DEPS_NAME[depot]['depends']:
2225 self.ChangeToDepotWorkingDirectory(d)
2227 dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
2229 if dependant_rev:
2230 revisions_to_sync.append([d, dependant_rev])
2232 num_resolved = len(revisions_to_sync)
2233 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
2235 self.ChangeToDepotWorkingDirectory(depot)
2237 if not ((num_resolved - 1) == num_needed):
2238 return None
2240 return revisions_to_sync
2242 def PerformPreBuildCleanup(self):
2243 """Performs necessary cleanup between runs."""
2244 print 'Cleaning up between runs.'
2245 print
2247 # Having these pyc files around between runs can confuse the
2248 # perf tests and cause them to crash.
2249 for (path, _, files) in os.walk(self.src_cwd):
2250 for cur_file in files:
2251 if cur_file.endswith('.pyc'):
2252 path_to_file = os.path.join(path, cur_file)
2253 os.remove(path_to_file)
2255 def PerformWebkitDirectoryCleanup(self, revision):
2256 """If the script is switching between Blink and WebKit during bisect,
2257 its faster to just delete the directory rather than leave it up to git
2258 to sync.
2260 Returns:
2261 True if successful.
2263 if not self.source_control.CheckoutFileAtRevision(
2264 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
2265 return False
2267 cwd = os.getcwd()
2268 os.chdir(self.src_cwd)
2270 is_blink = bisect_utils.IsDepsFileBlink()
2272 os.chdir(cwd)
2274 if not self.source_control.RevertFileToHead(
2275 bisect_utils.FILE_DEPS_GIT):
2276 return False
2278 if self.was_blink != is_blink:
2279 self.was_blink = is_blink
2280 # Removes third_party/Webkit directory.
2281 return bisect_utils.RemoveThirdPartyDirectory('Webkit')
2282 return True
2284 def PerformCrosChrootCleanup(self):
2285 """Deletes the chroot.
2287 Returns:
2288 True if successful.
2290 cwd = os.getcwd()
2291 self.ChangeToDepotWorkingDirectory('cros')
2292 cmd = [CROS_SDK_PATH, '--delete']
2293 return_code = RunProcess(cmd)
2294 os.chdir(cwd)
2295 return not return_code
2297 def CreateCrosChroot(self):
2298 """Creates a new chroot.
2300 Returns:
2301 True if successful.
2303 cwd = os.getcwd()
2304 self.ChangeToDepotWorkingDirectory('cros')
2305 cmd = [CROS_SDK_PATH, '--create']
2306 return_code = RunProcess(cmd)
2307 os.chdir(cwd)
2308 return not return_code
2310 def PerformPreSyncCleanup(self, revision, depot):
2311 """Performs any necessary cleanup before syncing.
2313 Returns:
2314 True if successful.
2316 if depot == 'chromium':
2317 # Removes third_party/libjingle. At some point, libjingle was causing
2318 # issues syncing when using the git workflow (crbug.com/266324).
2319 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
2320 return False
2321 # Removes third_party/skia. At some point, skia was causing
2322 # issues syncing when using the git workflow (crbug.com/377951).
2323 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
2324 return False
2325 return self.PerformWebkitDirectoryCleanup(revision)
2326 elif depot == 'cros':
2327 return self.PerformCrosChrootCleanup()
2328 return True
  def RunPostSync(self, depot):
    """Performs any work after syncing.

    Args:
      depot: The depot that was just synced.

    Returns:
      True if successful.
    """
    if self.opts.target_platform == 'android':
      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
          path_to_src=self.src_cwd):
        return False

    if depot == 'cros':
      return self.CreateCrosChroot()
    else:
      return self.RunGClientHooks()
    # NOTE(review): this statement appears unreachable — both branches
    # above return. Confirm before removing.
    return True
2347 def ShouldSkipRevision(self, depot, revision):
2348 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2349 is git based those changes would have no effect.
2351 Args:
2352 depot: The depot being bisected.
2353 revision: Current revision we're synced to.
2355 Returns:
2356 True if we should skip building/testing this revision.
2358 if depot == 'chromium':
2359 if self.source_control.IsGit():
2360 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
2361 output = CheckRunGit(cmd)
2363 files = output.splitlines()
2365 if len(files) == 1 and files[0] == 'DEPS':
2366 return True
2368 return False
  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                              skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: If True, revisions that can be safely skipped (e.g. a
          DEPS-only change) are not built or tested.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      # Sync each [depot, revision] pair; any single failure aborts the run.
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
              current_revision)
        if not self.source_control.SyncToRevision(current_revision,
            sync_client):
          success = False

          break

    if success:
      success = self.RunPostSync(depot)
      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
              BUILD_RESULT_SKIPPED)

        start_build_time = time.time()
        if self.BuildCurrentRevision(depot, revision):
          after_build_time = time.time()
          results = self.RunPerformanceTestAndParseResults(command_to_run,
              metric)
          # Restore build output directory once the tests are done, to avoid
          # any descrepancy.
          if self.IsDownloadable(depot) and revision:
            self.BackupOrRestoreOutputdirectory(restore=True)

          if results[1] == 0:
            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
                depot, revision)

            if not external_revisions is None:
              # Success: results, external revisions, then perf-test and
              # build wall-clock durations.
              return (results[0], results[1], external_revisions,
                  time.time() - after_build_time, after_build_time -
                  start_build_time)
            else:
              return ('Failed to parse DEPS file for external revisions.',
                  BUILD_RESULT_FAIL)
          else:
            return results
        else:
          return ('Failed to build revision: [%s]' % (str(revision, )),
              BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % (str(revision, )),
          BUILD_RESULT_FAIL)
2459 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2460 """Given known good and bad values, decide if the current_value passed
2461 or failed.
2463 Args:
2464 current_value: The value of the metric being checked.
2465 known_bad_value: The reference value for a "failed" run.
2466 known_good_value: The reference value for a "passed" run.
2468 Returns:
2469 True if the current_value is closer to the known_good_value than the
2470 known_bad_value.
2472 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2473 dist_to_good_value = abs(current_value['std_dev'] -
2474 known_good_value['std_dev'])
2475 dist_to_bad_value = abs(current_value['std_dev'] -
2476 known_bad_value['std_dev'])
2477 else:
2478 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2479 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2481 return dist_to_good_value < dist_to_bad_value
2483 def _GetDepotDirectory(self, depot_name):
2484 if depot_name == 'chromium':
2485 return self.src_cwd
2486 elif depot_name == 'cros':
2487 return self.cros_cwd
2488 elif depot_name in DEPOT_NAMES:
2489 return self.depot_cwd[depot_name]
2490 else:
2491 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2492 ' was added without proper support?' % depot_name
2494 def ChangeToDepotWorkingDirectory(self, depot_name):
2495 """Given a depot, changes to the appropriate working directory.
2497 Args:
2498 depot_name: The name of the depot (see DEPOT_NAMES).
2500 os.chdir(self._GetDepotDirectory(depot_name))
2502 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2503 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2504 search_forward=True)
2505 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2506 search_forward=False)
2507 min_revision_data['external']['v8_bleeding_edge'] = r1
2508 max_revision_data['external']['v8_bleeding_edge'] = r2
2510 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2511 min_revision_data['revision']) or
2512 not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2513 max_revision_data['revision'])):
2514 self.warnings.append('Trunk revisions in V8 did not map directly to '
2515 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2516 'did map directly to bleeding_edge revisions, but results might not '
2517 'be valid.')
2519 def _FindNextDepotToBisect(self, current_depot, current_revision,
2520 min_revision_data, max_revision_data):
2521 """Given the state of the bisect, decides which depot the script should
2522 dive into next (if any).
2524 Args:
2525 current_depot: Current depot being bisected.
2526 current_revision: Current revision synced to.
2527 min_revision_data: Data about the earliest revision in the bisect range.
2528 max_revision_data: Data about the latest revision in the bisect range.
2530 Returns:
2531 The depot to bisect next, or None.
2533 external_depot = None
2534 for next_depot in DEPOT_NAMES:
2535 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2536 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2537 continue
2539 if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
2540 min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
2541 continue
2543 if current_depot == 'v8':
2544 # We grab the bleeding_edge info here rather than earlier because we
2545 # finally have the revision range. From that we can search forwards and
2546 # backwards to try to match trunk revisions to bleeding_edge.
2547 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2549 if (min_revision_data['external'].get(next_depot) ==
2550 max_revision_data['external'].get(next_depot)):
2551 continue
2553 if (min_revision_data['external'].get(next_depot) and
2554 max_revision_data['external'].get(next_depot)):
2555 external_depot = next_depot
2556 break
2558 return external_depot
  def PrepareToBisectOnDepot(self,
                             current_depot,
                             end_revision,
                             start_revision,
                             previous_depot,
                             previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.

    Args:
      current_depot: The depot we want to bisect.
      end_revision: End of the revision range.
      start_revision: Start of the revision range.
      previous_depot: The depot we were previously bisecting.
      previous_revision: The last revision we synced to on |previous_depot|.

    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    self.ChangeToDepotWorkingDirectory(current_depot)

    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
      config_path = os.path.join(self.src_cwd, '..')
      # An empty list signals failure to the caller for both steps below.
      if bisect_utils.RunGClientAndCreateConfig(self.opts,
          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
        return []
      if bisect_utils.RunGClient(
          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
        return []

    if current_depot == 'v8_bleeding_edge':
      self.ChangeToDepotWorkingDirectory('chromium')

      # Swap the v8 and v8_bleeding_edge working copies so the bisect can
      # operate on bleeding_edge under the usual 'v8' path; record cleanup
      # commands so the swap is undone when the bisect finishes.
      shutil.move('v8', 'v8.bak')
      shutil.move('v8_bleeding_edge', 'v8')

      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

      self.ChangeToDepotWorkingDirectory(current_depot)

    depot_revision_list = self.GetRevisionList(current_depot,
                                               end_revision,
                                               start_revision)

    self.ChangeToDepotWorkingDirectory('chromium')

    return depot_revision_list
2617 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2618 """Gathers reference values by running the performance tests on the
2619 known good and bad revisions.
2621 Args:
2622 good_rev: The last known good revision where the performance regression
2623 has not occurred yet.
2624 bad_rev: A revision where the performance regression has already occurred.
2625 cmd: The command to execute the performance test.
2626 metric: The metric being tested for regression.
2628 Returns:
2629 A tuple with the results of building and running each revision.
2631 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
2632 target_depot,
2633 cmd,
2634 metric)
2636 good_run_results = None
2638 if not bad_run_results[1]:
2639 good_run_results = self.SyncBuildAndRunRevision(good_rev,
2640 target_depot,
2641 cmd,
2642 metric)
2644 return (bad_run_results, good_run_results)
2646 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
2647 """Adds new revisions to the revision_data dict and initializes them.
2649 Args:
2650 revisions: List of revisions to add.
2651 depot: Depot that's currently in use (src, webkit, etc...)
2652 sort: Sorting key for displaying revisions.
2653 revision_data: A dict to add the new revisions into. Existing revisions
2654 will have their sort keys offset.
2657 num_depot_revisions = len(revisions)
2659 for _, v in revision_data.iteritems():
2660 if v['sort'] > sort:
2661 v['sort'] += num_depot_revisions
2663 for i in xrange(num_depot_revisions):
2664 r = revisions[i]
2666 revision_data[r] = {'revision' : r,
2667 'depot' : depot,
2668 'value' : None,
2669 'perf_time' : 0,
2670 'build_time' : 0,
2671 'passed' : '?',
2672 'sort' : i + sort + 1}
2674 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2675 if self.opts.output_buildbot_annotations:
2676 step_name = 'Bisection Range: [%s - %s]' % (
2677 revision_list[len(revision_list)-1], revision_list[0])
2678 bisect_utils.OutputAnnotationStepStart(step_name)
2680 print
2681 print 'Revisions to bisect on [%s]:' % depot
2682 for revision_id in revision_list:
2683 print ' -> %s' % (revision_id, )
2684 print
2686 if self.opts.output_buildbot_annotations:
2687 bisect_utils.OutputAnnotationStepClosed()
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
      bad_revision: First known bad revision.
      good_revision: Last known good revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        # A mismatch in counts means some DEPS change in the range has no
        # corresponding .DEPS.git change yet.
        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            # Expand the bad end of the range to the .DEPS.git commit.
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)
2734 def CheckIfRevisionsInProperOrder(self,
2735 target_depot,
2736 good_revision,
2737 bad_revision):
2738 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2740 Args:
2741 good_revision: Number/tag of the known good revision.
2742 bad_revision: Number/tag of the known bad revision.
2744 Returns:
2745 True if the revisions are in the proper order (good earlier than bad).
2747 if self.source_control.IsGit() and target_depot != 'cros':
2748 cmd = ['log', '--format=%ct', '-1', good_revision]
2749 cwd = self._GetDepotDirectory(target_depot)
2751 output = CheckRunGit(cmd, cwd=cwd)
2752 good_commit_time = int(output)
2754 cmd = ['log', '--format=%ct', '-1', bad_revision]
2755 output = CheckRunGit(cmd, cwd=cwd)
2756 bad_commit_time = int(output)
2758 return good_commit_time <= bad_commit_time
2759 else:
2760 # Cros/svn use integers
2761 return int(good_revision) <= int(bad_revision)
2763 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2764 """Given known good and bad revisions, run a binary search on all
2765 intermediate revisions to determine the CL where the performance regression
2766 occurred.
2768 Args:
2769 command_to_run: Specify the command to execute the performance test.
2770 good_revision: Number/tag of the known good revision.
2771 bad_revision: Number/tag of the known bad revision.
2772 metric: The performance metric to monitor.
2774 Returns:
2775 A dict with 2 members, 'revision_data' and 'error'. On success,
2776 'revision_data' will contain a dict mapping revision ids to
2777 data about that revision. Each piece of revision data consists of a
2778 dict with the following keys:
2780 'passed': Represents whether the performance test was successful at
2781 that revision. Possible values include: 1 (passed), 0 (failed),
2782 '?' (skipped), 'F' (build failed).
2783 'depot': The depot that this revision is from (ie. WebKit)
2784 'external': If the revision is a 'src' revision, 'external' contains
2785 the revisions of each of the external libraries.
2786 'sort': A sort value for sorting the dict in order of commits.
2788 For example:
2790 'error':None,
2791 'revision_data':
2793 'CL #1':
2795 'passed':False,
2796 'depot':'chromium',
2797 'external':None,
2798 'sort':0
2803 If an error occurred, the 'error' field will contain the message and
2804 'revision_data' will be empty.
2806 results = {'revision_data' : {},
2807 'error' : None}
2809 # Choose depot to bisect first
2810 target_depot = 'chromium'
2811 if self.opts.target_platform == 'cros':
2812 target_depot = 'cros'
2813 elif self.opts.target_platform == 'android-chrome':
2814 target_depot = 'android-chrome'
2816 cwd = os.getcwd()
2817 self.ChangeToDepotWorkingDirectory(target_depot)
2819 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2820 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2821 target_depot, 100)
2822 good_revision = self.source_control.ResolveToRevision(good_revision_in,
2823 target_depot, -100)
2825 os.chdir(cwd)
2828 if bad_revision is None:
2829 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2830 return results
2832 if good_revision is None:
2833 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2834 return results
2836 # Check that they didn't accidentally swap good and bad revisions.
2837 if not self.CheckIfRevisionsInProperOrder(
2838 target_depot, good_revision, bad_revision):
2839 results['error'] = 'bad_revision < good_revision, did you swap these '\
2840 'by mistake?'
2841 return results
2843 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2844 bad_revision, good_revision)
2846 if self.opts.output_buildbot_annotations:
2847 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2849 print 'Gathering revision range for bisection.'
2850 # Retrieve a list of revisions to do bisection on.
2851 src_revision_list = self.GetRevisionList(target_depot,
2852 bad_revision,
2853 good_revision)
2855 if self.opts.output_buildbot_annotations:
2856 bisect_utils.OutputAnnotationStepClosed()
2858 if src_revision_list:
2859 # revision_data will store information about a revision such as the
2860 # depot it came from, the webkit/V8 revision at that time,
2861 # performance timing, build state, etc...
2862 revision_data = results['revision_data']
2864 # revision_list is the list we're binary searching through at the moment.
2865 revision_list = []
2867 sort_key_ids = 0
2869 for current_revision_id in src_revision_list:
2870 sort_key_ids += 1
2872 revision_data[current_revision_id] = {'value' : None,
2873 'passed' : '?',
2874 'depot' : target_depot,
2875 'external' : None,
2876 'perf_time' : 0,
2877 'build_time' : 0,
2878 'sort' : sort_key_ids}
2879 revision_list.append(current_revision_id)
2881 min_revision = 0
2882 max_revision = len(revision_list) - 1
2884 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2886 if self.opts.output_buildbot_annotations:
2887 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2889 print 'Gathering reference values for bisection.'
2891 # Perform the performance tests on the good and bad revisions, to get
2892 # reference values.
2893 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2894 bad_revision,
2895 command_to_run,
2896 metric,
2897 target_depot)
2899 if self.opts.output_buildbot_annotations:
2900 bisect_utils.OutputAnnotationStepClosed()
2902 if bad_results[1]:
2903 results['error'] = ('An error occurred while building and running '
2904 'the \'bad\' reference value. The bisect cannot continue without '
2905 'a working \'bad\' revision to start from.\n\nError: %s' %
2906 bad_results[0])
2907 return results
2909 if good_results[1]:
2910 results['error'] = ('An error occurred while building and running '
2911 'the \'good\' reference value. The bisect cannot continue without '
2912 'a working \'good\' revision to start from.\n\nError: %s' %
2913 good_results[0])
2914 return results
2917 # We need these reference values to determine if later runs should be
2918 # classified as pass or fail.
2919 known_bad_value = bad_results[0]
2920 known_good_value = good_results[0]
2922 # Can just mark the good and bad revisions explicitly here since we
2923 # already know the results.
2924 bad_revision_data = revision_data[revision_list[0]]
2925 bad_revision_data['external'] = bad_results[2]
2926 bad_revision_data['perf_time'] = bad_results[3]
2927 bad_revision_data['build_time'] = bad_results[4]
2928 bad_revision_data['passed'] = False
2929 bad_revision_data['value'] = known_bad_value
2931 good_revision_data = revision_data[revision_list[max_revision]]
2932 good_revision_data['external'] = good_results[2]
2933 good_revision_data['perf_time'] = good_results[3]
2934 good_revision_data['build_time'] = good_results[4]
2935 good_revision_data['passed'] = True
2936 good_revision_data['value'] = known_good_value
2938 next_revision_depot = target_depot
2940 while True:
2941 if not revision_list:
2942 break
2944 min_revision_data = revision_data[revision_list[min_revision]]
2945 max_revision_data = revision_data[revision_list[max_revision]]
2947 if max_revision - min_revision <= 1:
2948 current_depot = min_revision_data['depot']
2949 if min_revision_data['passed'] == '?':
2950 next_revision_index = min_revision
2951 elif max_revision_data['passed'] == '?':
2952 next_revision_index = max_revision
2953 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2954 previous_revision = revision_list[min_revision]
2955 # If there were changes to any of the external libraries we track,
2956 # should bisect the changes there as well.
2957 external_depot = self._FindNextDepotToBisect(current_depot,
2958 previous_revision, min_revision_data, max_revision_data)
2960 # If there was no change in any of the external depots, the search
2961 # is over.
2962 if not external_depot:
2963 if current_depot == 'v8':
2964 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2965 'continue any further. The script can only bisect into '
2966 'V8\'s bleeding_edge repository if both the current and '
2967 'previous revisions in trunk map directly to revisions in '
2968 'bleeding_edge.')
2969 break
2971 earliest_revision = max_revision_data['external'][external_depot]
2972 latest_revision = min_revision_data['external'][external_depot]
2974 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
2975 latest_revision,
2976 earliest_revision,
2977 next_revision_depot,
2978 previous_revision)
2980 if not new_revision_list:
2981 results['error'] = 'An error occurred attempting to retrieve'\
2982 ' revision range: [%s..%s]' % \
2983 (earliest_revision, latest_revision)
2984 return results
2986 self.AddRevisionsIntoRevisionData(new_revision_list,
2987 external_depot,
2988 min_revision_data['sort'],
2989 revision_data)
2991 # Reset the bisection and perform it on the newly inserted
2992 # changelists.
2993 revision_list = new_revision_list
2994 min_revision = 0
2995 max_revision = len(revision_list) - 1
2996 sort_key_ids += len(revision_list)
2998 print 'Regression in metric:%s appears to be the result of changes'\
2999 ' in [%s].' % (metric, external_depot)
3001 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
3003 continue
3004 else:
3005 break
3006 else:
3007 next_revision_index = int((max_revision - min_revision) / 2) +\
3008 min_revision
3010 next_revision_id = revision_list[next_revision_index]
3011 next_revision_data = revision_data[next_revision_id]
3012 next_revision_depot = next_revision_data['depot']
3014 self.ChangeToDepotWorkingDirectory(next_revision_depot)
3016 if self.opts.output_buildbot_annotations:
3017 step_name = 'Working on [%s]' % next_revision_id
3018 bisect_utils.OutputAnnotationStepStart(step_name)
3020 print 'Working on revision: [%s]' % next_revision_id
3022 run_results = self.SyncBuildAndRunRevision(next_revision_id,
3023 next_revision_depot,
3024 command_to_run,
3025 metric, skippable=True)
3027 # If the build is successful, check whether or not the metric
3028 # had regressed.
3029 if not run_results[1]:
3030 if len(run_results) > 2:
3031 next_revision_data['external'] = run_results[2]
3032 next_revision_data['perf_time'] = run_results[3]
3033 next_revision_data['build_time'] = run_results[4]
3035 passed_regression = self._CheckIfRunPassed(run_results[0],
3036 known_good_value,
3037 known_bad_value)
3039 next_revision_data['passed'] = passed_regression
3040 next_revision_data['value'] = run_results[0]
3042 if passed_regression:
3043 max_revision = next_revision_index
3044 else:
3045 min_revision = next_revision_index
3046 else:
3047 if run_results[1] == BUILD_RESULT_SKIPPED:
3048 next_revision_data['passed'] = 'Skipped'
3049 elif run_results[1] == BUILD_RESULT_FAIL:
3050 next_revision_data['passed'] = 'Build Failed'
3052 print run_results[0]
3054 # If the build is broken, remove it and redo search.
3055 revision_list.pop(next_revision_index)
3057 max_revision -= 1
3059 if self.opts.output_buildbot_annotations:
3060 self._PrintPartialResults(results)
3061 bisect_utils.OutputAnnotationStepClosed()
3062 else:
3063 # Weren't able to sync and retrieve the revision range.
3064 results['error'] = 'An error occurred attempting to retrieve revision '\
3065 'range: [%s..%s]' % (good_revision, bad_revision)
3067 return results
3069 def _PrintPartialResults(self, results_dict):
3070 revision_data = results_dict['revision_data']
3071 revision_data_sorted = sorted(revision_data.iteritems(),
3072 key = lambda x: x[1]['sort'])
3073 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3075 self._PrintTestedCommitsTable(revision_data_sorted,
3076 results_dict['first_working_revision'],
3077 results_dict['last_broken_revision'],
3078 100, final_step=False)
3080 def _PrintConfidence(self, results_dict):
3081 # The perf dashboard specifically looks for the string
3082 # "Confidence in Bisection Results: 100%" to decide whether or not
3083 # to cc the author(s). If you change this, please update the perf
3084 # dashboard as well.
3085 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
  def _PrintBanner(self, results_dict):
    """Prints a banner summarizing the regression the bisect reproduced.

    Args:
      results_dict: Dict from _GetResultsDict; 'regression_size' and
          'regression_std_err' are read in mean/std-dev modes.
    """
    print
    print " __o_\___ Aw Snap! We hit a speed bump!"
    print "=-O----O-'__.~.___________________________________"
    print
    if self._IsBisectModeReturnCode():
      # In return-code mode there is no metric delta to report.
      print ('Bisect reproduced a change in return codes while running the '
          'performance test.')
    else:
      # regression_size may be the string 'zero-to-nonzero' (see
      # _GetResultsDict), which %.02f cannot format — assumes numeric here.
      # NOTE(review): confirm the zero-to-nonzero case never reaches this
      # branch with a non-numeric value.
      print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
          '%s metric.' % (results_dict['regression_size'],
          results_dict['regression_std_err'], '/'.join(self.opts.metric)))
    self._PrintConfidence(results_dict)
3101 def _PrintFailedBanner(self, results_dict):
3102 print
3103 if self._IsBisectModeReturnCode():
3104 print 'Bisect could not reproduce a change in the return code.'
3105 else:
3106 print ('Bisect could not reproduce a change in the '
3107 '%s metric.' % '/'.join(self.opts.metric))
3108 print
3110 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3111 info = self.source_control.QueryRevisionInfo(cl,
3112 self._GetDepotDirectory(depot))
3113 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3114 try:
3115 # Format is "git-svn-id: svn://....@123456 <other data>"
3116 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
3117 svn_revision = svn_line[0].split('@')
3118 svn_revision = svn_revision[1].split(' ')[0]
3119 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
3120 except IndexError:
3121 return ''
3122 return ''
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints details (subject, author, link, date) for a culprit commit.

    Args:
      cl: The commit hash.
      info: Dict with 'subject', 'author', 'email', 'body' and 'date' keys,
          as returned by source_control.QueryRevisionInfo.
      depot: Optional depot name used to build a viewvc link.
    """
    # The perf dashboard specifically looks for the string
    # "Author : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print
    print 'Subject : %s' % info['subject']
    print 'Author : %s' % info['author']
    # Only show the email separately when it doesn't simply repeat the
    # author name (e.g. author is already "foo@chromium.org").
    if not info['email'].startswith(info['author']):
      print 'Email : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      print 'Link : %s' % commit_link
    else:
      # Fall back to dumping the raw commit body so the revision is still
      # identifiable in the log output.
      print
      print 'Failed to parse svn revision from body:'
      print
      print info['body']
      print
    print 'Commit : %s' % cl
    print 'Date : %s' % info['date']
3145 def _PrintTableRow(self, column_widths, row_data):
3146 assert len(column_widths) == len(row_data)
3148 text = ''
3149 for i in xrange(len(column_widths)):
3150 current_row_data = row_data[i].center(column_widths[i], ' ')
3151 text += ('%%%ds' % column_widths[i]) % current_row_data
3152 print text
3154 def _PrintTestedCommitsHeader(self):
3155 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3156 self._PrintTableRow(
3157 [20, 70, 14, 12, 13],
3158 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3159 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3160 self._PrintTableRow(
3161 [20, 70, 14, 12, 13],
3162 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3163 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3164 self._PrintTableRow(
3165 [20, 70, 14, 13],
3166 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3167 else:
3168 assert False, "Invalid bisect_mode specified."
3169 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3170 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3171 'State'.center(13, ' '))
3173 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3174 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3175 std_error = '+-%.02f' % current_data['value']['std_err']
3176 mean = '%.02f' % current_data['value']['mean']
3177 self._PrintTableRow(
3178 [20, 70, 12, 14, 13],
3179 [current_data['depot'], cl_link, mean, std_error, state_str])
3180 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3181 std_error = '+-%.02f' % current_data['value']['std_err']
3182 mean = '%.02f' % current_data['value']['mean']
3183 self._PrintTableRow(
3184 [20, 70, 12, 14, 13],
3185 [current_data['depot'], cl_link, std_error, mean, state_str])
3186 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3187 mean = '%d' % current_data['value']['mean']
3188 self._PrintTableRow(
3189 [20, 70, 14, 13],
3190 [current_data['depot'], cl_link, mean, state_str])
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints a table of every tested commit with its value and state.

    Args:
      revision_data_sorted: (revision, data) pairs in bisection order.
      first_working_revision: First revision that passed, or None.
      last_broken_revision: Last revision that failed, or None.
      confidence: Bisection confidence; when falsy, Good/Bad/Suspected
          labels are suppressed since the result is unreliable.
      final_step: True for the final results table ('Tested commits:'),
          False for a mid-bisect snapshot ('Partial results:').
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    self._PrintTestedCommitsHeader()
    # state tracks where we are in the output: 0 = still in the broken
    # range, 1 = crossed the last broken revision (the suspected CL),
    # 2 = crossed the first working revision (the good range).
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        # Prefer a human-viewable URL; fall back to the raw revision id.
        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3232 def _PrintReproSteps(self):
3233 print
3234 print 'To reproduce locally:'
3235 print '$ ' + self.opts.command
3236 if bisect_utils.IsTelemetryCommand(self.opts.command):
3237 print
3238 print 'Also consider passing --profiler=list to see available profilers.'
3240 def _PrintOtherRegressions(self, other_regressions, revision_data):
3241 print
3242 print 'Other regressions may have occurred:'
3243 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3244 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3245 for regression in other_regressions:
3246 current_id, previous_id, confidence = regression
3247 current_data = revision_data[current_id]
3248 previous_data = revision_data[previous_id]
3250 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3251 current_data['depot'])
3252 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3253 previous_data['depot'])
3255 # If we can't map it to a viewable URL, at least show the original hash.
3256 if not current_link:
3257 current_link = current_id
3258 if not previous_link:
3259 previous_link = previous_id
3261 print ' %8s %70s %s' % (
3262 current_data['depot'], current_link,
3263 ('%d%%' % confidence).center(10, ' '))
3264 print ' %8s %70s' % (
3265 previous_data['depot'], previous_link)
3266 print
3268 def _PrintStepTime(self, revision_data_sorted):
3269 step_perf_time_avg = 0.0
3270 step_build_time_avg = 0.0
3271 step_count = 0.0
3272 for _, current_data in revision_data_sorted:
3273 if current_data['value']:
3274 step_perf_time_avg += current_data['perf_time']
3275 step_build_time_avg += current_data['build_time']
3276 step_count += 1
3277 if step_count:
3278 step_perf_time_avg = step_perf_time_avg / step_count
3279 step_build_time_avg = step_build_time_avg / step_count
3280 print
3281 print 'Average build time : %s' % datetime.timedelta(
3282 seconds=int(step_build_time_avg))
3283 print 'Average test time : %s' % datetime.timedelta(
3284 seconds=int(step_perf_time_avg))
3286 def _PrintWarnings(self):
3287 if not self.warnings:
3288 return
3289 print
3290 print 'WARNINGS:'
3291 for w in set(self.warnings):
3292 print ' !!! %s' % w
3294 def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
3295 other_regressions = []
3296 previous_values = []
3297 previous_id = None
3298 for current_id, current_data in revision_data_sorted:
3299 current_values = current_data['value']
3300 if current_values:
3301 current_values = current_values['values']
3302 if previous_values:
3303 confidence = CalculateConfidence(previous_values, [current_values])
3304 mean_of_prev_runs = CalculateMean(sum(previous_values, []))
3305 mean_of_current_runs = CalculateMean(current_values)
3307 # Check that the potential regression is in the same direction as
3308 # the overall regression. If the mean of the previous runs < the
3309 # mean of the current runs, this local regression is in same
3310 # direction.
3311 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
3312 is_same_direction = (prev_less_than_current if
3313 bad_greater_than_good else not prev_less_than_current)
3315 # Only report potential regressions with high confidence.
3316 if is_same_direction and confidence > 50:
3317 other_regressions.append([current_id, previous_id, confidence])
3318 previous_values.append(current_values)
3319 previous_id = current_id
3320 return other_regressions
3323 def _GetResultsDict(self, revision_data, revision_data_sorted):
3324 # Find range where it possibly broke.
3325 first_working_revision = None
3326 first_working_revision_index = -1
3327 last_broken_revision = None
3328 last_broken_revision_index = -1
3330 for i in xrange(len(revision_data_sorted)):
3331 k, v = revision_data_sorted[i]
3332 if v['passed'] == 1:
3333 if not first_working_revision:
3334 first_working_revision = k
3335 first_working_revision_index = i
3337 if not v['passed']:
3338 last_broken_revision = k
3339 last_broken_revision_index = i
3341 if last_broken_revision != None and first_working_revision != None:
3342 broken_means = []
3343 for i in xrange(0, last_broken_revision_index + 1):
3344 if revision_data_sorted[i][1]['value']:
3345 broken_means.append(revision_data_sorted[i][1]['value']['values'])
3347 working_means = []
3348 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3349 if revision_data_sorted[i][1]['value']:
3350 working_means.append(revision_data_sorted[i][1]['value']['values'])
3352 # Flatten the lists to calculate mean of all values.
3353 working_mean = sum(working_means, [])
3354 broken_mean = sum(broken_means, [])
3356 # Calculate the approximate size of the regression
3357 mean_of_bad_runs = CalculateMean(broken_mean)
3358 mean_of_good_runs = CalculateMean(working_mean)
3360 regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
3361 mean_of_bad_runs)
3362 if math.isnan(regression_size):
3363 regression_size = 'zero-to-nonzero'
3365 regression_std_err = math.fabs(CalculatePooledStandardError(
3366 [working_mean, broken_mean]) /
3367 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
3369 # Give a "confidence" in the bisect. At the moment we use how distinct the
3370 # values are before and after the last broken revision, and how noisy the
3371 # overall graph is.
3372 confidence = CalculateConfidence(working_means, broken_means)
3374 culprit_revisions = []
3376 cwd = os.getcwd()
3377 self.ChangeToDepotWorkingDirectory(
3378 revision_data[last_broken_revision]['depot'])
3380 if revision_data[last_broken_revision]['depot'] == 'cros':
3381 # Want to get a list of all the commits and what depots they belong
3382 # to so that we can grab info about each.
3383 cmd = ['repo', 'forall', '-c',
3384 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3385 last_broken_revision, first_working_revision + 1)]
3386 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
3388 changes = []
3389 assert not return_code, 'An error occurred while running'\
3390 ' "%s"' % ' '.join(cmd)
3391 last_depot = None
3392 cwd = os.getcwd()
3393 for l in output.split('\n'):
3394 if l:
3395 # Output will be in form:
3396 # /path_to_depot
3397 # /path_to_other_depot
3398 # <SHA1>
3399 # /path_again
3400 # <SHA1>
3401 # etc.
3402 if l[0] == '/':
3403 last_depot = l
3404 else:
3405 contents = l.split(' ')
3406 if len(contents) > 1:
3407 changes.append([last_depot, contents[0]])
3408 for c in changes:
3409 os.chdir(c[0])
3410 info = self.source_control.QueryRevisionInfo(c[1])
3411 culprit_revisions.append((c[1], info, None))
3412 else:
3413 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3414 k, v = revision_data_sorted[i]
3415 if k == first_working_revision:
3416 break
3417 self.ChangeToDepotWorkingDirectory(v['depot'])
3418 info = self.source_control.QueryRevisionInfo(k)
3419 culprit_revisions.append((k, info, v['depot']))
3420 os.chdir(cwd)
3422 # Check for any other possible regression ranges
3423 other_regressions = self._FindOtherRegressions(revision_data_sorted,
3424 mean_of_bad_runs > mean_of_good_runs)
3426 # Check for warnings:
3427 if len(culprit_revisions) > 1:
3428 self.warnings.append('Due to build errors, regression range could '
3429 'not be narrowed down to a single commit.')
3430 if self.opts.repeat_test_count == 1:
3431 self.warnings.append('Tests were only set to run once. This may '
3432 'be insufficient to get meaningful results.')
3433 if confidence < 100:
3434 if confidence:
3435 self.warnings.append(
3436 'Confidence is less than 100%. There could be other candidates for '
3437 'this regression. Try bisecting again with increased repeat_count '
3438 'or on a sub-metric that shows the regression more clearly.')
3439 else:
3440 self.warnings.append(
3441 'Confidence is 0%. Try bisecting again on another platform, with '
3442 'increased repeat_count or on a sub-metric that shows the regression '
3443 'more clearly.')
3445 return {
3446 'first_working_revision': first_working_revision,
3447 'last_broken_revision': last_broken_revision,
3448 'culprit_revisions': culprit_revisions,
3449 'other_regressions': other_regressions,
3450 'regression_size': regression_size,
3451 'regression_std_err': regression_std_err,
3452 'confidence': confidence,
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run; must contain
          a 'revision_data' mapping of revision -> per-revision data.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      # 'passed' may be a bool or a status string ('Skipped',
      # 'Build Failed'); only translate the boolean case.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
          current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
            revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()

    self._PrintTestedCommitsTable(revision_data_sorted,
        results_dict['first_working_revision'],
        results_dict['last_broken_revision'],
        results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and
  returns a SourceControl object.

  (The docstring terminator was previously missing, leaving the function
  body swallowed by the string literal.)

  Args:
    opts: The bisect options, forwarded to the SourceControl constructor.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  (output, _) = RunGit(['rev-parse', '--is-inside-work-tree'])

  # git prints the literal string 'true' when run inside a work tree.
  if output.strip() == 'true':
    return GitSourceControl(opts)

  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line (unused).

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # 'except X, e' is legacy syntax removed in Python 3; 'as' is valid on
  # Python 2.6+ (PEP 3110).
  except OSError as e:
    # A concurrent removal (ENOENT) is fine; any other error is a failure.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs."""
  out_dir = os.path.join('out', build_type)
  build_dir = os.path.join('build', build_type)
  # Stop at the first failure; the second tree is only touched when the
  # first was cleaned successfully.
  if not RmTreeAndMkDir(out_dir):
    return False
  if not RmTreeAndMkDir(build_dir):
    return False
  return True
3577 class BisectOptions(object):
3578 """Options to be used when running bisection."""
3579 def __init__(self):
3580 super(BisectOptions, self).__init__()
3582 self.target_platform = 'chromium'
3583 self.build_preference = None
3584 self.good_revision = None
3585 self.bad_revision = None
3586 self.use_goma = None
3587 self.cros_board = None
3588 self.cros_remote_ip = None
3589 self.repeat_test_count = 20
3590 self.truncate_percent = 25
3591 self.max_time_minutes = 20
3592 self.metric = None
3593 self.command = None
3594 self.output_buildbot_annotations = None
3595 self.no_custom_deps = False
3596 self.working_directory = None
3597 self.extra_src = None
3598 self.debug_ignore_build = None
3599 self.debug_ignore_sync = None
3600 self.debug_ignore_perf_test = None
3601 self.gs_bucket = None
3602 self.target_arch = 'ia32'
3603 self.target_build_type = 'Release'
3604 self.builder_host = None
3605 self.builder_port = None
3606 self.bisect_mode = BISECT_MODE_MEAN
  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    # Options controlling what to bisect and how to measure it.
    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    group.add_option('--bisect_mode',
                     type='choice',
                     choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
                         BISECT_MODE_RETURN_CODE],
                     default=BISECT_MODE_MEAN,
                     help='The bisect mode. Choices are to bisect on the '
                     'difference in mean, std_dev, or return_code.')
    parser.add_option_group(group)

    # Options controlling how revisions are built.
    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     default='',
                     dest='gs_bucket',
                     type='str',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     dest='target_arch',
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    group.add_option('--target_build_type',
                     type='choice',
                     choices=['Release', 'Debug'],
                     default='Release',
                     help='The target build type. Choices are "Release" '
                     '(default), or "Debug".')
    group.add_option('--builder_host',
                     dest='builder_host',
                     type='str',
                     help=('Host address of server to produce build by posting'
                     ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     type='int',
                     help=('HTTP port of the server to produce build by posting'
                     ' try job request.'))
    parser.add_option_group(group)

    # Debug-only switches that skip expensive steps.
    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
    return parser
3758 def ParseCommandLine(self):
3759 """Parses the command line for bisect options."""
3760 parser = self._CreateCommandLineParser()
3761 (opts, _) = parser.parse_args()
3763 try:
3764 if not opts.command:
3765 raise RuntimeError('missing required parameter: --command')
3767 if not opts.good_revision:
3768 raise RuntimeError('missing required parameter: --good_revision')
3770 if not opts.bad_revision:
3771 raise RuntimeError('missing required parameter: --bad_revision')
3773 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3774 raise RuntimeError('missing required parameter: --metric')
3776 if opts.gs_bucket:
3777 if not cloud_storage.List(opts.gs_bucket):
3778 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3779 if not opts.builder_host:
3780 raise RuntimeError('Must specify try server hostname, when '
3781 'gs_bucket is used: --builder_host')
3782 if not opts.builder_port:
3783 raise RuntimeError('Must specify try server port number, when '
3784 'gs_bucket is used: --builder_port')
3785 if opts.target_platform == 'cros':
3786 # Run sudo up front to make sure credentials are cached for later.
3787 print 'Sudo is required to build cros:'
3788 print
3789 RunProcess(['sudo', 'true'])
3791 if not opts.cros_board:
3792 raise RuntimeError('missing required parameter: --cros_board')
3794 if not opts.cros_remote_ip:
3795 raise RuntimeError('missing required parameter: --cros_remote_ip')
3797 if not opts.working_directory:
3798 raise RuntimeError('missing required parameter: --working_directory')
3800 metric_values = opts.metric.split('/')
3801 if (len(metric_values) != 2 and
3802 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3803 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3805 opts.metric = metric_values
3806 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3807 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3808 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3809 opts.truncate_percent = opts.truncate_percent / 100.0
3811 for k, v in opts.__dict__.iteritems():
3812 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
3813 setattr(self, k, v)
3814 except RuntimeError, e:
3815 output_string = StringIO.StringIO()
3816 parser.print_help(file=output_string)
3817 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3818 output_string.close()
3819 raise RuntimeError(error_message)
3821 @staticmethod
3822 def FromDict(values):
3823 """Creates an instance of BisectOptions with the values parsed from a
3824 .cfg file.
3826 Args:
3827 values: a dict containing options to set.
3829 Returns:
3830 An instance of BisectOptions.
3832 opts = BisectOptions()
3833 for k, v in values.iteritems():
3834 assert hasattr(opts, k), 'Invalid %s attribute in '\
3835 'BisectOptions.' % k
3836 setattr(opts, k, v)
3838 metric_values = opts.metric.split('/')
3839 if len(metric_values) != 2:
3840 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3842 opts.metric = metric_values
3843 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3844 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3845 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3846 opts.truncate_percent = opts.truncate_percent / 100.0
3848 return opts
def main():
  """Entry point: parses options and runs the performance bisect.

  Returns:
    0 when the bisect completes successfully, 1 on any RuntimeError (the
    error is printed, wrapped in buildbot annotations when requested).
  """
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    # Register any additional depots (e.g. internal repos) supplied via
    # --extra_src before bisection starts.
    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    # When given a working directory, set up a dedicated bisect checkout
    # there and chdir into its src/ so later steps operate on it.
    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

    # Start from a clean slate so stale build artifacts can't skew results.
    if not RemoveBuildFiles(opts.target_build_type):
      raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
                         "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up bisect state, even if the run itself failed.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
if __name__ == '__main__':
  # Propagate main()'s return value (0 success, 1 failure) as the process
  # exit code.
  sys.exit(main())