Refactor bisect-perf-regression_test.py and add smoke test.
tools/bisect-perf-regression.py
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
35 """
37 import copy
38 import datetime
39 import errno
40 import hashlib
41 import math
42 import optparse
43 import os
44 import re
45 import shlex
46 import shutil
47 import StringIO
48 import sys
49 import time
50 import zipfile
52 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
54 from auto_bisect import bisect_utils
55 from auto_bisect import math_utils
56 from auto_bisect import post_perf_builder_job as bisect_builder
57 from auto_bisect import source_control as source_control_module
58 from auto_bisect import ttest
59 from telemetry.util import cloud_storage
61 # Below is the map of "depot" names to information about each depot. Each depot
62 # is a repository, and in the process of bisecting, revision ranges in these
63 # repositories may also be bisected.
65 # Each depot information dictionary may contain:
66 # src: Path to the working directory.
67 # recurse: True if this repository will get bisected.
68 # depends: A list of other repositories that are actually part of the same
69 # repository in svn. If the repository has any dependent repositories
70 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then
71 # they are specified here.
72 # svn: URL of SVN repository. Needed for git workflow to resolve hashes to
73 # SVN revisions.
74 # from: Parent depot that must be bisected before this is bisected.
75 # deps_var: Key name in vars variable in DEPS file that has revision
76 # information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'src': 'src',
    'recurse': True,
    'depends': None,
    'from': ['cros', 'android-chrome'],
    'viewvc':
        'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'viewvc':
        'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'platform': 'nt',
    'deps_var': 'angle_revision'
  },
  'v8': {
    'src': 'src/v8',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'recurse': True,
    'depends': None,
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'from': ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'recurse': True,
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
}
152 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
154 CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
155 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
156 CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
157 'testing_rsa')
158 CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
159 'mod_for_test_scripts', 'ssh_keys',
160 'testing_rsa')
162 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision.
163 BUILD_RESULT_SUCCEED = 0
164 BUILD_RESULT_FAIL = 1
165 BUILD_RESULT_SKIPPED = 2
167 # Maximum time in seconds to wait after posting build request to tryserver.
168 # TODO: Change these values based on the actual time taken by buildbots on
169 # the tryserver.
170 MAX_MAC_BUILD_TIME = 14400
171 MAX_WIN_BUILD_TIME = 14400
172 MAX_LINUX_BUILD_TIME = 14400
# Patch template to add a new file, DEPS.sha, under the src folder.
# This file contains the SHA1 hash of the DEPS changes made while bisecting
# dependency repositories. The patch is sent to the tryserver along with the
# DEPS patch. When a build request is posted with a patch, the bisect builders
# on the tryserver read the SHA value from this file once the build is
# produced and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""
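# Illustrative sketch (not part of the original script): the template above is
# filled with the SHA1 of the DEPS diff and appended to it, e.g.
#
#   deps_patch = <diff of the DEPS change>
#   sha_patch = DEPS_SHA_PATCH % {'deps_sha': GetSHA1HexDigest(deps_patch)}
#   patch = '%s\n%s' % (deps_patch, sha_patch)
#
# which mirrors how DownloadCurrentBuild combines the two patches below.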
188 # The possible values of the --bisect_mode flag, which determines what to
189 # use when classifying a revision as "good" or "bad".
190 BISECT_MODE_MEAN = 'mean'
191 BISECT_MODE_STD_DEV = 'std_dev'
192 BISECT_MODE_RETURN_CODE = 'return_code'
194 # The perf dashboard specifically looks for the string
195 # "Estimated Confidence: 95%" to decide whether or not to cc the author(s).
196 # If you change this, please update the perf dashboard as well.
197 RESULTS_BANNER = """
198 ===== BISECT JOB RESULTS =====
199 Status: %(status)s
201 Test Command: %(command)s
202 Test Metric: %(metrics)s
203 Relative Change: %(change)s
204 Estimated Confidence: %(confidence)d%%"""
206 # The perf dashboard specifically looks for the string
207 # "Author : " to parse out who to cc on a bug. If you change the
208 # formatting here, please update the perf dashboard as well.
209 RESULTS_REVISION_INFO = """
210 ===== SUSPECTED CL(s) =====
211 Subject : %(subject)s
212 Author : %(author)s%(email_info)s%(commit_info)s
213 Commit : %(cl)s
214 Date : %(cl_date)s"""
216 REPRO_STEPS_LOCAL = """
217 ==== INSTRUCTIONS TO REPRODUCE ====
218 To run locally:
219 $%(command)s"""
221 REPRO_STEPS_TRYJOB = """
222 To reproduce on Performance trybot:
223 1. Create new git branch or check out existing branch.
224 2. Edit tools/run-perf-test.cfg (instructions in file) or \
225 third_party/WebKit/Tools/run-perf-test.cfg.
226 a) Take care to strip any src/ directories from the head of \
227 relative path names.
228 b) On desktop, only --browser=release is supported, on android \
229 --browser=android-chromium-testshell.
230 c) Test command to use: %(command)s
231 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
232 committed locally to run-perf-test.cfg.
233 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
234 $ git cl upload --bypass-hooks
235 4. Send your try job to the tryserver. \
236 [Please make sure to use appropriate bot to reproduce]
237 $ git cl try -m tryserver.chromium.perf -b <bot>
239 For more details please visit
240 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""
242 RESULTS_THANKYOU = """
243 ===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
244 Visit http://www.chromium.org/developers/core-principles for Chrome's policy
245 on perf regressions.
246 Contact chrome-perf-dashboard-team with any questions or suggestions about
247 bisecting.
248 . .------.
249 . .---. \ \==)
250 . |PERF\ \ \\
251 . | ---------'-------'-----------.
252 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
253 . \______________.-------._______________)
254 . / /
255 . / /
256 . / /==)
257 . ._______."""
260 def _AddAdditionalDepotInfo(depot_info):
261 """Adds additional depot info to the global depot variables."""
262 global DEPOT_DEPS_NAME
263 global DEPOT_NAMES
264 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
265 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
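# Hypothetical usage sketch (names are made up for illustration): a caller can
# register an extra depot to bisect before starting, e.g.
#
#   _AddAdditionalDepotInfo({
#       'my_depot': {
#           'src': 'src/third_party/my_depot',
#           'recurse': True,
#           'depends': None,
#           'from': ['chromium'],
#           'deps_var': 'my_depot_revision',
#       },
#   })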
268 def ConfidenceScore(good_results_lists, bad_results_lists):
269 """Calculates a confidence score.
271 This score is a percentage which represents our degree of confidence in the
272 proposition that the good results and bad results are distinct groups, and
273 their differences aren't due to chance alone.
276 Args:
277 good_results_lists: A list of lists of "good" result numbers.
278 bad_results_lists: A list of lists of "bad" result numbers.
280 Returns:
    A number in the range [0, 100].
  """
283 if not good_results_lists or not bad_results_lists:
284 return 0.0
286 # Flatten the lists of results lists.
287 sample1 = sum(good_results_lists, [])
288 sample2 = sum(bad_results_lists, [])
289 if not sample1 or not sample2:
290 return 0.0
292 # The p-value is approximately the probability of obtaining the given set
293 # of good and bad values just by chance.
294 _, _, p_value = ttest.WelchsTTest(sample1, sample2)
295 return 100.0 * (1.0 - p_value)
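# Illustrative examples (made-up numbers): well-separated samples yield a
# p-value near 0 and a confidence near 100, while overlapping samples yield a
# confidence near 0.
#
#   ConfidenceScore([[10.0, 10.1, 10.2]], [[20.0, 20.1, 20.2]])  # ~100.0
#   ConfidenceScore([[10.0, 20.0]], [[10.1, 19.9]])              # ~0.0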
298 def GetSHA1HexDigest(contents):
299 """Returns SHA1 hex digest of the given string."""
300 return hashlib.sha1(contents).hexdigest()
303 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
304 """Gets the archive file name for the given revision."""
305 def PlatformName():
306 """Return a string to be used in paths for the platform."""
307 if bisect_utils.IsWindowsHost():
      # The build archive for x64 is still stored with a 'win32' suffix
      # (see chromium_utils.PlatformName()).
310 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
311 return 'win32'
312 return 'win32'
313 if bisect_utils.IsLinuxHost():
314 # Android builds too are archived with full-build-linux* prefix.
315 return 'linux'
316 if bisect_utils.IsMacHost():
317 return 'mac'
318 raise NotImplementedError('Unknown platform "%s".' % sys.platform)
320 base_name = 'full-build-%s' % PlatformName()
321 if not build_revision:
322 return base_name
323 if patch_sha:
324 build_revision = '%s_%s' % (build_revision , patch_sha)
325 return '%s_%s.zip' % (base_name, build_revision)
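# For illustration (assuming a Linux host and the default arguments), this
# yields archive names such as:
#
#   GetZipFileName()                    # 'full-build-linux'
#   GetZipFileName('123456')            # 'full-build-linux_123456.zip'
#   GetZipFileName('123456', patch_sha='<sha1>')
#                                       # 'full-build-linux_123456_<sha1>.zip'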
328 def GetRemoteBuildPath(build_revision, target_platform='chromium',
329 target_arch='ia32', patch_sha=None):
330 """Compute the url to download the build from."""
331 def GetGSRootFolderName(target_platform):
332 """Gets Google Cloud Storage root folder names"""
333 if bisect_utils.IsWindowsHost():
334 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
335 return 'Win x64 Builder'
336 return 'Win Builder'
337 if bisect_utils.IsLinuxHost():
338 if target_platform == 'android':
339 return 'android_perf_rel'
340 return 'Linux Builder'
341 if bisect_utils.IsMacHost():
342 return 'Mac Builder'
343 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
345 base_filename = GetZipFileName(
346 build_revision, target_arch, patch_sha)
347 builder_folder = GetGSRootFolderName(target_platform)
348 return '%s/%s' % (builder_folder, base_filename)
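# For example (assuming the folder names above and a Linux host), a Chromium
# build for revision 123456 resolves to:
#
#   GetRemoteBuildPath('123456')  # 'Linux Builder/full-build-linux_123456.zip'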
351 def FetchFromCloudStorage(bucket_name, source_path, destination_path):
352 """Fetches file(s) from the Google Cloud Storage.
354 Args:
355 bucket_name: Google Storage bucket name.
356 source_path: Source file path.
357 destination_path: Destination file path.
359 Returns:
    Downloaded file path if it exists, otherwise None.
  """
362 target_file = os.path.join(destination_path, os.path.basename(source_path))
363 try:
364 if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path)
366 cloud_storage.Get(bucket_name, source_path, destination_path)
367 if os.path.exists(target_file):
368 return target_file
369 else:
370 print ('File gs://%s/%s not found in cloud storage.' % (
371 bucket_name, source_path))
372 except Exception as e:
373 print 'Something went wrong while fetching file from cloud: %s' % e
374 if os.path.exists(target_file):
375 os.remove(target_file)
376 return None
379 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
380 def MaybeMakeDirectory(*path):
381 """Creates an entire path, if it doesn't already exist."""
382 file_path = os.path.join(*path)
383 try:
384 os.makedirs(file_path)
385 except OSError as e:
386 if e.errno != errno.EEXIST:
387 return False
388 return True
391 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
392 def ExtractZip(filename, output_dir, verbose=True):
  """Extracts the zip archive into the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command because it handles links and
  # file bits (executable), which is much easier than trying to do that with
  # ZipInfo options.
  #
  # The Mac version of unzip unfortunately does not support Zip64, whereas
  # the Python module does, so we have to fall back to the Python zip module
  # on Mac if the file size is greater than 4GB.
404 # On Windows, try to use 7z if it is installed, otherwise fall back to python
405 # zip module and pray we don't have files larger than 512MB to unzip.
406 unzip_cmd = None
407 if ((bisect_utils.IsMacHost()
408 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
409 or bisect_utils.IsLinuxHost()):
410 unzip_cmd = ['unzip', '-o']
411 elif (bisect_utils.IsWindowsHost()
412 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
413 unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
415 if unzip_cmd:
416 # Make sure path is absolute before changing directories.
417 filepath = os.path.abspath(filename)
418 saved_dir = os.getcwd()
419 os.chdir(output_dir)
420 command = unzip_cmd + [filepath]
421 result = bisect_utils.RunProcess(command)
422 os.chdir(saved_dir)
423 if result:
424 raise IOError('unzip failed: %s => %s' % (str(command), result))
425 else:
426 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
427 zf = zipfile.ZipFile(filename)
428 for name in zf.namelist():
429 if verbose:
430 print 'Extracting %s' % name
431 zf.extract(name, output_dir)
432 if bisect_utils.IsMacHost():
433 # Restore permission bits.
434 os.chmod(os.path.join(output_dir, name),
435 zf.getinfo(name).external_attr >> 16L)
438 def SetBuildSystemDefault(build_system, use_goma, goma_dir):
  """Sets up environment variables needed to build with the given build system.

  Args:
    build_system: A string specifying the build system. Currently only 'ninja'
        and 'make' are supported.
  """
446 if build_system == 'ninja':
447 gyp_var = os.getenv('GYP_GENERATORS', default='')
449 if not gyp_var or not 'ninja' in gyp_var:
450 if gyp_var:
451 os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
452 else:
453 os.environ['GYP_GENERATORS'] = 'ninja'
455 if bisect_utils.IsWindowsHost():
456 os.environ['GYP_DEFINES'] = ('component=shared_library '
457 'incremental_chrome_dll=1 '
458 'disable_nacl=1 fastbuild=1 '
459 'chromium_win_pch=0')
461 elif build_system == 'make':
462 os.environ['GYP_GENERATORS'] = 'make'
463 else:
464 raise RuntimeError('%s build not supported.' % build_system)
466 if use_goma:
467 os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''),
468 'use_goma=1')
469 if goma_dir:
470 os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir
473 def BuildWithMake(threads, targets, build_type='Release'):
474 cmd = ['make', 'BUILDTYPE=%s' % build_type]
476 if threads:
477 cmd.append('-j%d' % threads)
479 cmd += targets
481 return_code = bisect_utils.RunProcess(cmd)
483 return not return_code
486 def BuildWithNinja(threads, targets, build_type='Release'):
487 cmd = ['ninja', '-C', os.path.join('out', build_type)]
489 if threads:
490 cmd.append('-j%d' % threads)
492 cmd += targets
494 return_code = bisect_utils.RunProcess(cmd)
496 return not return_code
499 def BuildWithVisualStudio(targets, build_type='Release'):
500 path_to_devenv = os.path.abspath(
501 os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
502 path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
503 cmd = [path_to_devenv, '/build', build_type, path_to_sln]
505 for t in targets:
506 cmd.extend(['/Project', t])
508 return_code = bisect_utils.RunProcess(cmd)
510 return not return_code
513 def WriteStringToFile(text, file_name):
514 try:
515 with open(file_name, 'wb') as f:
516 f.write(text)
517 except IOError:
518 raise RuntimeError('Error writing to file [%s]' % file_name )
521 def ReadStringFromFile(file_name):
522 try:
523 with open(file_name) as f:
524 return f.read()
525 except IOError:
526 raise RuntimeError('Error reading file [%s]' % file_name )
529 def ChangeBackslashToSlashInPatch(diff_text):
530 """Formats file paths in the given text to unix-style paths."""
531 if diff_text:
532 diff_lines = diff_text.split('\n')
533 for i in range(len(diff_lines)):
534 if (diff_lines[i].startswith('--- ') or
535 diff_lines[i].startswith('+++ ')):
536 diff_lines[i] = diff_lines[i].replace('\\', '/')
537 return '\n'.join(diff_lines)
538 return None
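# Illustrative example: only the '--- ' and '+++ ' header lines are rewritten.
#
#   ChangeBackslashToSlashInPatch('--- a\\src\\DEPS\n+++ b\\src\\DEPS\n+foo')
#   # returns '--- a/src/DEPS\n+++ b/src/DEPS\n+foo'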
541 class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
544 def __init__(self, opts):
545 """Performs setup for building with target build system.
547 Args:
      opts: Options parsed from the command line.
    """
550 if bisect_utils.IsWindowsHost():
551 if not opts.build_preference:
552 opts.build_preference = 'msvs'
554 if opts.build_preference == 'msvs':
555 if not os.getenv('VS100COMNTOOLS'):
556 raise RuntimeError(
557 'Path to visual studio could not be determined.')
558 else:
559 SetBuildSystemDefault(opts.build_preference, opts.use_goma,
560 opts.goma_dir)
561 else:
562 if not opts.build_preference:
563 if 'ninja' in os.getenv('GYP_GENERATORS', default=''):
564 opts.build_preference = 'ninja'
565 else:
566 opts.build_preference = 'make'
568 SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)
570 if not bisect_utils.SetupPlatformBuildEnvironment(opts):
571 raise RuntimeError('Failed to set platform environment.')
573 @staticmethod
574 def FromOpts(opts):
575 builder = None
576 if opts.target_platform == 'cros':
577 builder = CrosBuilder(opts)
578 elif opts.target_platform == 'android':
579 builder = AndroidBuilder(opts)
580 elif opts.target_platform == 'android-chrome':
581 builder = AndroidChromeBuilder(opts)
582 else:
583 builder = DesktopBuilder(opts)
584 return builder
586 def Build(self, depot, opts):
587 raise NotImplementedError()
589 def GetBuildOutputDirectory(self, opts, src_dir=None):
590 """Returns the path to the build directory, relative to the checkout root.
    Assumes that the current working directory is the checkout root.
    """
594 src_dir = src_dir or 'src'
595 if opts.build_preference == 'ninja' or bisect_utils.IsLinuxHost():
596 return os.path.join(src_dir, 'out')
597 if bisect_utils.IsMacHost():
598 return os.path.join(src_dir, 'xcodebuild')
599 if bisect_utils.IsWindowsHost():
600 return os.path.join(src_dir, 'build')
601 raise NotImplementedError('Unexpected platform %s' % sys.platform)
604 class DesktopBuilder(Builder):
605 """DesktopBuilder is used to build Chromium on linux/mac/windows."""
606 def __init__(self, opts):
607 super(DesktopBuilder, self).__init__(opts)
609 def Build(self, depot, opts):
610 """Builds chromium_builder_perf target using options passed into
611 the script.
613 Args:
614 depot: Current depot being bisected.
615 opts: The options parsed from the command line.
617 Returns:
      True if build was successful.
    """
620 targets = ['chromium_builder_perf']
622 threads = None
623 if opts.use_goma:
624 threads = 64
626 build_success = False
627 if opts.build_preference == 'make':
628 build_success = BuildWithMake(threads, targets, opts.target_build_type)
629 elif opts.build_preference == 'ninja':
630 build_success = BuildWithNinja(threads, targets, opts.target_build_type)
631 elif opts.build_preference == 'msvs':
632 assert bisect_utils.IsWindowsHost(), 'msvs is only supported on Windows.'
633 build_success = BuildWithVisualStudio(targets, opts.target_build_type)
634 else:
635 assert False, 'No build system defined.'
636 return build_success
639 class AndroidBuilder(Builder):
640 """AndroidBuilder is used to build on android."""
641 def __init__(self, opts):
642 super(AndroidBuilder, self).__init__(opts)
644 def _GetTargets(self):
645 return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']
647 def Build(self, depot, opts):
648 """Builds the android content shell and other necessary tools using options
649 passed into the script.
651 Args:
652 depot: Current depot being bisected.
653 opts: The options parsed from the command line.
655 Returns:
      True if build was successful.
    """
658 threads = None
659 if opts.use_goma:
660 threads = 64
662 build_success = False
663 if opts.build_preference == 'ninja':
664 build_success = BuildWithNinja(
665 threads, self._GetTargets(), opts.target_build_type)
666 else:
667 assert False, 'No build system defined.'
669 return build_success
672 class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build Chrome for Android."""
674 def __init__(self, opts):
675 super(AndroidChromeBuilder, self).__init__(opts)
677 def _GetTargets(self):
678 return AndroidBuilder._GetTargets(self) + ['chrome_apk']
681 class CrosBuilder(Builder):
682 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
683 target platform."""
684 def __init__(self, opts):
685 super(CrosBuilder, self).__init__(opts)
687 def ImageToTarget(self, opts):
688 """Installs latest image to target specified by opts.cros_remote_ip.
690 Args:
691 opts: Program options containing cros_board and cros_remote_ip.
693 Returns:
      True if successful.
    """
696 try:
697 # Keys will most likely be set to 0640 after wiping the chroot.
698 os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
699 os.chmod(CROS_TEST_KEY_PATH, 0600)
700 cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
701 '--remote=%s' % opts.cros_remote_ip,
702 '--board=%s' % opts.cros_board, '--test', '--verbose']
704 return_code = bisect_utils.RunProcess(cmd)
705 return not return_code
706 except OSError:
707 return False
709 def BuildPackages(self, opts, depot):
710 """Builds packages for cros.
712 Args:
713 opts: Program options containing cros_board.
714 depot: The depot being bisected.
716 Returns:
      True if successful.
    """
719 cmd = [CROS_SDK_PATH]
721 if depot != 'cros':
722 path_to_chrome = os.path.join(os.getcwd(), '..')
723 cmd += ['--chrome_root=%s' % path_to_chrome]
725 cmd += ['--']
727 if depot != 'cros':
728 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
730 cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
731 '--board=%s' % opts.cros_board]
732 return_code = bisect_utils.RunProcess(cmd)
734 return not return_code
736 def BuildImage(self, opts, depot):
737 """Builds test image for cros.
739 Args:
740 opts: Program options containing cros_board.
741 depot: The depot being bisected.
743 Returns:
      True if successful.
    """
746 cmd = [CROS_SDK_PATH]
748 if depot != 'cros':
749 path_to_chrome = os.path.join(os.getcwd(), '..')
750 cmd += ['--chrome_root=%s' % path_to_chrome]
752 cmd += ['--']
754 if depot != 'cros':
755 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
757 cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
758 '--board=%s' % opts.cros_board, 'test']
760 return_code = bisect_utils.RunProcess(cmd)
762 return not return_code
764 def Build(self, depot, opts):
765 """Builds targets using options passed into the script.
767 Args:
768 depot: Current depot being bisected.
769 opts: The options parsed from the command line.
771 Returns:
      True if build was successful.
    """
774 if self.BuildPackages(opts, depot):
775 if self.BuildImage(opts, depot):
776 return self.ImageToTarget(opts)
777 return False
780 def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
781 """Parses the vars section of the DEPS file with regex.
783 Args:
784 deps_file_contents: The DEPS file contents as a string.
786 Returns:
    A dict in the format {depot: revision} if successful, otherwise None.
  """
789 # We'll parse the "vars" section of the DEPS file.
790 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
791 re_results = rxp.search(deps_file_contents)
793 if not re_results:
794 return None
796 # We should be left with a series of entries in the vars component of
797 # the DEPS file with the following format:
798 # 'depot_name': 'revision',
799 vars_body = re_results.group('vars_body')
800 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
801 re.MULTILINE)
802 re_results = rxp.findall(vars_body)
804 return dict(re_results)
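# Illustrative example (made-up DEPS snippet):
#
#   _ParseRevisionsFromDEPSFileManually(
#       "vars = {\n"
#       "  'webkit_revision': '159040',\n"
#       "  'v8_revision': '17044',\n"
#       "}")
#   # returns {'webkit_revision': '159040', 'v8_revision': '17044'}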
807 def _WaitUntilBuildIsReady(
808 fetch_build, bot_name, builder_host, builder_port, build_request_id,
809 max_timeout):
810 """Waits until build is produced by bisect builder on tryserver.
812 Args:
813 fetch_build: Function to check and download build from cloud storage.
814 bot_name: Builder bot name on tryserver.
    builder_host: Tryserver hostname.
    builder_port: Tryserver port.
    build_request_id: A unique ID of the build request posted to the tryserver.
    max_timeout: Maximum time to wait for the build.

  Returns:
    Downloaded archive file path if it exists, otherwise None.
  """
823 # Build number on the tryserver.
824 build_num = None
825 # Interval to check build on cloud storage.
826 poll_interval = 60
827 # Interval to check build status on tryserver.
828 status_check_interval = 600
829 last_status_check = time.time()
830 start_time = time.time()
831 while True:
832 # Checks for build on gs://chrome-perf and download if exists.
833 res = fetch_build()
834 if res:
835 return (res, 'Build successfully found')
836 elapsed_status_check = time.time() - last_status_check
    # To avoid overloading the tryserver with status check requests, check the
    # build status only once every 10 minutes.
839 if elapsed_status_check > status_check_interval:
840 last_status_check = time.time()
841 if not build_num:
842 # Get the build number on tryserver for the current build.
843 build_num = bisect_builder.GetBuildNumFromBuilder(
844 build_request_id, bot_name, builder_host, builder_port)
845 # Check the status of build using the build number.
      # Note: A build is treated as PENDING if the build number is not found
      # on the tryserver.
848 build_status, status_link = bisect_builder.GetBuildStatus(
849 build_num, bot_name, builder_host, builder_port)
850 if build_status == bisect_builder.FAILED:
851 return (None, 'Failed to produce build, log: %s' % status_link)
852 elapsed_time = time.time() - start_time
853 if elapsed_time > max_timeout:
854 return (None, 'Timed out: %ss without build' % max_timeout)
856 print 'Time elapsed: %ss without build.' % elapsed_time
857 time.sleep(poll_interval)
    # For some reason, the Mac bisect bots were not flushing stdout
    # periodically, and as a result the buildbot command timed out. Flush
    # stdout on all platforms while waiting for the build.
861 sys.stdout.flush()
864 def _UpdateV8Branch(deps_content):
865 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
  Checks for "v8_branch" in the DEPS file and, if present, updates its value
  to the v8_bleeding_edge branch. Note: "v8_branch" was only added to the DEPS
  vars at DEPS revision 254916, so to support earlier DEPS revisions we also
  check for the "src/v8": <v8 source path> entry in DEPS and update that.

  Args:
    deps_content: DEPS file contents to be modified.

  Returns:
    Modified DEPS file contents as a string.
  """
879 new_branch = r'branches/bleeding_edge'
880 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
881 if re.search(v8_branch_pattern, deps_content):
882 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
883 else:
884 # Replaces the branch assigned to "src/v8" key in DEPS file.
885 # Format of "src/v8" in DEPS:
886 # "src/v8":
887 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
    # So "/trunk@" is replaced with "/branches/bleeding_edge@".
889 v8_src_pattern = re.compile(
890 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
891 if re.search(v8_src_pattern, deps_content):
892 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
893 return deps_content
896 def _UpdateDEPSForAngle(revision, depot, deps_file):
897 """Updates DEPS file with new revision for Angle repository.
  This is a hack for the Angle depot: in the DEPS file, the "vars" dictionary
  contains an "angle_revision" key that holds a git hash instead of an SVN
  revision. Sometimes the "angle_revision" key is not specified in "vars" at
  all; in that case, look in the "deps" dictionary for an entry matching
  angle.git@[a-fA-F0-9]{40}$ and replace the git hash there.
  """
907 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
908 try:
909 deps_contents = ReadStringFromFile(deps_file)
910 # Check whether the depot and revision pattern in DEPS file vars variable
911 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
912 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
913 deps_var, re.MULTILINE)
    match = re.search(angle_rev_pattern, deps_contents)
    if match:
      # Update the revision information for the given depot.
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
918 else:
919 # Check whether the depot and revision pattern in DEPS file deps
920 # variable. e.g.,
921 # "src/third_party/angle": Var("chromium_git") +
922 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
923 angle_rev_pattern = re.compile(
924 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
925 match = re.search(angle_rev_pattern, deps_contents)
926 if not match:
927 print 'Could not find angle revision information in DEPS file.'
928 return False
929 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
930 # Write changes to DEPS file
931 WriteStringToFile(new_data, deps_file)
932 return True
933 except IOError, e:
934 print 'Something went wrong while updating DEPS file, %s' % e
935 return False
938 def _TryParseHistogramValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<graph>, <trace>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found, [] if none were found.
  """
948 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
950 text_lines = text.split('\n')
951 values_list = []
953 for current_line in text_lines:
954 if metric_formatted in current_line:
955 current_line = current_line[len(metric_formatted):]
957 try:
958 histogram_values = eval(current_line)
960 for b in histogram_values['buckets']:
961 average_for_bucket = float(b['high'] + b['low']) * 0.5
962 # Extends the list with N-elements with the average for that bucket.
963 values_list.extend([average_for_bucket] * b['count'])
964 except Exception:
965 pass
967 return values_list
970 def _TryParseResultValuesFromOutput(metric, text):
971 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
973 Args:
    metric: The metric as a list of [<graph>, <trace>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
980 # Format is: RESULT <graph>: <trace>= <value> <units>
981 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
983 # The log will be parsed looking for format:
984 # <*>RESULT <graph_name>: <trace_name>= <value>
985 single_result_re = re.compile(
986 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
988 # The log will be parsed looking for format:
989 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
990 multi_results_re = re.compile(
991 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
993 # The log will be parsed looking for format:
994 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
995 mean_stddev_re = re.compile(
996 metric_re +
997 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
999 text_lines = text.split('\n')
1000 values_list = []
1001 for current_line in text_lines:
1002 # Parse the output from the performance test for the metric we're
1003 # interested in.
1004 single_result_match = single_result_re.search(current_line)
1005 multi_results_match = multi_results_re.search(current_line)
1006 mean_stddev_match = mean_stddev_re.search(current_line)
1007 if (not single_result_match is None and
1008 single_result_match.group('VALUE')):
1009 values_list += [single_result_match.group('VALUE')]
1010 elif (not multi_results_match is None and
1011 multi_results_match.group('VALUES')):
1012 metric_values = multi_results_match.group('VALUES')
1013 values_list += metric_values.split(',')
1014 elif (not mean_stddev_match is None and
1015 mean_stddev_match.group('MEAN')):
1016 values_list += [mean_stddev_match.group('MEAN')]
1018 values_list = [float(v) for v in values_list
1019 if bisect_utils.IsStringFloat(v)]
1021 # If the metric is times/t, we need to sum the timings in order to get
1022 # similar regression results as the try-bots.
1023 metrics_to_sum = [
1024 ['times', 't'],
1025 ['times', 'page_load_time'],
1026 ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]
1030 if metric in metrics_to_sum:
1031 if values_list:
1032 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1034 return values_list
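# Illustrative example (made-up log line), matching the multi-value RESULT
# format above:
#
#   _TryParseResultValuesFromOutput(
#       ['shutdown', 'simple-user-quit'],
#       'RESULT shutdown: simple-user-quit= [1.5,2.0,2.5] ms')
#   # returns [1.5, 2.0, 2.5]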
1037 def _ParseMetricValuesFromOutput(metric, text):
1038 """Parses output from performance_ui_tests and retrieves the results for
1039 a given metric.
1041 Args:
1042 metric: The metric as a list of [<trace>, <value>] strings.
1043 text: The text to parse the metric values from.
1045 Returns:
    A list of floating point numbers found.
  """
1048 metric_values = _TryParseResultValuesFromOutput(metric, text)
1050 if not metric_values:
1051 metric_values = _TryParseHistogramValuesFromOutput(metric, text)
1053 return metric_values
1056 def _GenerateProfileIfNecessary(command_args):
1057 """Checks the command line of the performance test for dependencies on
1058 profile generation, and runs tools/perf/generate_profile as necessary.
1060 Args:
1061 command_args: Command line being passed to performance test, as a list.
1063 Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
1066 if '--profile-dir' in ' '.join(command_args):
1067 # If we were using python 2.7+, we could just use the argparse
1068 # module's parse_known_args to grab --profile-dir. Since some of the
1069 # bots still run 2.6, have to grab the arguments manually.
1070 arg_dict = {}
1071 args_to_parse = ['--profile-dir', '--browser']
1073 for arg_to_parse in args_to_parse:
1074 for i, current_arg in enumerate(command_args):
1075 if arg_to_parse in current_arg:
1076 current_arg_split = current_arg.split('=')
1078 # Check 2 cases, --arg=<val> and --arg <val>
1079 if len(current_arg_split) == 2:
1080 arg_dict[arg_to_parse] = current_arg_split[1]
1081 elif i + 1 < len(command_args):
1082 arg_dict[arg_to_parse] = command_args[i+1]
1084 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1086 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1087 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1088 return not bisect_utils.RunProcess(['python', path_to_generate,
1089 '--profile-type-to-generate', profile_type,
1090 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1091 return False
1092 return True
1095 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data):
1096 """Adds new revisions to the revision_data dict and initializes them.
1098 Args:
1099 revisions: List of revisions to add.
1100 depot: Depot that's currently in use (src, webkit, etc...)
1101 sort: Sorting key for displaying revisions.
1102 revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
1105 num_depot_revisions = len(revisions)
1107 for _, v in revision_data.iteritems():
1108 if v['sort'] > sort:
1109 v['sort'] += num_depot_revisions
1111 for i in xrange(num_depot_revisions):
1112 r = revisions[i]
1113 revision_data[r] = {
1114 'revision' : r,
1115 'depot' : depot,
1116 'value' : None,
1117 'perf_time' : 0,
1118 'build_time' : 0,
1119 'passed' : '?',
        'sort' : i + sort + 1,
    }
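# Illustrative sketch of the sort-key bookkeeping above: if revision_data
# already holds entries with sort keys 1 and 2 and three new revisions are
# added with sort=1, the existing entry at sort 2 is pushed to 5 and the new
# entries receive sort keys 2, 3 and 4.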
1124 def _PrintThankYou():
1125 print RESULTS_THANKYOU
1128 def _PrintTableRow(column_widths, row_data):
1129 """Prints out a row in a formatted table that has columns aligned.
1131 Args:
1132 column_widths: A list of column width numbers.
1133 row_data: A list of items for each column in this row.
1135 assert len(column_widths) == len(row_data)
1136 text = ''
1137 for i in xrange(len(column_widths)):
1138 current_row_data = row_data[i].center(column_widths[i], ' ')
1139 text += ('%%%ds' % column_widths[i]) % current_row_data
1140 print text
1143 def _PrintStepTime(revision_data_sorted):
1144 """Prints information about how long various steps took.
1146 Args:
1147 revision_data_sorted: The sorted list of revision data dictionaries."""
1148 step_perf_time_avg = 0.0
1149 step_build_time_avg = 0.0
1150 step_count = 0.0
1151 for _, current_data in revision_data_sorted:
1152 if current_data['value']:
1153 step_perf_time_avg += current_data['perf_time']
1154 step_build_time_avg += current_data['build_time']
1155 step_count += 1
1156 if step_count:
1157 step_perf_time_avg = step_perf_time_avg / step_count
1158 step_build_time_avg = step_build_time_avg / step_count
1159 print
1160 print 'Average build time : %s' % datetime.timedelta(
1161 seconds=int(step_build_time_avg))
1162 print 'Average test time : %s' % datetime.timedelta(
1163 seconds=int(step_perf_time_avg))
1165 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
1166 """Compiles a list of other possible regressions from the revision data.
1168 Args:
1169 revision_data_sorted: Sorted list of (revision, revision data dict) pairs.
1170 bad_greater_than_good: Whether the result value at the "bad" revision is
1171 numerically greater than the result value at the "good" revision.
1173 Returns:
1174 A list of [current_rev, previous_rev, confidence] for other places where
    there may have been a regression.
  """
1177 other_regressions = []
1178 previous_values = []
1179 previous_id = None
1180 for current_id, current_data in revision_data_sorted:
1181 current_values = current_data['value']
1182 if current_values:
1183 current_values = current_values['values']
1184 if previous_values:
1185 confidence = ConfidenceScore(previous_values, [current_values])
1186 mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
1187 mean_of_current_runs = math_utils.Mean(current_values)
1189 # Check that the potential regression is in the same direction as
1190 # the overall regression. If the mean of the previous runs < the
1191 # mean of the current runs, this local regression is in same
1192 # direction.
1193 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
1194 is_same_direction = (prev_less_than_current if
1195 bad_greater_than_good else not prev_less_than_current)
1197 # Only report potential regressions with high confidence.
1198 if is_same_direction and confidence > 50:
1199 other_regressions.append([current_id, previous_id, confidence])
1200 previous_values.append(current_values)
1201 previous_id = current_id
1202 return other_regressions
1204 class BisectPerformanceMetrics(object):
1205 """This class contains functionality to perform a bisection of a range of
1206 revisions to narrow down where performance regressions may have occurred.
  The main entry-point is the Run method.
  """
1211 def __init__(self, source_control, opts):
1212 super(BisectPerformanceMetrics, self).__init__()
1214 self.opts = opts
1215 self.source_control = source_control
1216 self.src_cwd = os.getcwd()
1217 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1218 self.depot_cwd = {}
1219 self.cleanup_commands = []
1220 self.warnings = []
1221 self.builder = Builder.FromOpts(opts)
1223 # This always starts true since the script grabs latest first.
1224 self.was_blink = True
1226 for d in DEPOT_NAMES:
1227 # The working directory of each depot is just the path to the depot, but
1228 # since we're already in 'src', we can skip that part.
1230 self.depot_cwd[d] = os.path.join(
1231 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1233 def PerformCleanup(self):
1234 """Performs cleanup when script is finished."""
1235 os.chdir(self.src_cwd)
1236 for c in self.cleanup_commands:
1237 if c[0] == 'mv':
1238 shutil.move(c[1], c[2])
1239 else:
1240 assert False, 'Invalid cleanup command.'
1242 def GetRevisionList(self, depot, bad_revision, good_revision):
1243 """Retrieves a list of all the commits between the bad revision and
1244 last known good revision."""
1246 revision_work_list = []
1248 if depot == 'cros':
1249 revision_range_start = good_revision
1250 revision_range_end = bad_revision
1252 cwd = os.getcwd()
1253 self.ChangeToDepotWorkingDirectory('cros')
1255 # Print the commit timestamps for every commit in the revision time
1256 # range. We'll sort them and bisect by that. There is a remote chance that
1257 # 2 (or more) commits will share the exact same timestamp, but it's
1258 # probably safe to ignore that case.
1259 cmd = ['repo', 'forall', '-c',
1260 'git log --format=%%ct --before=%d --after=%d' % (
1261 revision_range_end, revision_range_start)]
1262 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
1264 assert not return_code, ('An error occurred while running '
1265 '"%s"' % ' '.join(cmd))
1267 os.chdir(cwd)
1269 revision_work_list = list(set(
1270 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
1271 revision_work_list = sorted(revision_work_list, reverse=True)
1272 else:
1273 cwd = self._GetDepotDirectory(depot)
1274 revision_work_list = self.source_control.GetRevisionList(bad_revision,
1275 good_revision, cwd=cwd)
1277 return revision_work_list
1279 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
1280 svn_revision = self.source_control.SVNFindRev(revision)
1282 if bisect_utils.IsStringInt(svn_revision):
1283 # V8 is tricky to bisect, in that there are only a few instances when
1284 # we can dive into bleeding_edge and get back a meaningful result.
1285 # Try to detect a V8 "business as usual" case, which is when:
1286 # 1. trunk revision N has description "Version X.Y.Z"
1287 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1288 # trunk. Now working on X.Y.(Z+1)."
1290 # As of 01/24/2014, V8 trunk descriptions are formatted:
1291 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1292 # So we can just try parsing that out first and fall back to the old way.
1293 v8_dir = self._GetDepotDirectory('v8')
1294 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
1296 revision_info = self.source_control.QueryRevisionInfo(revision,
1297 cwd=v8_dir)
1299 version_re = re.compile("Version (?P<values>[0-9,.]+)")
1301 regex_results = version_re.search(revision_info['subject'])
1303 if regex_results:
1304 git_revision = None
1306 # Look for "based on bleeding_edge" and parse out revision
1307 if 'based on bleeding_edge' in revision_info['subject']:
1308 try:
1309 bleeding_edge_revision = revision_info['subject'].split(
1310 'bleeding_edge revision r')[1]
1311 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1312 git_revision = self.source_control.ResolveToRevision(
1313 bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
1314 cwd=v8_bleeding_edge_dir)
1315 return git_revision
1316 except (IndexError, ValueError):
1317 pass
1319 if not git_revision:
1320 # Wasn't successful, try the old way of looking for "Prepare push to"
1321 git_revision = self.source_control.ResolveToRevision(
1322 int(svn_revision) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
1323 cwd=v8_bleeding_edge_dir)
1325 if git_revision:
1326 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1327 cwd=v8_bleeding_edge_dir)
1329 if 'Prepare push to trunk' in revision_info['subject']:
1330 return git_revision
1331 return None
1333 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1334 cwd = self._GetDepotDirectory('v8')
1335 cmd = ['log', '--format=%ct', '-1', revision]
1336 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1337 commit_time = int(output)
1338 commits = []
1340 if search_forward:
1341 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1342 'origin/master']
1343 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1344 output = output.split()
1345 commits = output
1346 commits = reversed(commits)
1347 else:
1348 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1349 'origin/master']
1350 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1351 output = output.split()
1352 commits = output
1354 bleeding_edge_revision = None
1356 for c in commits:
1357 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1358 if bleeding_edge_revision:
1359 break
1361 return bleeding_edge_revision
1363 def _ParseRevisionsFromDEPSFile(self, depot):
1364 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1365 be needed if the bisect recurses into those depots later.
1367 Args:
1368 depot: Name of depot being bisected.
1370 Returns:
      A dict in the format {depot: revision} if successful, otherwise None.
    """
1373 try:
1374 deps_data = {
1375 'Var': lambda _: deps_data["vars"][_],
1376 'From': lambda *args: None,
1378 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1379 deps_data = deps_data['deps']
1381 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1382 results = {}
1383 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1384 if (depot_data.get('platform') and
1385 depot_data.get('platform') != os.name):
1386 continue
1388 if (depot_data.get('recurse') and depot in depot_data.get('from')):
1389 depot_data_src = depot_data.get('src') or depot_data.get('src_old')
1390 src_dir = deps_data.get(depot_data_src)
1391 if src_dir:
1392 self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
1393 depot_data_src[4:])
1394 re_results = rxp.search(src_dir)
1395 if re_results:
1396 results[depot_name] = re_results.group('revision')
1397 else:
1398 warning_text = ('Couldn\'t parse revision for %s while bisecting '
1399 '%s' % (depot_name, depot))
1400 if not warning_text in self.warnings:
1401 self.warnings.append(warning_text)
1402 else:
1403 results[depot_name] = None
1404 return results
1405 except ImportError:
1406 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1407 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
1408 results = {}
1409 for depot_name, depot_revision in parse_results.iteritems():
1410 depot_revision = depot_revision.strip('@')
1411 print depot_name, depot_revision
1412 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1413 if (current_data.has_key('deps_var') and
1414 current_data['deps_var'] == depot_name):
1415 src_name = current_name
1416 results[src_name] = depot_revision
1417 break
1418 return results
1420 def _Get3rdPartyRevisions(self, depot):
1421 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1423 Returns:
      A dict in the format {depot: revision} if successful, otherwise None.
    """
1426 cwd = os.getcwd()
1427 self.ChangeToDepotWorkingDirectory(depot)
1429 results = {}
1431 if depot == 'chromium' or depot == 'android-chrome':
1432 results = self._ParseRevisionsFromDEPSFile(depot)
1433 os.chdir(cwd)
1434 elif depot == 'cros':
1435 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1436 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1437 CROS_CHROMEOS_PATTERN]
1438 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
1440 assert not return_code, ('An error occurred while running '
1441 '"%s"' % ' '.join(cmd))
1443 if len(output) > CROS_CHROMEOS_PATTERN:
1444 output = output[len(CROS_CHROMEOS_PATTERN):]
1446 if len(output) > 1:
1447 output = output.split('_')[0]
1449 if len(output) > 3:
1450 contents = output.split('.')
1452 version = contents[2]
1454 if contents[3] != '0':
1455 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' %
1456 (version, contents[3], version))
1457 if not warningText in self.warnings:
1458 self.warnings.append(warningText)
1460 cwd = os.getcwd()
1461 self.ChangeToDepotWorkingDirectory('chromium')
1462 cmd = ['log', '-1', '--format=%H',
1463 '--author=chrome-release@google.com',
1464 '--grep=to %s' % version, 'origin/master']
1465 return_code = bisect_utils.CheckRunGit(cmd)
1466 os.chdir(cwd)
1468 results['chromium'] = output.strip()
1469 elif depot == 'v8':
1470 # We can't try to map the trunk revision to bleeding edge yet, because
1471 # we don't know which direction to try to search in. Have to wait until
1472 # the bisect has narrowed the results down to 2 v8 rolls.
1473 results['v8_bleeding_edge'] = None
1475 return results
1477 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1478 """Backs up or restores build output directory based on restore argument.
1480 Args:
      restore: Indicates whether to restore or back up. Default is False (backup).
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to the backup or restored location as a string, or None if it fails.
    """
1487 build_dir = os.path.abspath(
1488 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1489 source_dir = os.path.join(build_dir, build_type)
1490 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1491 if restore:
1492 source_dir, destination_dir = destination_dir, source_dir
1493 if os.path.exists(source_dir):
1494 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1495 shutil.move(source_dir, destination_dir)
1496 return destination_dir
1497 return None
1499 def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
1500 patch_sha, out_dir):
1501 """Checks and downloads build archive for a given revision.
    Looks for a build archive keyed by the Git hash, then by the SVN revision;
    if either file exists, downloads it.

    Args:
      revision: A Git hash revision.
      gs_bucket: Cloud storage bucket name.
      target_arch: 32 or 64 bit build target.
      patch_sha: SHA of the DEPS patch (used while bisecting 3rd party
          repositories).
      out_dir: Build output directory where the downloaded file is stored.

    Returns:
      Downloaded archive file path if it exists, otherwise None.
    """
1516 # Source archive file path on cloud storage using Git revision.
1517 source_file = GetRemoteBuildPath(
1518 revision, self.opts.target_platform, target_arch, patch_sha)
1519 downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1520 if not downloaded_archive:
1521 # Get SVN revision for the given SHA.
1522 svn_revision = self.source_control.SVNFindRev(revision)
1523 if svn_revision:
1524 # Source archive file path on cloud storage using SVN revision.
1525 source_file = GetRemoteBuildPath(
1526 svn_revision, self.opts.target_platform, target_arch, patch_sha)
1527 return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1528 return downloaded_archive
1530 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1531 """Downloads the build archive for the given revision.
1533 Args:
1534 revision: The Git revision to download or build.
1535 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1536 patch: A DEPS patch (used while bisecting 3rd party repositories).
1538 Returns:
      True if download succeeds, otherwise False.
    """
1541 patch_sha = None
1542 if patch:
1543 # Get the SHA of the DEPS changes patch.
1544 patch_sha = GetSHA1HexDigest(patch)
1546 # Update the DEPS changes patch with a patch to create a new file named
1547 # 'DEPS.sha' and add patch_sha evaluated above to it.
1548 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1550 # Get Build output directory
1551 abs_build_dir = os.path.abspath(
1552 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1554 fetch_build_func = lambda: self.GetBuildArchiveForRevision(
1555 revision, self.opts.gs_bucket, self.opts.target_arch,
1556 patch_sha, abs_build_dir)
1558 # Downloaded archive file path, downloads build archive for given revision.
1559 downloaded_file = fetch_build_func()
    # When the build archive doesn't exist, post a build request to the
    # tryserver and wait for the build to be produced.
1563 if not downloaded_file:
1564 downloaded_file = self.PostBuildRequestAndWait(
1565 revision, fetch_build=fetch_build_func, patch=patch)
1566 if not downloaded_file:
1567 return False
1569 # Generic name for the archive, created when archive file is extracted.
1570 output_dir = os.path.join(
1571 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1572 # Unzip build archive directory.
1573 try:
1574 RmTreeAndMkDir(output_dir, skip_makedir=True)
1575 self.BackupOrRestoreOutputdirectory(restore=False)
1576 # Build output directory based on target(e.g. out/Release, out/Debug).
1577 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1578 ExtractZip(downloaded_file, abs_build_dir)
1579 if not os.path.exists(output_dir):
        # Due to recipe changes, the build's extracted folder contains
        # out/Release instead of full-build-<platform>/Release.
1582 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
1583 output_dir = os.path.join(abs_build_dir, 'out', build_type)
1584 else:
1585 raise IOError('Missing extracted folder %s ' % output_dir)
1587 print 'Moving build from %s to %s' % (
1588 output_dir, target_build_output_dir)
1589 shutil.move(output_dir, target_build_output_dir)
1590 return True
1591 except Exception as e:
1592 print 'Something went wrong while extracting archive file: %s' % e
1593 self.BackupOrRestoreOutputdirectory(restore=True)
1594 # Cleanup any leftovers from unzipping.
1595 if os.path.exists(output_dir):
1596 RmTreeAndMkDir(output_dir, skip_makedir=True)
1597 finally:
1598 # Delete downloaded archive
1599 if os.path.exists(downloaded_file):
1600 os.remove(downloaded_file)
1601 return False
1603 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
1604 """POSTs the build request job to the tryserver instance.
    A try job build request is posted to the tryserver.chromium.perf master,
    then this waits for the binaries to be produced and archived in cloud
    storage. Once the build is ready and stored in the cloud, the build
    archive is downloaded into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.
    """
1620 # Get SVN revision for the given SHA.
1621 svn_revision = self.source_control.SVNFindRev(revision)
1622 if not svn_revision:
1623 raise RuntimeError(
1624 'Failed to determine SVN revision for %s' % revision)
1626 def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
1627 """Gets builder bot name and build time in seconds based on platform."""
1628 # Bot names should match the one listed in tryserver.chromium's
1629 # master.cfg which produces builds for bisect.
1630 if bisect_utils.IsWindowsHost():
1631 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
1632 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1633 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1634 if bisect_utils.IsLinuxHost():
1635 if target_platform == 'android':
1636 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1637 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1638 if bisect_utils.IsMacHost():
1639 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1640 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1641 if not fetch_build:
1642 return False
1644 bot_name, build_timeout = GetBuilderNameAndBuildTime(
1645 self.opts.target_platform, self.opts.target_arch)
1646 builder_host = self.opts.builder_host
1647 builder_port = self.opts.builder_port
1648 # Create a unique ID for each build request posted to tryserver builders.
1650 # This ID is added to the "Reason" property in the build's JSON.
1650 build_request_id = GetSHA1HexDigest(
1651 '%s-%s-%s' % (svn_revision, patch, time.time()))
1653 # Creates a try job description.
1654 job_args = {
1655 'host': builder_host,
1656 'port': builder_port,
1657 'revision': 'src@%s' % svn_revision,
1658 'bot': bot_name,
1659 'name': build_request_id,
1661 # Update patch information if supplied.
1662 if patch:
1663 job_args['patch'] = patch
1664 # Posts job to build the revision on the server.
1665 if bisect_builder.PostTryJob(job_args):
1666 target_file, error_msg = _WaitUntilBuildIsReady(
1667 fetch_build, bot_name, builder_host, builder_port, build_request_id,
1668 build_timeout)
1669 if not target_file:
1670 print '%s [revision: %s]' % (error_msg, svn_revision)
1671 return None
1672 return target_file
1673 print 'Failed to post build request for revision: [%s]' % svn_revision
1674 return None
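# A minimal sketch (hypothetical values) of the try job description built
# above; host/port come from --builder_host/--builder_port and 'name' is the
# SHA1-based build_request_id:
#   {'host': '<builder_host>', 'port': <builder_port>,
#    'revision': 'src@290000', 'bot': 'linux_perf_bisect_builder',
#    'name': '<sha1 hex digest>', 'patch': '<optional DEPS diff text>'}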
1676 def IsDownloadable(self, depot):
1677 """Checks if build is downloadable based on target platform and depot."""
1678 if (self.opts.target_platform in ['chromium', 'android'] and
1679 self.opts.gs_bucket):
1680 return (depot == 'chromium' or
1681 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1682 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1683 return False
1685 def UpdateDeps(self, revision, depot, deps_file):
1686 """Updates DEPS file with new revision of dependency repository.
1688 This method searches DEPS for the pattern in which a depot revision is
1689 specified (e.g. "webkit_revision": "123456"). If a match is found, it
1690 resolves the given git hash to an SVN revision and replaces it in DEPS.
1692 Args:
1693 revision: A git hash revision of the dependency repository.
1694 depot: Current depot being bisected.
1695 deps_file: Path to DEPS file.
1697 Returns:
1698 True if DEPS file is modified successfully, otherwise False.
1700 if not os.path.exists(deps_file):
1701 return False
1703 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1704 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1705 if not deps_var:
1706 print 'DEPS update not supported for depot: %s' % depot
1707 return False
1709 # Hack for the Angle repository. In the DEPS file, the "vars" dictionary
1710 # contains an "angle_revision" key that holds a git hash instead of an SVN
1711 # revision, and sometimes the "angle_revision" key is not specified in
1712 # "vars" at all. In that case, check the "deps" dictionary for an entry
1713 # matching angle.git@[a-fA-F0-9]{40}$ and replace the git hash there.
1714 if depot == 'angle':
1715 return _UpdateDEPSForAngle(revision, depot, deps_file)
1717 try:
1718 deps_contents = ReadStringFromFile(deps_file)
1719 # Look for the depot revision pattern in the DEPS file "vars" section,
1720 # e.g. for webkit the format is "webkit_revision": "12345".
1721 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1722 re.MULTILINE)
1723 match = re.search(deps_revision, deps_contents)
1724 if match:
1725 svn_revision = self.source_control.SVNFindRev(
1726 revision, self._GetDepotDirectory(depot))
1727 if not svn_revision:
1728 print 'Could not determine SVN revision for %s' % revision
1729 return False
1730 # Update the revision information for the given depot
1731 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1733 # For v8_bleeding_edge revisions change V8 branch in order
1734 # to fetch bleeding edge revision.
1735 if depot == 'v8_bleeding_edge':
1736 new_data = _UpdateV8Branch(new_data)
1737 if not new_data:
1738 return False
1739 # Write changes to DEPS file
1740 WriteStringToFile(new_data, deps_file)
1741 return True
1742 except IOError, e:
1743 print 'Something went wrong while updating DEPS file. [%s]' % e
1744 return False
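# Illustrative sketch of the substitution performed above (made-up revision
# numbers, not part of the original script):
#   deps_contents = '... "webkit_revision": "181000", ...'
#   re.sub(r'(?<="webkit_revision": ")([0-9]+)(?=")', '181042', deps_contents)
#   # -> '... "webkit_revision": "181042", ...'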
1746 def CreateDEPSPatch(self, depot, revision):
1747 """Modifies DEPS and returns diff as text.
1749 Args:
1750 depot: Current depot being bisected.
1751 revision: A git hash revision of the dependency repository.
1753 Returns:
1754 A tuple with git hash of chromium revision and DEPS patch text.
1756 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1757 if not os.path.exists(deps_file_path):
1758 raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
1759 # Get current chromium revision (git hash).
1760 cmd = ['rev-parse', 'HEAD']
1761 chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
1762 if not chromium_sha:
1763 raise RuntimeError('Failed to determine Chromium revision for %s' %
1764 revision)
1765 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1766 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1767 # Checkout DEPS file for the current chromium revision.
1768 if self.source_control.CheckoutFileAtRevision(
1769 bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
1770 if self.UpdateDeps(revision, depot, deps_file_path):
1771 diff_command = [
1772 'diff',
1773 '--src-prefix=src/',
1774 '--dst-prefix=src/',
1775 '--no-ext-diff',
1776 bisect_utils.FILE_DEPS,
1778 diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
1779 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1780 else:
1781 raise RuntimeError(
1782 'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
1783 else:
1784 raise RuntimeError(
1785 'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
1786 return (None, None)
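# The returned patch text is a plain "git diff" of the DEPS file with src/
# prefixes; roughly of this shape (illustrative hunk, revision numbers made up):
#   --- src/DEPS
#   +++ src/DEPS
#   -  "webkit_revision": "181000",
#   +  "webkit_revision": "181042",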
1788 def BuildCurrentRevision(self, depot, revision=None):
1789 """Builds chrome and performance_ui_tests on the current revision.
1791 Returns:
1792 True if the build was successful.
1794 if self.opts.debug_ignore_build:
1795 return True
1796 cwd = os.getcwd()
1797 os.chdir(self.src_cwd)
1798 # Fetch build archive for the given revision from the cloud storage when
1799 # the storage bucket is passed.
1800 if self.IsDownloadable(depot) and revision:
1801 deps_patch = None
1802 if depot != 'chromium':
1803 # Create a DEPS patch with new revision for dependency repository.
1804 revision, deps_patch = self.CreateDEPSPatch(depot, revision)
1805 if self.DownloadCurrentBuild(revision, patch=deps_patch):
1806 os.chdir(cwd)
1807 if deps_patch:
1808 # Reverts the changes to DEPS file.
1809 self.source_control.CheckoutFileAtRevision(
1810 bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
1811 return True
1812 return False
1814 # This code is executed when the bisect bot builds binaries locally.
1815 build_success = self.builder.Build(depot, self.opts)
1816 os.chdir(cwd)
1817 return build_success
1819 def RunGClientHooks(self):
1820 """Runs gclient with runhooks command.
1822 Returns:
1823 True if gclient reports no errors.
1825 if self.opts.debug_ignore_build:
1826 return True
1827 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1829 def _IsBisectModeUsingMetric(self):
1830 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
1832 def _IsBisectModeReturnCode(self):
1833 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
1835 def _IsBisectModeStandardDeviation(self):
1836 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
1838 def GetCompatibleCommand(self, command_to_run, revision, depot):
1839 # Prior to crrev.com/274857 *only* android-chromium-testshell works.
1840 # Until crrev.com/276628 *both* (android-chromium-testshell and
1841 # android-chrome-shell) work. After rev 276628 *only*
1842 # android-chrome-shell works. The bisect-perf-regression.py script should
1843 # handle these cases and set the appropriate browser type based on revision.
1844 if self.opts.target_platform in ['android']:
1845 # When it's a third_party depot, get the chromium revision.
1846 if depot != 'chromium':
1847 revision = bisect_utils.CheckRunGit(
1848 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
1849 svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd)
1850 if not svn_revision:
1851 return command_to_run
1852 cmd_re = re.compile('--browser=(?P<browser_type>\S+)')
1853 matches = cmd_re.search(command_to_run)
1854 if bisect_utils.IsStringInt(svn_revision) and matches:
1855 cmd_browser = matches.group('browser_type')
1856 if svn_revision <= 274857 and cmd_browser == 'android-chrome-shell':
1857 return command_to_run.replace(cmd_browser,
1858 'android-chromium-testshell')
1859 elif (svn_revision >= 276628 and
1860 cmd_browser == 'android-chromium-testshell'):
1861 return command_to_run.replace(cmd_browser,
1862 'android-chrome-shell')
1863 return command_to_run
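# For example (hypothetical benchmark command): if the resolved Chromium
# revision is at or before r274857, a command such as
#   tools/perf/run_benchmark --browser=android-chrome-shell <benchmark>
# would be rewritten to use --browser=android-chromium-testshell instead.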
1865 def RunPerformanceTestAndParseResults(
1866 self, command_to_run, metric, reset_on_first_run=False,
1867 upload_on_last_run=False, results_label=None):
1868 """Runs a performance test on the current revision and parses the results.
1870 Args:
1871 command_to_run: The command to be run to execute the performance test.
1872 metric: The metric to parse out from the results of the performance test.
1873 This is the result chart name and trace name, separated by slash.
1874 reset_on_first_run: If True, pass the flag --reset-results on first run.
1875 upload_on_last_run: If True, pass the flag --upload-results on last run.
1876 results_label: A value for the option flag --results-label.
1877 The arguments reset_on_first_run, upload_on_last_run and results_label
1878 are all ignored if the test is not a Telemetry test.
1880 Returns:
1881 (values dict, 0) if --debug_ignore_perf_test was passed.
1882 (values dict, 0, test output) if the test was run successfully.
1883 (error message, -1) if the test couldn't be run.
1884 (error message, -1, test output) if the test ran but there was an error.
1886 success_code, failure_code = 0, -1
1888 if self.opts.debug_ignore_perf_test:
1889 fake_results = {
1890 'mean': 0.0,
1891 'std_err': 0.0,
1892 'std_dev': 0.0,
1893 'values': [0.0]
1895 return (fake_results, success_code)
1897 # On Windows, set posix=False so that Windows paths are parsed correctly.
1898 # With posix=True (the default), path separators '\' or '\\' are replaced
1899 # by '', see http://bugs.python.org/issue1724822.
1900 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
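# e.g. (assumed path) shlex.split(r'out\Release\test.exe --foo', posix=True)
# strips the backslashes and yields ['outReleasetest.exe', '--foo'], while
# posix=False keeps the Windows path intact.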
1902 if not _GenerateProfileIfNecessary(args):
1903 err_text = 'Failed to generate profile for performance test.'
1904 return (err_text, failure_code)
1906 # If running a Telemetry test for Chrome OS, insert the remote IP and
1907 # identity parameters.
1908 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
1909 if self.opts.target_platform == 'cros' and is_telemetry:
1910 args.append('--remote=%s' % self.opts.cros_remote_ip)
1911 args.append('--identity=%s' % CROS_TEST_KEY_PATH)
1913 start_time = time.time()
1915 metric_values = []
1916 output_of_all_runs = ''
1917 for i in xrange(self.opts.repeat_test_count):
1918 # The return code can be ignored here since, if the tests fail, it won't be 0.
1919 current_args = copy.copy(args)
1920 if is_telemetry:
1921 if i == 0 and reset_on_first_run:
1922 current_args.append('--reset-results')
1923 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
1924 current_args.append('--upload-results')
1925 if results_label:
1926 current_args.append('--results-label=%s' % results_label)
1927 try:
1928 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
1929 current_args, cwd=self.src_cwd)
1930 except OSError, e:
1931 if e.errno == errno.ENOENT:
1932 err_text = ('Something went wrong running the performance test. '
1933 'Please review the command line:\n\n')
1934 if 'src/' in ' '.join(args):
1935 err_text += ('Check that you haven\'t accidentally specified a '
1936 'path with src/ in the command.\n\n')
1937 err_text += ' '.join(args)
1938 err_text += '\n'
1940 return (err_text, failure_code)
1941 raise
1943 output_of_all_runs += output
1944 if self.opts.output_buildbot_annotations:
1945 print output
1947 if self._IsBisectModeUsingMetric():
1948 metric_values += _ParseMetricValuesFromOutput(metric, output)
1949 # If we're bisecting on a metric (i.e. changes in the mean or
1950 # standard deviation) and no metric values are produced, bail out.
1951 if not metric_values:
1952 break
1953 elif self._IsBisectModeReturnCode():
1954 metric_values.append(return_code)
1956 elapsed_minutes = (time.time() - start_time) / 60.0
1957 if elapsed_minutes >= self.opts.max_time_minutes:
1958 break
1960 if len(metric_values) == 0:
1961 err_text = 'Metric %s was not found in the test output.' % metric
1962 # TODO(qyearsley): Consider also getting and displaying a list of metrics
1963 # that were found in the output here.
1964 return (err_text, failure_code, output_of_all_runs)
1966 # If we're bisecting on return codes, we're really just looking for zero vs
1967 # non-zero.
1968 if self._IsBisectModeReturnCode():
1969 # If any of the return codes is non-zero, output 1.
1970 overall_return_code = 0 if (
1971 all(current_value == 0 for current_value in metric_values)) else 1
1973 values = {
1974 'mean': overall_return_code,
1975 'std_err': 0.0,
1976 'std_dev': 0.0,
1977 'values': metric_values,
1980 print 'Results of performance test: Command returned with %d' % (
1981 overall_return_code)
1982 print
1983 else:
1984 # Need to get the average value if there were multiple values.
1985 truncated_mean = math_utils.TruncatedMean(
1986 metric_values, self.opts.truncate_percent)
1987 standard_err = math_utils.StandardError(metric_values)
1988 standard_dev = math_utils.StandardDeviation(metric_values)
1990 if self._IsBisectModeStandardDeviation():
1991 metric_values = [standard_dev]
1993 values = {
1994 'mean': truncated_mean,
1995 'std_err': standard_err,
1996 'std_dev': standard_dev,
1997 'values': metric_values,
2000 print 'Results of performance test: %12f %12f' % (
2001 truncated_mean, standard_err)
2002 print
2003 return (values, success_code, output_of_all_runs)
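# Illustrative example (made-up numbers): with measured values
# [10.0, 10.2, 9.9, 10.1, 30.0] and a truncate_percent of 25, TruncatedMean
# drops the most extreme values before averaging, so the reported mean stays
# near 10 rather than being dragged up by the 30.0 outlier.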
2005 def FindAllRevisionsToSync(self, revision, depot):
2006 """Finds all dependant revisions and depots that need to be synced for a
2007 given revision. This is only useful in the git workflow, as an svn depot
2008 may be split into multiple mirrors.
2010 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2011 skia/include. To sync skia/src properly, one has to find the proper
2012 revisions in skia/gyp and skia/include.
2014 Args:
2015 revision: The revision to sync to.
2016 depot: The depot in use at the moment (probably skia).
2018 Returns:
2019 A list of [depot, revision] pairs that need to be synced.
2021 revisions_to_sync = [[depot, revision]]
2023 is_base = ((depot == 'chromium') or (depot == 'cros') or
2024 (depot == 'android-chrome'))
2026 # Some SVN depots were split into multiple git depots, so we need to
2027 # figure out for each mirror which git revision to grab. There's no
2028 # guarantee that the SVN revision will exist for each of the dependent
2029 # depots, so we have to grep the git logs and grab the next earlier one.
2030 if (not is_base
2031 and DEPOT_DEPS_NAME[depot]['depends']
2032 and self.source_control.IsGit()):
2033 svn_rev = self.source_control.SVNFindRev(revision)
2035 for d in DEPOT_DEPS_NAME[depot]['depends']:
2036 self.ChangeToDepotWorkingDirectory(d)
2038 dependant_rev = self.source_control.ResolveToRevision(
2039 svn_rev, d, DEPOT_DEPS_NAME, -1000)
2041 if dependant_rev:
2042 revisions_to_sync.append([d, dependant_rev])
2044 num_resolved = len(revisions_to_sync)
2045 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
2047 self.ChangeToDepotWorkingDirectory(depot)
2049 if num_resolved - 1 != num_needed:
2050 return None
2052 return revisions_to_sync
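# For instance (hypothetical hashes), bisecting the skia depot mentioned in
# the docstring could return something like:
#   [['skia/src', '<sha1>'], ['skia/gyp', '<sha1>'], ['skia/include', '<sha1>']]
# i.e. one [depot, revision] pair per dependent mirror resolved above.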
2054 def PerformPreBuildCleanup(self):
2055 """Performs necessary cleanup between runs."""
2056 print 'Cleaning up between runs.'
2057 print
2059 # Having these pyc files around between runs can confuse the
2060 # perf tests and cause them to crash.
2061 for (path, _, files) in os.walk(self.src_cwd):
2062 for cur_file in files:
2063 if cur_file.endswith('.pyc'):
2064 path_to_file = os.path.join(path, cur_file)
2065 os.remove(path_to_file)
2067 def PerformWebkitDirectoryCleanup(self, revision):
2068 """If the script is switching between Blink and WebKit during bisect,
2069 it's faster to just delete the directory rather than leave it up to git
2070 to sync.
2072 Returns:
2073 True if successful.
2075 if not self.source_control.CheckoutFileAtRevision(
2076 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
2077 return False
2079 cwd = os.getcwd()
2080 os.chdir(self.src_cwd)
2082 is_blink = bisect_utils.IsDepsFileBlink()
2084 os.chdir(cwd)
2086 if not self.source_control.RevertFileToHead(
2087 bisect_utils.FILE_DEPS_GIT):
2088 return False
2090 if self.was_blink != is_blink:
2091 self.was_blink = is_blink
2092 # Removes third_party/Webkit directory.
2093 return bisect_utils.RemoveThirdPartyDirectory('Webkit')
2094 return True
2096 def PerformCrosChrootCleanup(self):
2097 """Deletes the chroot.
2099 Returns:
2100 True if successful.
2102 cwd = os.getcwd()
2103 self.ChangeToDepotWorkingDirectory('cros')
2104 cmd = [CROS_SDK_PATH, '--delete']
2105 return_code = bisect_utils.RunProcess(cmd)
2106 os.chdir(cwd)
2107 return not return_code
2109 def CreateCrosChroot(self):
2110 """Creates a new chroot.
2112 Returns:
2113 True if successful.
2115 cwd = os.getcwd()
2116 self.ChangeToDepotWorkingDirectory('cros')
2117 cmd = [CROS_SDK_PATH, '--create']
2118 return_code = bisect_utils.RunProcess(cmd)
2119 os.chdir(cwd)
2120 return not return_code
2122 def PerformPreSyncCleanup(self, revision, depot):
2123 """Performs any necessary cleanup before syncing.
2125 Returns:
2126 True if successful.
2128 if depot == 'chromium' or depot == 'android-chrome':
2129 # Removes third_party/libjingle. At some point, libjingle was causing
2130 # issues syncing when using the git workflow (crbug.com/266324).
2131 os.chdir(self.src_cwd)
2132 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
2133 return False
2134 # Removes third_party/skia. At some point, skia was causing
2135 # issues syncing when using the git workflow (crbug.com/377951).
2136 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
2137 return False
2138 if depot == 'chromium':
2139 # The fast WebKit cleanup doesn't work for android-chrome.
2140 # The switch from WebKit to Blink that this deals with happened quite a
2141 # long time ago, so this is unlikely to be a problem.
2142 return self.PerformWebkitDirectoryCleanup(revision)
2143 elif depot == 'cros':
2144 return self.PerformCrosChrootCleanup()
2145 return True
2147 def RunPostSync(self, depot):
2148 """Performs any work after syncing.
2150 Returns:
2151 True if successful.
2153 if self.opts.target_platform == 'android':
2154 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
2155 path_to_src=self.src_cwd):
2156 return False
2158 if depot == 'cros':
2159 return self.CreateCrosChroot()
2160 else:
2161 return self.RunGClientHooks()
2162 return True
2164 def ShouldSkipRevision(self, depot, revision):
2165 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2166 is git based those changes would have no effect.
2168 Args:
2169 depot: The depot being bisected.
2170 revision: Current revision we're synced to.
2172 Returns:
2173 True if we should skip building/testing this revision.
2175 if depot == 'chromium':
2176 if self.source_control.IsGit():
2177 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
2178 output = bisect_utils.CheckRunGit(cmd)
2180 files = output.splitlines()
2182 if len(files) == 1 and files[0] == 'DEPS':
2183 return True
2185 return False
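# e.g. if "git diff-tree --no-commit-id --name-only -r <sha1>" prints only
# "DEPS", the commit just rolled dependencies and the revision can be skipped.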
2187 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
2188 skippable=False):
2189 """Performs a full sync/build/run of the specified revision.
2191 Args:
2192 revision: The revision to sync to.
2193 depot: The depot that's being used at the moment (src, webkit, etc.)
2194 command_to_run: The command to execute the performance test.
2195 metric: The performance metric being tested.
2197 Returns:
2198 On success, a tuple containing the results of the performance test.
2199 Otherwise, a tuple with the error message.
2201 sync_client = None
2202 if depot == 'chromium' or depot == 'android-chrome':
2203 sync_client = 'gclient'
2204 elif depot == 'cros':
2205 sync_client = 'repo'
2207 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
2209 if not revisions_to_sync:
2210 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
2212 if not self.PerformPreSyncCleanup(revision, depot):
2213 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
2215 success = True
2217 if not self.opts.debug_ignore_sync:
2218 for r in revisions_to_sync:
2219 self.ChangeToDepotWorkingDirectory(r[0])
2221 if sync_client:
2222 self.PerformPreBuildCleanup()
2224 # If you're using gclient to sync, you need to specify the depot you
2225 # want so that all the dependencies sync properly as well.
2226 # ie. gclient sync src@<SHA1>
2227 current_revision = r[1]
2228 if sync_client == 'gclient':
2229 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
2230 current_revision)
2231 if not self.source_control.SyncToRevision(current_revision,
2232 sync_client):
2233 success = False
2235 break
2237 if success:
2238 success = self.RunPostSync(depot)
2239 if success:
2240 if skippable and self.ShouldSkipRevision(depot, revision):
2241 return ('Skipped revision: [%s]' % str(revision),
2242 BUILD_RESULT_SKIPPED)
2244 start_build_time = time.time()
2245 if self.BuildCurrentRevision(depot, revision):
2246 after_build_time = time.time()
2247 # Hack to support things that got changed.
2248 command_to_run = self.GetCompatibleCommand(
2249 command_to_run, revision, depot)
2250 results = self.RunPerformanceTestAndParseResults(command_to_run,
2251 metric)
2252 # Restore the build output directory once the tests are done, to avoid
2253 # any discrepancy.
2254 if self.IsDownloadable(depot) and revision:
2255 self.BackupOrRestoreOutputdirectory(restore=True)
2257 if results[1] == 0:
2258 external_revisions = self._Get3rdPartyRevisions(depot)
2260 if external_revisions is not None:
2261 return (results[0], results[1], external_revisions,
2262 time.time() - after_build_time, after_build_time -
2263 start_build_time)
2264 else:
2265 return ('Failed to parse DEPS file for external revisions.',
2266 BUILD_RESULT_FAIL)
2267 else:
2268 return results
2269 else:
2270 return ('Failed to build revision: [%s]' % str(revision),
2271 BUILD_RESULT_FAIL)
2272 else:
2273 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
2274 else:
2275 return ('Failed to sync revision: [%s]' % str(revision),
2276 BUILD_RESULT_FAIL)
2278 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2279 """Given known good and bad values, decide if the current_value passed
2280 or failed.
2282 Args:
2283 current_value: The value of the metric being checked.
2284 known_bad_value: The reference value for a "failed" run.
2285 known_good_value: The reference value for a "passed" run.
2287 Returns:
2288 True if the current_value is closer to the known_good_value than the
2289 known_bad_value.
2291 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2292 dist_to_good_value = abs(current_value['std_dev'] -
2293 known_good_value['std_dev'])
2294 dist_to_bad_value = abs(current_value['std_dev'] -
2295 known_bad_value['std_dev'])
2296 else:
2297 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2298 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2300 return dist_to_good_value < dist_to_bad_value
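# Worked example (made-up numbers): with a known-good mean of 100 ms and a
# known-bad mean of 130 ms, a run with a mean of 110 ms is 10 from good and
# 20 from bad, so it counts as passing; a run at 125 ms would count as bad.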
2302 def _GetDepotDirectory(self, depot_name):
2303 if depot_name == 'chromium':
2304 return self.src_cwd
2305 elif depot_name == 'cros':
2306 return self.cros_cwd
2307 elif depot_name in DEPOT_NAMES:
2308 return self.depot_cwd[depot_name]
2309 else:
2310 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
2311 'was added without proper support?' % depot_name)
2313 def ChangeToDepotWorkingDirectory(self, depot_name):
2314 """Given a depot, changes to the appropriate working directory.
2316 Args:
2317 depot_name: The name of the depot (see DEPOT_NAMES).
2319 os.chdir(self._GetDepotDirectory(depot_name))
2321 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2322 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2323 search_forward=True)
2324 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2325 search_forward=False)
2326 min_revision_data['external']['v8_bleeding_edge'] = r1
2327 max_revision_data['external']['v8_bleeding_edge'] = r2
2329 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2330 min_revision_data['revision'])
2331 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2332 max_revision_data['revision'])):
2333 self.warnings.append(
2334 'Trunk revisions in V8 did not map directly to bleeding_edge. '
2335 'Attempted to expand the range to find V8 rolls which did map '
2336 'directly to bleeding_edge revisions, but results might not be '
2337 'valid.')
2339 def _FindNextDepotToBisect(
2340 self, current_depot, min_revision_data, max_revision_data):
2341 """Decides which depot the script should dive into next (if any).
2343 Args:
2344 current_depot: Current depot being bisected.
2345 min_revision_data: Data about the earliest revision in the bisect range.
2346 max_revision_data: Data about the latest revision in the bisect range.
2348 Returns:
2349 Name of the depot to bisect next, or None.
2351 external_depot = None
2352 for next_depot in DEPOT_NAMES:
2353 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2354 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2355 continue
2357 if not (DEPOT_DEPS_NAME[next_depot]['recurse']
2358 and min_revision_data['depot']
2359 in DEPOT_DEPS_NAME[next_depot]['from']):
2360 continue
2362 if current_depot == 'v8':
2363 # We grab the bleeding_edge info here rather than earlier because we
2364 # finally have the revision range. From that we can search forwards and
2365 # backwards to try to match trunk revisions to bleeding_edge.
2366 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2368 if (min_revision_data['external'].get(next_depot) ==
2369 max_revision_data['external'].get(next_depot)):
2370 continue
2372 if (min_revision_data['external'].get(next_depot) and
2373 max_revision_data['external'].get(next_depot)):
2374 external_depot = next_depot
2375 break
2377 return external_depot
2379 def PrepareToBisectOnDepot(
2380 self, current_depot, end_revision, start_revision, previous_revision):
2381 """Changes to the appropriate directory and gathers a list of revisions
2382 to bisect between |start_revision| and |end_revision|.
2384 Args:
2385 current_depot: The depot we want to bisect.
2386 end_revision: End of the revision range.
2387 start_revision: Start of the revision range.
2388 previous_revision: The last revision we synced to on |previous_depot|.
2390 Returns:
2391 A list containing the revisions between |start_revision| and
2392 |end_revision| inclusive.
2394 # Change into working directory of external library to run
2395 # subsequent commands.
2396 self.ChangeToDepotWorkingDirectory(current_depot)
2398 # V8 (and possibly others) is merged in periodically. Bisecting
2399 # this directory directly won't give much good info.
2400 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
2401 config_path = os.path.join(self.src_cwd, '..')
2402 if bisect_utils.RunGClientAndCreateConfig(self.opts,
2403 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2404 return []
2405 if bisect_utils.RunGClient(
2406 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
2407 return []
2409 if current_depot == 'v8_bleeding_edge':
2410 self.ChangeToDepotWorkingDirectory('chromium')
2412 shutil.move('v8', 'v8.bak')
2413 shutil.move('v8_bleeding_edge', 'v8')
2415 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2416 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
2418 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2419 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
2421 self.ChangeToDepotWorkingDirectory(current_depot)
2423 depot_revision_list = self.GetRevisionList(current_depot,
2424 end_revision,
2425 start_revision)
2427 self.ChangeToDepotWorkingDirectory('chromium')
2429 return depot_revision_list
2431 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2432 """Gathers reference values by running the performance tests on the
2433 known good and bad revisions.
2435 Args:
2436 good_rev: The last known good revision where the performance regression
2437 has not occurred yet.
2438 bad_rev: A revision where the performance regression has already occurred.
2439 cmd: The command to execute the performance test.
2440 metric: The metric being tested for regression.
2442 Returns:
2443 A tuple with the results of building and running each revision.
2445 bad_run_results = self.SyncBuildAndRunRevision(
2446 bad_rev, target_depot, cmd, metric)
2448 good_run_results = None
2450 if not bad_run_results[1]:
2451 good_run_results = self.SyncBuildAndRunRevision(
2452 good_rev, target_depot, cmd, metric)
2454 return (bad_run_results, good_run_results)
2456 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2457 if self.opts.output_buildbot_annotations:
2458 step_name = 'Bisection Range: [%s - %s]' % (
2459 revision_list[len(revision_list)-1], revision_list[0])
2460 bisect_utils.OutputAnnotationStepStart(step_name)
2462 print
2463 print 'Revisions to bisect on [%s]:' % depot
2464 for revision_id in revision_list:
2465 print ' -> %s' % (revision_id, )
2466 print
2468 if self.opts.output_buildbot_annotations:
2469 bisect_utils.OutputAnnotationStepClosed()
2471 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
2472 """Checks to see if changes to DEPS file occurred, and that the revision
2473 range also includes the change to .DEPS.git. If it doesn't, attempts to
2474 expand the revision range to include it.
2476 Args:
2477 bad_revision: First known bad revision.
2478 good_revision: Last known good revision.
2480 Returns:
2481 A tuple with the new bad and good revisions.
2483 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
2484 changes_to_deps = self.source_control.QueryFileRevisionHistory(
2485 'DEPS', good_revision, bad_revision)
2487 if changes_to_deps:
2488 # DEPS file was changed, search from the oldest change to DEPS file to
2489 # bad_revision to see if there are matching .DEPS.git changes.
2490 oldest_deps_change = changes_to_deps[-1]
2491 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
2492 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
2494 if len(changes_to_deps) != len(changes_to_gitdeps):
2495 # Grab the timestamp of the last DEPS change
2496 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
2497 output = bisect_utils.CheckRunGit(cmd)
2498 commit_time = int(output)
2500 # Try looking for a commit that touches the .DEPS.git file in the
2501 # next 15 minutes after the DEPS file change.
2502 cmd = ['log', '--format=%H', '-1',
2503 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
2504 'origin/master', bisect_utils.FILE_DEPS_GIT]
2505 output = bisect_utils.CheckRunGit(cmd)
2506 output = output.strip()
2507 if output:
2508 self.warnings.append('Detected change to DEPS and modified '
2509 'revision range to include change to .DEPS.git')
2510 return (output, good_revision)
2511 else:
2512 self.warnings.append('Detected change to DEPS but couldn\'t find '
2513 'matching change to .DEPS.git')
2514 return (bad_revision, good_revision)
2516 def CheckIfRevisionsInProperOrder(
2517 self, target_depot, good_revision, bad_revision):
2518 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2520 Args:
2521 good_revision: Number/tag of the known good revision.
2522 bad_revision: Number/tag of the known bad revision.
2524 Returns:
2525 True if the revisions are in the proper order (good earlier than bad).
2527 if self.source_control.IsGit() and target_depot != 'cros':
2528 cmd = ['log', '--format=%ct', '-1', good_revision]
2529 cwd = self._GetDepotDirectory(target_depot)
2531 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2532 good_commit_time = int(output)
2534 cmd = ['log', '--format=%ct', '-1', bad_revision]
2535 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2536 bad_commit_time = int(output)
2538 return good_commit_time <= bad_commit_time
2539 else:
2540 # Cros/svn use integers
2541 return int(good_revision) <= int(bad_revision)
2543 def CanPerformBisect(self, revision_to_check):
2544 """Checks whether a given revision is bisectable.
2546 Note: At present it checks whether a given revision is bisectable on
2547 android bots (refer to crbug.com/385324).
2549 Args:
2550 revision_to_check: Known good revision.
2552 Returns:
2553 A dictionary indicating the result. If revision is not bisectable,
2554 this will contain the field "error", otherwise None.
2556 if self.opts.target_platform == 'android':
2557 revision_to_check = self.source_control.SVNFindRev(revision_to_check)
2558 if (bisect_utils.IsStringInt(revision_to_check)
2559 and revision_to_check < 265549):
2560 return {'error': (
2561 'Bisect cannot continue for the given revision range.\n'
2562 'It is impossible to bisect Android regressions '
2563 'prior to r265549, which allows the bisect bot to '
2564 'rely on Telemetry to do apk installation of the most recently '
2565 'built local ChromeShell (refer to crbug.com/385324).\n'
2566 'Please try bisecting revisions greater than or equal to r265549.')}
2567 return None
2569 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2570 """Given known good and bad revisions, run a binary search on all
2571 intermediate revisions to determine the CL where the performance regression
2572 occurred.
2574 Args:
2575 command_to_run: The command to execute the performance test.
2576 bad_revision_in: Number/tag of the known bad revision.
2577 good_revision_in: Number/tag of the known good revision.
2578 metric: The performance metric to monitor.
2580 Returns:
2581 A dict with 2 members, 'revision_data' and 'error'. On success,
2582 'revision_data' will contain a dict mapping revision ids to
2583 data about that revision. Each piece of revision data consists of a
2584 dict with the following keys:
2586 'passed': Represents whether the performance test was successful at
2587 that revision. Possible values include: 1 (passed), 0 (failed),
2588 '?' (skipped), 'F' (build failed).
2589 'depot': The depot that this revision is from (ie. WebKit)
2590 'external': If the revision is a 'src' revision, 'external' contains
2591 the revisions of each of the external libraries.
2592 'sort': A sort value for sorting the dict in order of commits.
2594 For example:
2595 {
2596 'error':None,
2597 'revision_data':
2598 {
2599 'CL #1':
2600 {
2601 'passed':False,
2602 'depot':'chromium',
2603 'external':None,
2604 'sort':0
2605 }
2606 }
2607 }
2609 If an error occurred, the 'error' field will contain the message and
2610 'revision_data' will be empty.
2612 results = {
2613 'revision_data' : {},
2614 'error' : None,
2617 # Choose depot to bisect first
2618 target_depot = 'chromium'
2619 if self.opts.target_platform == 'cros':
2620 target_depot = 'cros'
2621 elif self.opts.target_platform == 'android-chrome':
2622 target_depot = 'android-chrome'
2624 cwd = os.getcwd()
2625 self.ChangeToDepotWorkingDirectory(target_depot)
2627 # If they passed SVN CLs, etc., we can try to match them to git SHA1s.
2628 bad_revision = self.source_control.ResolveToRevision(
2629 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
2630 good_revision = self.source_control.ResolveToRevision(
2631 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)
2633 os.chdir(cwd)
2635 if bad_revision is None:
2636 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2637 return results
2639 if good_revision is None:
2640 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
2641 return results
2643 # Check that they didn't accidentally swap good and bad revisions.
2644 if not self.CheckIfRevisionsInProperOrder(
2645 target_depot, good_revision, bad_revision):
2646 results['error'] = ('bad_revision < good_revision, did you swap these '
2647 'by mistake?')
2648 return results
2650 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
2651 bad_revision, good_revision)
2653 if self.opts.output_buildbot_annotations:
2654 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2656 cannot_bisect = self.CanPerformBisect(good_revision)
2657 if cannot_bisect:
2658 results['error'] = cannot_bisect.get('error')
2659 return results
2661 print 'Gathering revision range for bisection.'
2662 # Retrieve a list of revisions to do bisection on.
2663 src_revision_list = self.GetRevisionList(
2664 target_depot, bad_revision, good_revision)
2666 if self.opts.output_buildbot_annotations:
2667 bisect_utils.OutputAnnotationStepClosed()
2669 if src_revision_list:
2670 # revision_data will store information about a revision such as the
2671 # depot it came from, the webkit/V8 revision at that time,
2672 # performance timing, build state, etc...
2673 revision_data = results['revision_data']
2675 # revision_list is the list we're binary searching through at the moment.
2676 revision_list = []
2678 sort_key_ids = 0
2680 for current_revision_id in src_revision_list:
2681 sort_key_ids += 1
2683 revision_data[current_revision_id] = {
2684 'value' : None,
2685 'passed' : '?',
2686 'depot' : target_depot,
2687 'external' : None,
2688 'perf_time' : 0,
2689 'build_time' : 0,
2690 'sort' : sort_key_ids,
2692 revision_list.append(current_revision_id)
2694 min_revision = 0
2695 max_revision = len(revision_list) - 1
2697 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2699 if self.opts.output_buildbot_annotations:
2700 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2702 print 'Gathering reference values for bisection.'
2704 # Perform the performance tests on the good and bad revisions, to get
2705 # reference values.
2706 bad_results, good_results = self.GatherReferenceValues(good_revision,
2707 bad_revision,
2708 command_to_run,
2709 metric,
2710 target_depot)
2712 if self.opts.output_buildbot_annotations:
2713 bisect_utils.OutputAnnotationStepClosed()
2715 if bad_results[1]:
2716 results['error'] = ('An error occurred while building and running '
2717 'the \'bad\' reference value. The bisect cannot continue without '
2718 'a working \'bad\' revision to start from.\n\nError: %s' %
2719 bad_results[0])
2720 return results
2722 if good_results[1]:
2723 results['error'] = ('An error occurred while building and running '
2724 'the \'good\' reference value. The bisect cannot continue without '
2725 'a working \'good\' revision to start from.\n\nError: %s' %
2726 good_results[0])
2727 return results
2730 # We need these reference values to determine if later runs should be
2731 # classified as pass or fail.
2732 known_bad_value = bad_results[0]
2733 known_good_value = good_results[0]
2735 # Can just mark the good and bad revisions explicitly here since we
2736 # already know the results.
2737 bad_revision_data = revision_data[revision_list[0]]
2738 bad_revision_data['external'] = bad_results[2]
2739 bad_revision_data['perf_time'] = bad_results[3]
2740 bad_revision_data['build_time'] = bad_results[4]
2741 bad_revision_data['passed'] = False
2742 bad_revision_data['value'] = known_bad_value
2744 good_revision_data = revision_data[revision_list[max_revision]]
2745 good_revision_data['external'] = good_results[2]
2746 good_revision_data['perf_time'] = good_results[3]
2747 good_revision_data['build_time'] = good_results[4]
2748 good_revision_data['passed'] = True
2749 good_revision_data['value'] = known_good_value
2751 next_revision_depot = target_depot
2753 while True:
2754 if not revision_list:
2755 break
2757 min_revision_data = revision_data[revision_list[min_revision]]
2758 max_revision_data = revision_data[revision_list[max_revision]]
2760 if max_revision - min_revision <= 1:
2761 current_depot = min_revision_data['depot']
2762 if min_revision_data['passed'] == '?':
2763 next_revision_index = min_revision
2764 elif max_revision_data['passed'] == '?':
2765 next_revision_index = max_revision
2766 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2767 previous_revision = revision_list[min_revision]
2768 # If there were changes to any of the external libraries we track, we
2769 # should bisect the changes there as well.
2770 external_depot = self._FindNextDepotToBisect(
2771 current_depot, min_revision_data, max_revision_data)
2773 # If there was no change in any of the external depots, the search
2774 # is over.
2775 if not external_depot:
2776 if current_depot == 'v8':
2777 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2778 'continue any further. The script can only bisect into '
2779 'V8\'s bleeding_edge repository if both the current and '
2780 'previous revisions in trunk map directly to revisions in '
2781 'bleeding_edge.')
2782 break
2784 earliest_revision = max_revision_data['external'][external_depot]
2785 latest_revision = min_revision_data['external'][external_depot]
2787 new_revision_list = self.PrepareToBisectOnDepot(
2788 external_depot, latest_revision, earliest_revision,
2789 previous_revision)
2791 if not new_revision_list:
2792 results['error'] = ('An error occurred attempting to retrieve '
2793 'revision range: [%s..%s]' %
2794 (earliest_revision, latest_revision))
2795 return results
2797 _AddRevisionsIntoRevisionData(
2798 new_revision_list, external_depot, min_revision_data['sort'],
2799 revision_data)
2801 # Reset the bisection and perform it on the newly inserted
2802 # changelists.
2803 revision_list = new_revision_list
2804 min_revision = 0
2805 max_revision = len(revision_list) - 1
2806 sort_key_ids += len(revision_list)
2808 print ('Regression in metric %s appears to be the result of '
2809 'changes in [%s].' % (metric, external_depot))
2811 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2813 continue
2814 else:
2815 break
2816 else:
2817 next_revision_index = (int((max_revision - min_revision) / 2) +
2818 min_revision)
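# e.g. with min_revision=0 and max_revision=10 the next index to test is 5;
# depending on whether that run passes, the window narrows to [0, 5] or
# [5, 10] on the next iteration.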
2820 next_revision_id = revision_list[next_revision_index]
2821 next_revision_data = revision_data[next_revision_id]
2822 next_revision_depot = next_revision_data['depot']
2824 self.ChangeToDepotWorkingDirectory(next_revision_depot)
2826 if self.opts.output_buildbot_annotations:
2827 step_name = 'Working on [%s]' % next_revision_id
2828 bisect_utils.OutputAnnotationStepStart(step_name)
2830 print 'Working on revision: [%s]' % next_revision_id
2832 run_results = self.SyncBuildAndRunRevision(next_revision_id,
2833 next_revision_depot,
2834 command_to_run,
2835 metric, skippable=True)
2837 # If the build is successful, check whether or not the metric
2838 # had regressed.
2839 if not run_results[1]:
2840 if len(run_results) > 2:
2841 next_revision_data['external'] = run_results[2]
2842 next_revision_data['perf_time'] = run_results[3]
2843 next_revision_data['build_time'] = run_results[4]
2845 passed_regression = self._CheckIfRunPassed(run_results[0],
2846 known_good_value,
2847 known_bad_value)
2849 next_revision_data['passed'] = passed_regression
2850 next_revision_data['value'] = run_results[0]
2852 if passed_regression:
2853 max_revision = next_revision_index
2854 else:
2855 min_revision = next_revision_index
2856 else:
2857 if run_results[1] == BUILD_RESULT_SKIPPED:
2858 next_revision_data['passed'] = 'Skipped'
2859 elif run_results[1] == BUILD_RESULT_FAIL:
2860 next_revision_data['passed'] = 'Build Failed'
2862 print run_results[0]
2864 # If the build is broken, remove it and redo search.
2865 revision_list.pop(next_revision_index)
2867 max_revision -= 1
2869 if self.opts.output_buildbot_annotations:
2870 self._PrintPartialResults(results)
2871 bisect_utils.OutputAnnotationStepClosed()
2872 else:
2873 # Weren't able to sync and retrieve the revision range.
2874 results['error'] = ('An error occurred attempting to retrieve revision '
2875 'range: [%s..%s]' % (good_revision, bad_revision))
2877 return results
2879 def _PrintPartialResults(self, results_dict):
2880 revision_data = results_dict['revision_data']
2881 revision_data_sorted = sorted(revision_data.iteritems(),
2882 key = lambda x: x[1]['sort'])
2883 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2885 self._PrintTestedCommitsTable(revision_data_sorted,
2886 results_dict['first_working_revision'],
2887 results_dict['last_broken_revision'],
2888 100, final_step=False)
2890 def _ConfidenceLevelStatus(self, results_dict):
2891 if not results_dict['confidence']:
2892 return None
2893 confidence_status = 'Successful with %(level)s confidence%(warning)s.'
2894 if results_dict['confidence'] >= 95:
2895 level = 'high'
2896 else:
2897 level = 'low'
2898 warning = ' and warnings'
2899 if not self.warnings:
2900 warning = ''
2901 return confidence_status % {'level': level, 'warning': warning}
2903 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2904 info = self.source_control.QueryRevisionInfo(cl,
2905 self._GetDepotDirectory(depot))
2906 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
2907 try:
2908 # Format is "git-svn-id: svn://....@123456 <other data>"
2909 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2910 svn_revision = svn_line[0].split('@')
2911 svn_revision = svn_revision[1].split(' ')[0]
2912 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2913 except IndexError:
2914 return ''
2915 return ''
2917 def _PrintRevisionInfo(self, cl, info, depot=None):
2918 email_info = ''
2919 if not info['email'].startswith(info['author']):
2920 email_info = '\nEmail : %s' % info['email']
2921 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
2922 if commit_link:
2923 commit_info = '\nLink : %s' % commit_link
2924 else:
2925 commit_info = ('\nFailed to parse svn revision from body:\n%s' %
2926 info['body'])
2927 print RESULTS_REVISION_INFO % {
2928 'subject': info['subject'],
2929 'author': info['author'],
2930 'email_info': email_info,
2931 'commit_info': commit_info,
2932 'cl': cl,
2933 'cl_date': info['date']
2936 def _PrintTestedCommitsHeader(self):
2937 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2938 _PrintTableRow(
2939 [20, 70, 14, 12, 13],
2940 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
2941 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2942 _PrintTableRow(
2943 [20, 70, 14, 12, 13],
2944 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
2945 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2946 _PrintTableRow(
2947 [20, 70, 14, 13],
2948 ['Depot', 'Commit SHA', 'Return Code', 'State'])
2949 else:
2950 assert False, 'Invalid bisect_mode specified.'
2952 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
2953 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2954 std_error = '+-%.02f' % current_data['value']['std_err']
2955 mean = '%.02f' % current_data['value']['mean']
2956 _PrintTableRow(
2957 [20, 70, 12, 14, 13],
2958 [current_data['depot'], cl_link, mean, std_error, state_str])
2959 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2960 std_error = '+-%.02f' % current_data['value']['std_err']
2961 mean = '%.02f' % current_data['value']['mean']
2962 _PrintTableRow(
2963 [20, 70, 12, 14, 13],
2964 [current_data['depot'], cl_link, std_error, mean, state_str])
2965 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2966 mean = '%d' % current_data['value']['mean']
2967 _PrintTableRow(
2968 [20, 70, 14, 13],
2969 [current_data['depot'], cl_link, mean, state_str])
2971 def _PrintTestedCommitsTable(
2972 self, revision_data_sorted, first_working_revision, last_broken_revision,
2973 confidence, final_step=True):
2974 print
2975 if final_step:
2976 print '===== TESTED COMMITS ====='
2977 else:
2978 print '===== PARTIAL RESULTS ====='
2979 self._PrintTestedCommitsHeader()
2980 state = 0
2981 for current_id, current_data in revision_data_sorted:
2982 if current_data['value']:
2983 if (current_id == last_broken_revision or
2984 current_id == first_working_revision):
2985 # If confidence is too low, don't add this empty line since it's
2986 # used to put focus on a suspected CL.
2987 if confidence and final_step:
2988 print
2989 state += 1
2990 if state == 2 and not final_step:
2991 # Just want a separation between "bad" and "good" cl's.
2992 print
2994 state_str = 'Bad'
2995 if state == 1 and final_step:
2996 state_str = 'Suspected CL'
2997 elif state == 2:
2998 state_str = 'Good'
3000 # If confidence is too low, don't bother outputting good/bad.
3001 if not confidence:
3002 state_str = ''
3003 state_str = state_str.center(13, ' ')
3005 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3006 current_data['depot'])
3007 if not cl_link:
3008 cl_link = current_id
3009 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3011 def _PrintReproSteps(self):
3012 """Prints out a section of the results explaining how to run the test.
3014 This message includes the command used to run the test.
3016 command = '$ ' + self.opts.command
3017 if bisect_utils.IsTelemetryCommand(self.opts.command):
3018 command += ('\nAlso consider passing --profiler=list to see available '
3019 'profilers.')
3020 print REPRO_STEPS_LOCAL % {'command': command}
3021 print REPRO_STEPS_TRYJOB % {'command': command}
3023 def _PrintOtherRegressions(self, other_regressions, revision_data):
3024 """Prints a section of the results about other potential regressions."""
3025 print
3026 print 'Other regressions may have occurred:'
3027 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3028 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3029 for regression in other_regressions:
3030 current_id, previous_id, confidence = regression
3031 current_data = revision_data[current_id]
3032 previous_data = revision_data[previous_id]
3034 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3035 current_data['depot'])
3036 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3037 previous_data['depot'])
3039 # If we can't map it to a viewable URL, at least show the original hash.
3040 if not current_link:
3041 current_link = current_id
3042 if not previous_link:
3043 previous_link = previous_id
3045 print ' %8s %70s %s' % (
3046 current_data['depot'], current_link,
3047 ('%d%%' % confidence).center(10, ' '))
3048 print ' %8s %70s' % (
3049 previous_data['depot'], previous_link)
3050 print
3052 def _GetResultsDict(self, revision_data, revision_data_sorted):
3053 # Find range where it possibly broke.
3054 first_working_revision = None
3055 first_working_revision_index = -1
3056 last_broken_revision = None
3057 last_broken_revision_index = -1
3059 culprit_revisions = []
3060 other_regressions = []
3061 regression_size = 0.0
3062 regression_std_err = 0.0
3063 confidence = 0.0
3065 for i in xrange(len(revision_data_sorted)):
3066 k, v = revision_data_sorted[i]
3067 if v['passed'] == 1:
3068 if not first_working_revision:
3069 first_working_revision = k
3070 first_working_revision_index = i
3072 if not v['passed']:
3073 last_broken_revision = k
3074 last_broken_revision_index = i
3076 if last_broken_revision != None and first_working_revision != None:
3077 broken_means = []
3078 for i in xrange(0, last_broken_revision_index + 1):
3079 if revision_data_sorted[i][1]['value']:
3080 broken_means.append(revision_data_sorted[i][1]['value']['values'])
3082 working_means = []
3083 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3084 if revision_data_sorted[i][1]['value']:
3085 working_means.append(revision_data_sorted[i][1]['value']['values'])
3087 # Flatten the lists to calculate mean of all values.
3088 working_mean = sum(working_means, [])
3089 broken_mean = sum(broken_means, [])
3091 # Calculate the approximate size of the regression
3092 mean_of_bad_runs = math_utils.Mean(broken_mean)
3093 mean_of_good_runs = math_utils.Mean(working_mean)
3095 regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
3096 mean_of_bad_runs)
3097 if math.isnan(regression_size):
3098 regression_size = 'zero-to-nonzero'
3100 regression_std_err = math.fabs(math_utils.PooledStandardError(
3101 [working_mean, broken_mean]) /
3102 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
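# Illustrative example (assuming RelativeChange returns the change relative
# to the good mean): mean_of_good_runs=200 ms and mean_of_bad_runs=230 ms
# gives regression_size = 100 * 0.15 = 15%.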
3104 # Give a "confidence" in the bisect. At the moment we use how distinct the
3105 # values are before and after the last broken revision, and how noisy the
3106 # overall graph is.
3107 confidence = ConfidenceScore(working_means, broken_means)
3109 culprit_revisions = []
3111 cwd = os.getcwd()
3112 self.ChangeToDepotWorkingDirectory(
3113 revision_data[last_broken_revision]['depot'])
3115 if revision_data[last_broken_revision]['depot'] == 'cros':
3116 # Want to get a list of all the commits and what depots they belong
3117 # to so that we can grab info about each.
3118 cmd = ['repo', 'forall', '-c',
3119 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3120 last_broken_revision, first_working_revision + 1)]
3121 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
3123 changes = []
3124 assert not return_code, ('An error occurred while running '
3125 '"%s"' % ' '.join(cmd))
3126 last_depot = None
3127 cwd = os.getcwd()
3128 for l in output.split('\n'):
3129 if l:
3130 # Output will be in form:
3131 # /path_to_depot
3132 # /path_to_other_depot
3133 # <SHA1>
3134 # /path_again
3135 # <SHA1>
3136 # etc.
3137 if l[0] == '/':
3138 last_depot = l
3139 else:
3140 contents = l.split(' ')
3141 if len(contents) > 1:
3142 changes.append([last_depot, contents[0]])
3143 for c in changes:
3144 os.chdir(c[0])
3145 info = self.source_control.QueryRevisionInfo(c[1])
3146 culprit_revisions.append((c[1], info, None))
3147 else:
3148 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3149 k, v = revision_data_sorted[i]
3150 if k == first_working_revision:
3151 break
3152 self.ChangeToDepotWorkingDirectory(v['depot'])
3153 info = self.source_control.QueryRevisionInfo(k)
3154 culprit_revisions.append((k, info, v['depot']))
3155 os.chdir(cwd)
3157 # Check for any other possible regression ranges.
3158 other_regressions = _FindOtherRegressions(
3159 revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)
3161 return {
3162 'first_working_revision': first_working_revision,
3163 'last_broken_revision': last_broken_revision,
3164 'culprit_revisions': culprit_revisions,
3165 'other_regressions': other_regressions,
3166 'regression_size': regression_size,
3167 'regression_std_err': regression_std_err,
3168 'confidence': confidence,
3171 def _CheckForWarnings(self, results_dict):
3172 if len(results_dict['culprit_revisions']) > 1:
3173 self.warnings.append('Due to build errors, regression range could '
3174 'not be narrowed down to a single commit.')
3175 if self.opts.repeat_test_count == 1:
3176 self.warnings.append('Tests were only set to run once. This may '
3177 'be insufficient to get meaningful results.')
3178 if results_dict['confidence'] < 100:
3179 if results_dict['confidence']:
3180 self.warnings.append(
3181 'Confidence is less than 100%. There could be other candidates '
3182 'for this regression. Try bisecting again with increased '
3183 'repeat_count or on a sub-metric that shows the regression more '
3184 'clearly.')
3185 else:
3186 self.warnings.append(
3187 'Confidence is 0%. Try bisecting again on another platform, with '
3188 'increased repeat_count or on a sub-metric that shows the '
3189 'regression more clearly.')
3191 def FormatAndPrintResults(self, bisect_results):
3192 """Prints the results from a bisection run in a readable format.
3194 Args:
3195 bisect_results: The results from a bisection test run.
3197 revision_data = bisect_results['revision_data']
3198 revision_data_sorted = sorted(revision_data.iteritems(),
3199 key = lambda x: x[1]['sort'])
3200 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3202 self._CheckForWarnings(results_dict)
3204 if self.opts.output_buildbot_annotations:
3205 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
3207 print
3208 print 'Full results of bisection:'
3209 for current_id, current_data in revision_data_sorted:
3210 build_status = current_data['passed']
3212 if type(build_status) is bool:
3213 if build_status:
3214 build_status = 'Good'
3215 else:
3216 build_status = 'Bad'
3218 print ' %20s %40s %s' % (current_data['depot'],
3219 current_id, build_status)
3220 print
3222 if self.opts.output_buildbot_annotations:
3223 bisect_utils.OutputAnnotationStepClosed()
3224 # The perf dashboard scrapes the "results" step in order to comment on
3225 # bugs. If you change this, please update the perf dashboard as well.
3226 bisect_utils.OutputAnnotationStepStart('Results')
3228 self._PrintBanner(results_dict)
3229 self._PrintWarnings()
3231 if results_dict['culprit_revisions'] and results_dict['confidence']:
3232 for culprit in results_dict['culprit_revisions']:
3233 cl, info, depot = culprit
3234 self._PrintRevisionInfo(cl, info, depot)
3235 if results_dict['other_regressions']:
3236 self._PrintOtherRegressions(results_dict['other_regressions'],
3237 revision_data)
3238 self._PrintTestedCommitsTable(revision_data_sorted,
3239 results_dict['first_working_revision'],
3240 results_dict['last_broken_revision'],
3241 results_dict['confidence'])
3242 _PrintStepTime(revision_data_sorted)
3243 self._PrintReproSteps()
3244 _PrintThankYou()
3245 if self.opts.output_buildbot_annotations:
3246 bisect_utils.OutputAnnotationStepClosed()
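  # Output sketch for the 'Full results of bisection' table printed in
  # FormatAndPrintResults above; each row comes from
  # ' %20s %40s %s' % (depot, revision id, build status), with placeholder
  # values shown here rather than real bisect output:
  #
  #               chromium  0123456789abcdef0123456789abcdef01234567  Good
  #                 webkit  89abcdef0123456789abcdef0123456789abcdef  Bad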
3248 def _PrintBanner(self, results_dict):
3249 if self._IsBisectModeReturnCode():
3250 metrics = 'N/A'
3251 change = 'Yes'
3252 else:
3253 metrics = '/'.join(self.opts.metric)
3254 change = '%.02f%% (+/-%.02f%%)' % (
3255 results_dict['regression_size'], results_dict['regression_std_err'])
3257 if results_dict['culprit_revisions'] and results_dict['confidence']:
3258 status = self._ConfidenceLevelStatus(results_dict)
3259 else:
3260 status = 'Failure, could not reproduce.'
3261 change = 'Bisect could not reproduce a change.'
3263 print RESULTS_BANNER % {
3264 'status': status,
3265 'command': self.opts.command,
3266 'metrics': metrics,
3267 'change': change,
3268         'confidence': results_dict['confidence'],
3269     }
3271 def _PrintWarnings(self):
3272 """Prints a list of warning strings if there are any."""
3273 if not self.warnings:
3274 return
3275 print
3276 print 'WARNINGS:'
3277 for w in set(self.warnings):
3278 print ' ! %s' % w
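  # Output sketch for _PrintWarnings above (the warning text is one of the
  # messages composed in _CheckForWarnings; each warning is printed on a
  # single ' ! ' line):
  #
  #   WARNINGS:
  #    ! Tests were only set to run once. This may be insufficient to get meaningful results.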
3281 def _IsPlatformSupported():
3282 """Checks that this platform and build system are supported.
3287   Returns:
3288     True if the platform and build system are supported.
3289   """
3290 # Haven't tested the script out on any other platforms yet.
3291 supported = ['posix', 'nt']
3292 return os.name in supported
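# For reference: os.name is 'posix' on Linux and Mac and 'nt' on Windows, so
# both pass the check above; anything else (e.g. 'java' on Jython) is rejected.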
3295 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3296 """Removes the directory tree specified, and then creates an empty
3297 directory in the same location (if not specified to skip).
3299 Args:
3300 path_to_dir: Path to the directory tree.
3301 skip_makedir: Whether to skip creating empty directory, default is False.
3303 Returns:
3304     True if successful, False if an error occurred.
3305   """
3306 try:
3307 if os.path.exists(path_to_dir):
3308 shutil.rmtree(path_to_dir)
3309 except OSError, e:
3310 if e.errno != errno.ENOENT:
3311 return False
3313 if not skip_makedir:
3314 return MaybeMakeDirectory(path_to_dir)
3316 return True
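# A minimal usage sketch for RmTreeAndMkDir (paths are illustrative; this is
# how RemoveBuildFiles below clears the output directories):
#
#   RmTreeAndMkDir(os.path.join('out', 'Release'))    # remove, then recreate
#   RmTreeAndMkDir('scratch_dir', skip_makedir=True)  # remove only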
3319 def RemoveBuildFiles(build_type):
3320 """Removes build files from previous runs."""
3321 if RmTreeAndMkDir(os.path.join('out', build_type)):
3322 if RmTreeAndMkDir(os.path.join('build', build_type)):
3323 return True
3324 return False
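# Usage sketch (the build type is illustrative): RemoveBuildFiles('Release')
# wipes both out/Release and build/Release so that stale artifacts from a
# previous run cannot affect the bisect.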
3327 class BisectOptions(object):
3328 """Options to be used when running bisection."""
3329 def __init__(self):
3330 super(BisectOptions, self).__init__()
3332 self.target_platform = 'chromium'
3333 self.build_preference = None
3334 self.good_revision = None
3335 self.bad_revision = None
3336 self.use_goma = None
3337 self.goma_dir = None
3338 self.cros_board = None
3339 self.cros_remote_ip = None
3340 self.repeat_test_count = 20
3341 self.truncate_percent = 25
3342 self.max_time_minutes = 20
3343 self.metric = None
3344 self.command = None
3345 self.output_buildbot_annotations = None
3346 self.no_custom_deps = False
3347 self.working_directory = None
3348 self.extra_src = None
3349 self.debug_ignore_build = None
3350 self.debug_ignore_sync = None
3351 self.debug_ignore_perf_test = None
3352 self.gs_bucket = None
3353 self.target_arch = 'ia32'
3354 self.target_build_type = 'Release'
3355 self.builder_host = None
3356 self.builder_port = None
3357 self.bisect_mode = BISECT_MODE_MEAN
3359 @staticmethod
3360 def _CreateCommandLineParser():
3361 """Creates a parser with bisect options.
3363 Returns:
3364       An instance of optparse.OptionParser.
3365     """
3366 usage = ('%prog [options] [-- chromium-options]\n'
3367 'Perform binary search on revision history to find a minimal '
3368              'range of revisions where a performance metric regressed.\n')
3370 parser = optparse.OptionParser(usage=usage)
3372 group = optparse.OptionGroup(parser, 'Bisect options')
3373 group.add_option('-c', '--command',
3374 type='str',
3375 help='A command to execute your performance test at' +
3376 ' each point in the bisection.')
3377 group.add_option('-b', '--bad_revision',
3378 type='str',
3379 help='A bad revision to start bisection. ' +
3380 'Must be later than good revision. May be either a git' +
3381 ' or svn revision.')
3382 group.add_option('-g', '--good_revision',
3383 type='str',
3384 help='A revision to start bisection where performance' +
3385 ' test is known to pass. Must be earlier than the ' +
3386 'bad revision. May be either a git or svn revision.')
3387 group.add_option('-m', '--metric',
3388 type='str',
3389 help='The desired metric to bisect on. For example ' +
3390 '"vm_rss_final_b/vm_rss_f_b"')
3391 group.add_option('-r', '--repeat_test_count',
3392 type='int',
3393 default=20,
3394 help='The number of times to repeat the performance '
3395 'test. Values will be clamped to range [1, 100]. '
3396 'Default value is 20.')
3397 group.add_option('--max_time_minutes',
3398 type='int',
3399 default=20,
3400 help='The maximum time (in minutes) to take running the '
3401 'performance tests. The script will run the performance '
3402 'tests according to --repeat_test_count, so long as it '
3403 'doesn\'t exceed --max_time_minutes. Values will be '
3404                      'clamped to range [1, 60]. '
3405 'Default value is 20.')
3406 group.add_option('-t', '--truncate_percent',
3407 type='int',
3408 default=25,
3409 help='The highest/lowest % are discarded to form a '
3410 'truncated mean. Values will be clamped to range [0, '
3411 '25]. Default value is 25 (highest/lowest 25% will be '
3412 'discarded).')
3413 group.add_option('--bisect_mode',
3414 type='choice',
3415 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3416 BISECT_MODE_RETURN_CODE],
3417 default=BISECT_MODE_MEAN,
3418 help='The bisect mode. Choices are to bisect on the '
3419 'difference in mean, std_dev, or return_code.')
3420 parser.add_option_group(group)
3422 group = optparse.OptionGroup(parser, 'Build options')
3423 group.add_option('-w', '--working_directory',
3424 type='str',
3425 help='Path to the working directory where the script '
3426 'will do an initial checkout of the chromium depot. The '
3427 'files will be placed in a subdirectory "bisect" under '
3428 'working_directory and that will be used to perform the '
3429                      'bisection. This parameter is optional; if it is not '
3430 'supplied, the script will work from the current depot.')
3431 group.add_option('--build_preference',
3432 type='choice',
3433 choices=['msvs', 'ninja', 'make'],
3434 help='The preferred build system to use. On linux/mac '
3435 'the options are make/ninja. On Windows, the options '
3436 'are msvs/ninja.')
3437 group.add_option('--target_platform',
3438 type='choice',
3439 choices=['chromium', 'cros', 'android', 'android-chrome'],
3440 default='chromium',
3441                      help='The target platform. Choices are "chromium" '
3442                      '(current platform), "cros", "android", or "android-chrome". '
3443                      'If you specify something other than "chromium", you must '
3444                      'be properly set up to build that platform.')
3445 group.add_option('--no_custom_deps',
3446 dest='no_custom_deps',
3447 action='store_true',
3448 default=False,
3449                      help='Run the script without the default custom_deps.')
3450 group.add_option('--extra_src',
3451 type='str',
3452 help='Path to a script which can be used to modify '
3453 'the bisect script\'s behavior.')
3454 group.add_option('--cros_board',
3455 type='str',
3456 help='The cros board type to build.')
3457 group.add_option('--cros_remote_ip',
3458 type='str',
3459 help='The remote machine to image to.')
3460 group.add_option('--use_goma',
3461 action='store_true',
3462                      help='Enable goma and add extra build threads to take '
3463                      'advantage of it.')
3464 group.add_option('--goma_dir',
3465 help='Path to goma tools (or system default if not '
3466 'specified).')
3467 group.add_option('--output_buildbot_annotations',
3468 action='store_true',
3469 help='Add extra annotation output for buildbot.')
3470 group.add_option('--gs_bucket',
3471 default='',
3472 dest='gs_bucket',
3473 type='str',
3474                      help=('Name of the Google Storage bucket used to upload '
3475                      'or download builds, e.g. chrome-perf.'))
3476 group.add_option('--target_arch',
3477 type='choice',
3478 choices=['ia32', 'x64', 'arm'],
3479 default='ia32',
3480 dest='target_arch',
3481 help=('The target build architecture. Choices are "ia32" '
3482 '(default), "x64" or "arm".'))
3483 group.add_option('--target_build_type',
3484 type='choice',
3485 choices=['Release', 'Debug'],
3486 default='Release',
3487 help='The target build type. Choices are "Release" '
3488 '(default), or "Debug".')
3489 group.add_option('--builder_host',
3490 dest='builder_host',
3491 type='str',
3492                      help=('Host address of the server that produces builds '
3493                      'when a try job request is posted.'))
3494 group.add_option('--builder_port',
3495 dest='builder_port',
3496 type='int',
3497                      help=('HTTP port of the server that produces builds '
3498                      'when a try job request is posted.'))
3499 parser.add_option_group(group)
3501 group = optparse.OptionGroup(parser, 'Debug options')
3502 group.add_option('--debug_ignore_build',
3503 action='store_true',
3504 help='DEBUG: Don\'t perform builds.')
3505 group.add_option('--debug_ignore_sync',
3506 action='store_true',
3507 help='DEBUG: Don\'t perform syncs.')
3508 group.add_option('--debug_ignore_perf_test',
3509 action='store_true',
3510 help='DEBUG: Don\'t perform performance tests.')
3511 parser.add_option_group(group)
3512 return parser
3514 def ParseCommandLine(self):
3515 """Parses the command line for bisect options."""
3516 parser = self._CreateCommandLineParser()
3517 opts, _ = parser.parse_args()
3519 try:
3520 if not opts.command:
3521 raise RuntimeError('missing required parameter: --command')
3523 if not opts.good_revision:
3524 raise RuntimeError('missing required parameter: --good_revision')
3526 if not opts.bad_revision:
3527 raise RuntimeError('missing required parameter: --bad_revision')
3529 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3530 raise RuntimeError('missing required parameter: --metric')
3532 if opts.gs_bucket:
3533 if not cloud_storage.List(opts.gs_bucket):
3534 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3535 if not opts.builder_host:
3536           raise RuntimeError('Must specify try server hostname when '
3537 'gs_bucket is used: --builder_host')
3538 if not opts.builder_port:
3539           raise RuntimeError('Must specify try server port number when '
3540 'gs_bucket is used: --builder_port')
3541 if opts.target_platform == 'cros':
3542 # Run sudo up front to make sure credentials are cached for later.
3543 print 'Sudo is required to build cros:'
3544 print
3545 bisect_utils.RunProcess(['sudo', 'true'])
3547 if not opts.cros_board:
3548 raise RuntimeError('missing required parameter: --cros_board')
3550 if not opts.cros_remote_ip:
3551 raise RuntimeError('missing required parameter: --cros_remote_ip')
3553 if not opts.working_directory:
3554 raise RuntimeError('missing required parameter: --working_directory')
3556 metric_values = opts.metric.split('/')
3557 if (len(metric_values) != 2 and
3558 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3559 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3561 opts.metric = metric_values
3562 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3563 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3564 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3565 opts.truncate_percent = opts.truncate_percent / 100.0
3567 for k, v in opts.__dict__.iteritems():
3568 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
3569 setattr(self, k, v)
3570 except RuntimeError, e:
3571 output_string = StringIO.StringIO()
3572 parser.print_help(file=output_string)
3573 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3574 output_string.close()
3575 raise RuntimeError(error_message)
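  # Sketch of the normalization performed above (flag values are illustrative):
  #
  #   --metric vm_rss_final_b/vm_rss_f_b  ->  opts.metric == ['vm_rss_final_b', 'vm_rss_f_b']
  #   --repeat_test_count 500             ->  clamped to 100
  #   --max_time_minutes 0                ->  clamped to 1
  #   --truncate_percent 25               ->  stored as the fraction 0.25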
3577 @staticmethod
3578 def FromDict(values):
3579 """Creates an instance of BisectOptions with the values parsed from a
3580 .cfg file.
3582 Args:
3583 values: a dict containing options to set.
3585 Returns:
3586       An instance of BisectOptions.
3587     """
3588 opts = BisectOptions()
3589 for k, v in values.iteritems():
3590 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
3591 setattr(opts, k, v)
3593 metric_values = opts.metric.split('/')
3594 if len(metric_values) != 2:
3595 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3597 opts.metric = metric_values
3598 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3599 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3600 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3601 opts.truncate_percent = opts.truncate_percent / 100.0
3603 return opts
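# A minimal sketch of BisectOptions.FromDict (keys must match attributes set
# in __init__; the command, revision and metric values here are placeholders,
# not real configuration data):
#
#   opts = BisectOptions.FromDict({
#       'command': './run_some_perf_test --example-flag',
#       'good_revision': 'GOOD_REVISION',
#       'bad_revision': 'BAD_REVISION',
#       'metric': 'some_chart/some_trace',
#       'repeat_test_count': 10,
#   })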
3606 def main():
3608 try:
3609 opts = BisectOptions()
3610 opts.ParseCommandLine()
3612 if opts.extra_src:
3613 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3614 if not extra_src:
3615 raise RuntimeError('Invalid or missing --extra_src.')
3616 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
3618 if opts.working_directory:
3619 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3620 if opts.no_custom_deps:
3621 custom_deps = None
3622 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3624 os.chdir(os.path.join(os.getcwd(), 'src'))
3626 if not RemoveBuildFiles(opts.target_build_type):
3627 raise RuntimeError('Something went wrong removing the build files.')
3629 if not _IsPlatformSupported():
3630 raise RuntimeError('Sorry, this platform isn\'t supported yet.')
3632 # Check what source control method is being used, and create a
3633 # SourceControl object if possible.
3634 source_control = source_control_module.DetermineAndCreateSourceControl(opts)
3636 if not source_control:
3637 raise RuntimeError(
3638 'Sorry, only the git workflow is supported at the moment.')
3640     # gclient sync seems to fail if you're not on the master branch.
3641 if (not source_control.IsInProperBranch() and
3642 not opts.debug_ignore_sync and
3643 not opts.working_directory):
3644       raise RuntimeError('You must switch to the master branch to run bisection.')
3645 bisect_test = BisectPerformanceMetrics(source_control, opts)
3646 try:
3647 bisect_results = bisect_test.Run(opts.command,
3648 opts.bad_revision,
3649 opts.good_revision,
3650 opts.metric)
3651 if bisect_results['error']:
3652 raise RuntimeError(bisect_results['error'])
3653 bisect_test.FormatAndPrintResults(bisect_results)
3654 return 0
3655 finally:
3656 bisect_test.PerformCleanup()
3657 except RuntimeError, e:
3658 if opts.output_buildbot_annotations:
3659 # The perf dashboard scrapes the "results" step in order to comment on
3660 # bugs. If you change this, please update the perf dashboard as well.
3661 bisect_utils.OutputAnnotationStepStart('Results')
3662 print 'Error: %s' % e.message
3663 if opts.output_buildbot_annotations:
3664 bisect_utils.OutputAnnotationStepClosed()
3665 return 1
3668 if __name__ == '__main__':
3669 sys.exit(main())