1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using SVN revision numbers):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
35 """
37 import copy
38 import datetime
39 import errno
40 import hashlib
41 import math
42 import optparse
43 import os
44 import re
45 import shlex
46 import shutil
47 import StringIO
48 import sys
49 import time
50 import zipfile
52 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
54 from auto_bisect import bisect_utils
55 from auto_bisect import math_utils
56 from auto_bisect import post_perf_builder_job as bisect_builder
57 from auto_bisect import source_control as source_control_module
58 from auto_bisect import ttest
59 from telemetry.util import cloud_storage
61 # Below is the map of "depot" names to information about each depot. Each depot
62 # is a repository, and in the process of bisecting, revision ranges in these
63 # repositories may also be bisected.
65 # Each depot information dictionary may contain:
66 # src: Path to the working directory.
67 # recurse: True if this repository will get bisected.
68 # depends: A list of other repositories that are actually part of the same
69 # repository in svn. If the repository has any dependent repositories
70 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then
71 # they are specified here.
72 # svn: URL of SVN repository. Needed for git workflow to resolve hashes to
73 # SVN revisions.
74 # from: Parent depot that must be bisected before this is bisected.
75 # deps_var: Key name in vars variable in DEPS file that has revision
76 # information.
77 DEPOT_DEPS_NAME = {
78 'chromium': {
79 'src': 'src',
80 'recurse': True,
81 'depends': None,
82 'from': ['cros', 'android-chrome'],
83 'viewvc':
84 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
85 'deps_var': 'chromium_rev'
87 'webkit': {
88 'src': 'src/third_party/WebKit',
89 'recurse': True,
90 'depends': None,
91 'from': ['chromium'],
92 'viewvc':
93 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
94 'deps_var': 'webkit_revision'
96 'angle': {
97 'src': 'src/third_party/angle',
98 'src_old': 'src/third_party/angle_dx11',
99 'recurse': True,
100 'depends': None,
101 'from': ['chromium'],
102 'platform': 'nt',
103 'deps_var': 'angle_revision'
105 'v8': {
106 'src': 'src/v8',
107 'recurse': True,
108 'depends': None,
109 'from': ['chromium'],
110 'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
111 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
112 'deps_var': 'v8_revision'
114 'v8_bleeding_edge': {
115 'src': 'src/v8_bleeding_edge',
116 'recurse': True,
117 'depends': None,
118 'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
119 'from': ['v8'],
120 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
121 'deps_var': 'v8_revision'
123 'skia/src': {
124 'src': 'src/third_party/skia/src',
125 'recurse': True,
126 'svn': 'http://skia.googlecode.com/svn/trunk/src',
127 'depends': ['skia/include', 'skia/gyp'],
128 'from': ['chromium'],
129 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
130 'deps_var': 'skia_revision'
132 'skia/include': {
133 'src': 'src/third_party/skia/include',
134 'recurse': False,
135 'svn': 'http://skia.googlecode.com/svn/trunk/include',
136 'depends': None,
137 'from': ['chromium'],
138 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
139 'deps_var': 'None'
141 'skia/gyp': {
142 'src': 'src/third_party/skia/gyp',
143 'recurse': False,
144 'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
145 'depends': None,
146 'from': ['chromium'],
147 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
148 'deps_var': 'None'
152 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
154 CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
155 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
156 CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
157 'testing_rsa')
158 CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
159 'mod_for_test_scripts', 'ssh_keys',
160 'testing_rsa')
162 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision.
163 BUILD_RESULT_SUCCEED = 0
164 BUILD_RESULT_FAIL = 1
165 BUILD_RESULT_SKIPPED = 2
167 # Maximum time in seconds to wait after posting build request to tryserver.
168 # TODO: Change these values based on the actual time taken by buildbots on
169 # the tryserver.
170 MAX_MAC_BUILD_TIME = 14400
171 MAX_WIN_BUILD_TIME = 14400
172 MAX_LINUX_BUILD_TIME = 14400
174 # Patch template to add a new file, DEPS.sha, under the src folder.
175 # This file contains the SHA1 of the DEPS changes made while bisecting
176 # dependency repositories. The patch is sent to the tryserver along with the
177 # DEPS patch. When a build request is posted with a patch, the bisect builders
178 # on the tryserver read the SHA value from this file once the build is
179 # produced and append it to the build archive filename.
180 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
181 new file mode 100644
182 --- /dev/null
183 +++ src/DEPS.sha
184 @@ -0,0 +1 @@
185 +%(deps_sha)s
188 # The possible values of the --bisect_mode flag, which determines what to
189 # use when classifying a revision as "good" or "bad".
190 BISECT_MODE_MEAN = 'mean'
191 BISECT_MODE_STD_DEV = 'std_dev'
192 BISECT_MODE_RETURN_CODE = 'return_code'
194 # The perf dashboard specifically looks for the string
195 # "Estimated Confidence: 95%" to decide whether or not to cc the author(s).
196 # If you change this, please update the perf dashboard as well.
197 RESULTS_BANNER = """
198 ===== BISECT JOB RESULTS =====
199 Status: %(status)s
201 Test Command: %(command)s
202 Test Metric: %(metrics)s
203 Relative Change: %(change)s
204 Estimated Confidence: %(confidence)d%%"""
206 # The perf dashboard specifically looks for the string
207 # "Author : " to parse out who to cc on a bug. If you change the
208 # formatting here, please update the perf dashboard as well.
209 RESULTS_REVISION_INFO = """
210 ===== SUSPECTED CL(s) =====
211 Subject : %(subject)s
212 Author : %(author)s%(email_info)s%(commit_info)s
213 Commit : %(cl)s
214 Date : %(cl_date)s"""
216 REPRO_STEPS_LOCAL = """
217 ==== INSTRUCTIONS TO REPRODUCE ====
218 To run locally:
219 $%(command)s"""
221 REPRO_STEPS_TRYJOB = """
222 To reproduce on Performance trybot:
223 1. Create new git branch or check out existing branch.
224 2. Edit tools/run-perf-test.cfg (instructions in file) or \
225 third_party/WebKit/Tools/run-perf-test.cfg.
226 a) Take care to strip any src/ directories from the head of \
227 relative path names.
228 b) On desktop, only --browser=release is supported; on Android, use \
229 --browser=android-chromium-testshell.
230 c) Test command to use: %(command)s
231 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
232 committed locally to run-perf-test.cfg.
233 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
234 $ git cl upload --bypass-hooks
235 4. Send your try job to the tryserver. \
236 [Please make sure to use appropriate bot to reproduce]
237 $ git cl try -m tryserver.chromium.perf -b <bot>
239 For more details please visit
240 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""
242 RESULTS_THANKYOU = """
243 ===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
244 Visit http://www.chromium.org/developers/core-principles for Chrome's policy
245 on perf regressions.
246 Contact chrome-perf-dashboard-team with any questions or suggestions about
247 bisecting.
248 . .------.
249 . .---. \ \==)
250 . |PERF\ \ \\
251 . | ---------'-------'-----------.
252 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
253 . \______________.-------._______________)
254 . / /
255 . / /
256 . / /==)
257 . ._______."""
260 def _AddAdditionalDepotInfo(depot_info):
261 """Adds additional depot info to the global depot variables."""
262 global DEPOT_DEPS_NAME
263 global DEPOT_NAMES
264 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
265 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
268 def ConfidenceScore(good_results_lists, bad_results_lists):
269 """Calculates a confidence score.
271 This score is a percentage which represents our degree of confidence in the
272 proposition that the good results and bad results are distinct groups, and
273 their differences aren't due to chance alone.
276 Args:
277 good_results_lists: A list of lists of "good" result numbers.
278 bad_results_lists: A list of lists of "bad" result numbers.
280 Returns:
281 A number in the range [0, 100].
283 if not good_results_lists or not bad_results_lists:
284 return 0.0
286 # Flatten the lists of results lists.
287 sample1 = sum(good_results_lists, [])
288 sample2 = sum(bad_results_lists, [])
290 # The p-value is approximately the probability of obtaining the given set
291 # of good and bad values just by chance.
292 _, _, p_value = ttest.WelchsTTest(sample1, sample2)
293 return 100.0 * (1.0 - p_value)
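# Illustrative usage, not part of the original script: with clearly separated
# samples the score approaches 100, while heavily overlapping samples score
# near 0 (assuming ttest.WelchsTTest behaves like a standard Welch's t-test):
#   ConfidenceScore([[10.0, 10.1, 10.2]], [[20.0, 20.1, 20.3]])  # ~100.0
#   ConfidenceScore([[10.0, 20.0, 30.0]], [[10.1, 20.1, 30.1]])  # close to 0.0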
296 def GetSHA1HexDigest(contents):
297 """Returns SHA1 hex digest of the given string."""
298 return hashlib.sha1(contents).hexdigest()
301 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
302 """Gets the archive file name for the given revision."""
303 def PlatformName():
304 """Return a string to be used in paths for the platform."""
305 if bisect_utils.IsWindowsHost():
306 # Build archive for x64 is still stored with 'win32' suffix
307 # (chromium_utils.PlatformName()).
308 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
309 return 'win32'
310 return 'win32'
311 if bisect_utils.IsLinuxHost():
312 # Android builds too are archived with full-build-linux* prefix.
313 return 'linux'
314 if bisect_utils.IsMacHost():
315 return 'mac'
316 raise NotImplementedError('Unknown platform "%s".' % sys.platform)
318 base_name = 'full-build-%s' % PlatformName()
319 if not build_revision:
320 return base_name
321 if patch_sha:
322 build_revision = '%s_%s' % (build_revision , patch_sha)
323 return '%s_%s.zip' % (base_name, build_revision)
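# Illustrative examples of the names produced above (assuming a Linux host;
# the revision and patch SHA values are made up):
#   GetZipFileName()                            -> 'full-build-linux'
#   GetZipFileName('123456')                    -> 'full-build-linux_123456.zip'
#   GetZipFileName('123456', 'ia32', 'abc123')  -> 'full-build-linux_123456_abc123.zip'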
326 def GetRemoteBuildPath(build_revision, target_platform='chromium',
327 target_arch='ia32', patch_sha=None):
328 """Compute the url to download the build from."""
329 def GetGSRootFolderName(target_platform):
330 """Gets Google Cloud Storage root folder names"""
331 if bisect_utils.IsWindowsHost():
332 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
333 return 'Win x64 Builder'
334 return 'Win Builder'
335 if bisect_utils.IsLinuxHost():
336 if target_platform == 'android':
337 return 'android_perf_rel'
338 return 'Linux Builder'
339 if bisect_utils.IsMacHost():
340 return 'Mac Builder'
341 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
343 base_filename = GetZipFileName(
344 build_revision, target_arch, patch_sha)
345 builder_folder = GetGSRootFolderName(target_platform)
346 return '%s/%s' % (builder_folder, base_filename)
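# Illustrative example (assuming a Linux host bisecting desktop Chromium, with
# a made-up revision):
#   GetRemoteBuildPath('123456') -> 'Linux Builder/full-build-linux_123456.zip'
# This relative path is then looked up in the cloud storage bucket below.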
349 def FetchFromCloudStorage(bucket_name, source_path, destination_path):
350 """Fetches file(s) from the Google Cloud Storage.
352 Args:
353 bucket_name: Google Storage bucket name.
354 source_path: Source file path.
355 destination_path: Destination file path.
357 Returns:
358 Downloaded file path if it exists, otherwise None.
360 target_file = os.path.join(destination_path, os.path.basename(source_path))
361 try:
362 if cloud_storage.Exists(bucket_name, source_path):
363 print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
364 cloud_storage.Get(bucket_name, source_path, destination_path)
365 if os.path.exists(target_file):
366 return target_file
367 else:
368 print ('File gs://%s/%s not found in cloud storage.' % (
369 bucket_name, source_path))
370 except Exception as e:
371 print 'Something went wrong while fetching file from cloud: %s' % e
372 if os.path.exists(target_file):
373 os.remove(target_file)
374 return None
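# Illustrative usage with an assumed destination directory; 'chrome-perf' is
# the bucket this script polls while waiting for tryserver builds:
#   FetchFromCloudStorage('chrome-perf',
#                         'Linux Builder/full-build-linux_123456.zip',
#                         '/tmp/bisect-build')
# returns '/tmp/bisect-build/full-build-linux_123456.zip' on success, or None.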
377 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
378 def MaybeMakeDirectory(*path):
379 """Creates an entire path, if it doesn't already exist."""
380 file_path = os.path.join(*path)
381 try:
382 os.makedirs(file_path)
383 except OSError as e:
384 if e.errno != errno.EEXIST:
385 return False
386 return True
389 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
390 def ExtractZip(filename, output_dir, verbose=True):
391 """ Extract the zip archive in the output directory."""
392 MaybeMakeDirectory(output_dir)
394 # On Linux and Mac, we use the unzip command because it handles links and
395 # file bits (executable), which is much easier than trying to do that with
396 # ZipInfo options.
398 # The Mac version of unzip unfortunately does not support Zip64, whereas
399 # the python module does, so we have to fall back to the python zip module
400 # on Mac if the file size is greater than 4GB.
402 # On Windows, try to use 7z if it is installed, otherwise fall back to the
403 # python zip module and pray we don't have files larger than 512MB to unzip.
404 unzip_cmd = None
405 if ((bisect_utils.IsMacHost()
406 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
407 or bisect_utils.IsLinuxHost()):
408 unzip_cmd = ['unzip', '-o']
409 elif (bisect_utils.IsWindowsHost()
410 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
411 unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
413 if unzip_cmd:
414 # Make sure path is absolute before changing directories.
415 filepath = os.path.abspath(filename)
416 saved_dir = os.getcwd()
417 os.chdir(output_dir)
418 command = unzip_cmd + [filepath]
419 result = bisect_utils.RunProcess(command)
420 os.chdir(saved_dir)
421 if result:
422 raise IOError('unzip failed: %s => %s' % (str(command), result))
423 else:
424 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
425 zf = zipfile.ZipFile(filename)
426 for name in zf.namelist():
427 if verbose:
428 print 'Extracting %s' % name
429 zf.extract(name, output_dir)
430 if bisect_utils.IsMacHost():
431 # Restore permission bits.
432 os.chmod(os.path.join(output_dir, name),
433 zf.getinfo(name).external_attr >> 16L)
436 def SetBuildSystemDefault(build_system, use_goma, goma_dir):
437 """Sets up any environment variables needed to build with the specified build
438 system.
440 Args:
441 build_system: A string specifying build system. Currently only 'ninja' or
442 'make' are supported.
444 if build_system == 'ninja':
445 gyp_var = os.getenv('GYP_GENERATORS', default='')
447 if not gyp_var or not 'ninja' in gyp_var:
448 if gyp_var:
449 os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
450 else:
451 os.environ['GYP_GENERATORS'] = 'ninja'
453 if bisect_utils.IsWindowsHost():
454 os.environ['GYP_DEFINES'] = ('component=shared_library '
455 'incremental_chrome_dll=1 '
456 'disable_nacl=1 fastbuild=1 '
457 'chromium_win_pch=0')
459 elif build_system == 'make':
460 os.environ['GYP_GENERATORS'] = 'make'
461 else:
462 raise RuntimeError('%s build not supported.' % build_system)
464 if use_goma:
465 os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''),
466 'use_goma=1')
467 if goma_dir:
468 os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir
471 def BuildWithMake(threads, targets, build_type='Release'):
472 cmd = ['make', 'BUILDTYPE=%s' % build_type]
474 if threads:
475 cmd.append('-j%d' % threads)
477 cmd += targets
479 return_code = bisect_utils.RunProcess(cmd)
481 return not return_code
484 def BuildWithNinja(threads, targets, build_type='Release'):
485 cmd = ['ninja', '-C', os.path.join('out', build_type)]
487 if threads:
488 cmd.append('-j%d' % threads)
490 cmd += targets
492 return_code = bisect_utils.RunProcess(cmd)
494 return not return_code
497 def BuildWithVisualStudio(targets, build_type='Release'):
498 path_to_devenv = os.path.abspath(
499 os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
500 path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
501 cmd = [path_to_devenv, '/build', build_type, path_to_sln]
503 for t in targets:
504 cmd.extend(['/Project', t])
506 return_code = bisect_utils.RunProcess(cmd)
508 return not return_code
511 def WriteStringToFile(text, file_name):
512 try:
513 with open(file_name, 'wb') as f:
514 f.write(text)
515 except IOError:
516 raise RuntimeError('Error writing to file [%s]' % file_name )
519 def ReadStringFromFile(file_name):
520 try:
521 with open(file_name) as f:
522 return f.read()
523 except IOError:
524 raise RuntimeError('Error reading file [%s]' % file_name )
527 def ChangeBackslashToSlashInPatch(diff_text):
528 """Formats file paths in the given text to unix-style paths."""
529 if diff_text:
530 diff_lines = diff_text.split('\n')
531 for i in range(len(diff_lines)):
532 if (diff_lines[i].startswith('--- ') or
533 diff_lines[i].startswith('+++ ')):
534 diff_lines[i] = diff_lines[i].replace('\\', '/')
535 return '\n'.join(diff_lines)
536 return None
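# Illustrative example: only the '--- ' and '+++ ' header lines are rewritten,
#   '--- src\third_party\foo.cc'  ->  '--- src/third_party/foo.cc'
# while all other lines of the diff are left untouched.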
539 class Builder(object):
540 """Builder is used by the bisect script to build relevant targets and deploy.
542 def __init__(self, opts):
543 """Performs setup for building with target build system.
545 Args:
546 opts: Options parsed from command line.
548 if bisect_utils.IsWindowsHost():
549 if not opts.build_preference:
550 opts.build_preference = 'msvs'
552 if opts.build_preference == 'msvs':
553 if not os.getenv('VS100COMNTOOLS'):
554 raise RuntimeError(
555 'Path to visual studio could not be determined.')
556 else:
557 SetBuildSystemDefault(opts.build_preference, opts.use_goma,
558 opts.goma_dir)
559 else:
560 if not opts.build_preference:
561 if 'ninja' in os.getenv('GYP_GENERATORS', default=''):
562 opts.build_preference = 'ninja'
563 else:
564 opts.build_preference = 'make'
566 SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)
568 if not bisect_utils.SetupPlatformBuildEnvironment(opts):
569 raise RuntimeError('Failed to set platform environment.')
571 @staticmethod
572 def FromOpts(opts):
573 builder = None
574 if opts.target_platform == 'cros':
575 builder = CrosBuilder(opts)
576 elif opts.target_platform == 'android':
577 builder = AndroidBuilder(opts)
578 elif opts.target_platform == 'android-chrome':
579 builder = AndroidChromeBuilder(opts)
580 else:
581 builder = DesktopBuilder(opts)
582 return builder
584 def Build(self, depot, opts):
585 raise NotImplementedError()
587 def GetBuildOutputDirectory(self, opts, src_dir=None):
588 """Returns the path to the build directory, relative to the checkout root.
590 Assumes that the current working directory is the checkout root.
592 src_dir = src_dir or 'src'
593 if opts.build_preference == 'ninja' or bisect_utils.IsLinuxHost():
594 return os.path.join(src_dir, 'out')
595 if bisect_utils.IsMacHost():
596 return os.path.join(src_dir, 'xcodebuild')
597 if bisect_utils.IsWindowsHost():
598 return os.path.join(src_dir, 'build')
599 raise NotImplementedError('Unexpected platform %s' % sys.platform)
602 class DesktopBuilder(Builder):
603 """DesktopBuilder is used to build Chromium on linux/mac/windows."""
604 def __init__(self, opts):
605 super(DesktopBuilder, self).__init__(opts)
607 def Build(self, depot, opts):
608 """Builds chromium_builder_perf target using options passed into
609 the script.
611 Args:
612 depot: Current depot being bisected.
613 opts: The options parsed from the command line.
615 Returns:
616 True if build was successful.
618 targets = ['chromium_builder_perf']
620 threads = None
621 if opts.use_goma:
622 threads = 64
624 build_success = False
625 if opts.build_preference == 'make':
626 build_success = BuildWithMake(threads, targets, opts.target_build_type)
627 elif opts.build_preference == 'ninja':
628 build_success = BuildWithNinja(threads, targets, opts.target_build_type)
629 elif opts.build_preference == 'msvs':
630 assert bisect_utils.IsWindowsHost(), 'msvs is only supported on Windows.'
631 build_success = BuildWithVisualStudio(targets, opts.target_build_type)
632 else:
633 assert False, 'No build system defined.'
634 return build_success
637 class AndroidBuilder(Builder):
638 """AndroidBuilder is used to build on android."""
639 def __init__(self, opts):
640 super(AndroidBuilder, self).__init__(opts)
642 def _GetTargets(self):
643 return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']
645 def Build(self, depot, opts):
646 """Builds the android content shell and other necessary tools using options
647 passed into the script.
649 Args:
650 depot: Current depot being bisected.
651 opts: The options parsed from the command line.
653 Returns:
654 True if build was successful.
656 threads = None
657 if opts.use_goma:
658 threads = 64
660 build_success = False
661 if opts.build_preference == 'ninja':
662 build_success = BuildWithNinja(
663 threads, self._GetTargets(), opts.target_build_type)
664 else:
665 assert False, 'No build system defined.'
667 return build_success
670 class AndroidChromeBuilder(AndroidBuilder):
671 """AndroidBuilder is used to build on android's chrome."""
672 def __init__(self, opts):
673 super(AndroidChromeBuilder, self).__init__(opts)
675 def _GetTargets(self):
676 return AndroidBuilder._GetTargets(self) + ['chrome_apk']
679 class CrosBuilder(Builder):
680 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
681 target platform."""
682 def __init__(self, opts):
683 super(CrosBuilder, self).__init__(opts)
685 def ImageToTarget(self, opts):
686 """Installs latest image to target specified by opts.cros_remote_ip.
688 Args:
689 opts: Program options containing cros_board and cros_remote_ip.
691 Returns:
692 True if successful.
694 try:
695 # Keys will most likely be set to 0640 after wiping the chroot.
696 os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
697 os.chmod(CROS_TEST_KEY_PATH, 0600)
698 cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
699 '--remote=%s' % opts.cros_remote_ip,
700 '--board=%s' % opts.cros_board, '--test', '--verbose']
702 return_code = bisect_utils.RunProcess(cmd)
703 return not return_code
704 except OSError:
705 return False
707 def BuildPackages(self, opts, depot):
708 """Builds packages for cros.
710 Args:
711 opts: Program options containing cros_board.
712 depot: The depot being bisected.
714 Returns:
715 True if successful.
717 cmd = [CROS_SDK_PATH]
719 if depot != 'cros':
720 path_to_chrome = os.path.join(os.getcwd(), '..')
721 cmd += ['--chrome_root=%s' % path_to_chrome]
723 cmd += ['--']
725 if depot != 'cros':
726 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
728 cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
729 '--board=%s' % opts.cros_board]
730 return_code = bisect_utils.RunProcess(cmd)
732 return not return_code
734 def BuildImage(self, opts, depot):
735 """Builds test image for cros.
737 Args:
738 opts: Program options containing cros_board.
739 depot: The depot being bisected.
741 Returns:
742 True if successful.
744 cmd = [CROS_SDK_PATH]
746 if depot != 'cros':
747 path_to_chrome = os.path.join(os.getcwd(), '..')
748 cmd += ['--chrome_root=%s' % path_to_chrome]
750 cmd += ['--']
752 if depot != 'cros':
753 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
755 cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
756 '--board=%s' % opts.cros_board, 'test']
758 return_code = bisect_utils.RunProcess(cmd)
760 return not return_code
762 def Build(self, depot, opts):
763 """Builds targets using options passed into the script.
765 Args:
766 depot: Current depot being bisected.
767 opts: The options parsed from the command line.
769 Returns:
770 True if build was successful.
772 if self.BuildPackages(opts, depot):
773 if self.BuildImage(opts, depot):
774 return self.ImageToTarget(opts)
775 return False
778 def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
779 """Parses the vars section of the DEPS file with regex.
781 Args:
782 deps_file_contents: The DEPS file contents as a string.
784 Returns:
785 A dict in the format {depot:revision} if successful, otherwise None.
787 # We'll parse the "vars" section of the DEPS file.
788 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
789 re_results = rxp.search(deps_file_contents)
791 if not re_results:
792 return None
794 # We should be left with a series of entries in the vars component of
795 # the DEPS file with the following format:
796 # 'depot_name': 'revision',
797 vars_body = re_results.group('vars_body')
798 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
799 re.MULTILINE)
800 re_results = rxp.findall(vars_body)
802 return dict(re_results)
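# Illustrative example with an assumed DEPS snippet: given contents containing
#   vars = {
#     'webkit_revision': '176366',
#     'v8_revision': '21983',
#   }
# this returns {'webkit_revision': '176366', 'v8_revision': '21983'}.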
805 def _WaitUntilBuildIsReady(
806 fetch_build, bot_name, builder_host, builder_port, build_request_id,
807 max_timeout):
808 """Waits until build is produced by bisect builder on tryserver.
810 Args:
811 fetch_build: Function to check and download build from cloud storage.
812 bot_name: Builder bot name on tryserver.
813 builder_host: Tryserver hostname.
814 builder_port: Tryserver port.
815 build_request_id: A unique ID of the build request posted to tryserver.
816 max_timeout: Maximum time to wait for the build.
818 Returns:
819 Downloaded archive file path if exists, otherwise None.
821 # Build number on the tryserver.
822 build_num = None
823 # Interval to check build on cloud storage.
824 poll_interval = 60
825 # Interval to check build status on tryserver.
826 status_check_interval = 600
827 last_status_check = time.time()
828 start_time = time.time()
829 while True:
830 # Check for a build on gs://chrome-perf and download it if it exists.
831 res = fetch_build()
832 if res:
833 return (res, 'Build successfully found')
834 elapsed_status_check = time.time() - last_status_check
835 # To avoid overloading the tryserver with status check requests, we only
836 # check the build status every 10 minutes.
837 if elapsed_status_check > status_check_interval:
838 last_status_check = time.time()
839 if not build_num:
840 # Get the build number on tryserver for the current build.
841 build_num = bisect_builder.GetBuildNumFromBuilder(
842 build_request_id, bot_name, builder_host, builder_port)
843 # Check the status of build using the build number.
844 # Note: Build is treated as PENDING if build number is not found
845 # on the tryserver.
846 build_status, status_link = bisect_builder.GetBuildStatus(
847 build_num, bot_name, builder_host, builder_port)
848 if build_status == bisect_builder.FAILED:
849 return (None, 'Failed to produce build, log: %s' % status_link)
850 elapsed_time = time.time() - start_time
851 if elapsed_time > max_timeout:
852 return (None, 'Timed out: %ss without build' % max_timeout)
854 print 'Time elapsed: %ss without build.' % elapsed_time
855 time.sleep(poll_interval)
856 # For some reason, mac bisect bots were not flushing stdout periodically.
857 # As a result, the buildbot command timed out. Flush stdout on all platforms
858 # while waiting for the build.
859 sys.stdout.flush()
862 def _UpdateV8Branch(deps_content):
863 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
865 If the DEPS file contains a "v8_branch" variable, update its value to the
866 v8_bleeding_edge branch. Note: "v8_branch" was only added to the DEPS
867 vars as of DEPS revision 254916; to support earlier DEPS revisions, check
868 for the "src/v8": <v8 source path> entry in DEPS instead and update
869 that entry.
871 Args:
872 deps_content: DEPS file contents to be modified.
874 Returns:
875 Modified DEPS file contents as a string.
877 new_branch = r'branches/bleeding_edge'
878 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
879 if re.search(v8_branch_pattern, deps_content):
880 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
881 else:
882 # Replaces the branch assigned to "src/v8" key in DEPS file.
883 # Format of "src/v8" in DEPS:
884 # "src/v8":
885 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
886 # So, "/trunk@" is replaced with "/branches/bleeding_edge@".
887 v8_src_pattern = re.compile(
888 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
889 if re.search(v8_src_pattern, deps_content):
890 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
891 return deps_content
894 def _UpdateDEPSForAngle(revision, depot, deps_file):
895 """Updates DEPS file with new revision for Angle repository.
897 This is a special case for the Angle depot: in the DEPS file, the "vars"
898 dictionary contains an "angle_revision" key that holds a git hash instead
899 of an SVN revision.
901 Sometimes the "angle_revision" key is not specified in "vars" at all; in
902 that case, check the "deps" dictionary entry that matches
903 angle.git@[a-fA-F0-9]{40}$ and replace its git hash.
905 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
906 try:
907 deps_contents = ReadStringFromFile(deps_file)
908 # Check whether the depot and revision pattern is in the DEPS file "vars"
909 # variable, e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
910 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
911 deps_var, re.MULTILINE)
912 match = re.search(angle_rev_pattern, deps_contents)
913 if match:
914 # Update the revision information for the given depot
915 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
916 else:
917 # Check whether the depot and revision pattern is in the DEPS file "deps"
918 # variable, e.g.,
919 # "src/third_party/angle": Var("chromium_git") +
920 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
921 angle_rev_pattern = re.compile(
922 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
923 match = re.search(angle_rev_pattern, deps_contents)
924 if not match:
925 print 'Could not find angle revision information in DEPS file.'
926 return False
927 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
928 # Write changes to DEPS file
929 WriteStringToFile(new_data, deps_file)
930 return True
931 except IOError, e:
932 print 'Something went wrong while updating DEPS file, %s' % e
933 return False
936 def _TryParseHistogramValuesFromOutput(metric, text):
937 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
939 Args:
940 metric: The metric as a list of [<graph>, <trace>] strings.
941 text: The text to parse the metric values from.
943 Returns:
944 A list of floating point numbers found, [] if none were found.
946 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
948 text_lines = text.split('\n')
949 values_list = []
951 for current_line in text_lines:
952 if metric_formatted in current_line:
953 current_line = current_line[len(metric_formatted):]
955 try:
956 histogram_values = eval(current_line)
958 for b in histogram_values['buckets']:
959 average_for_bucket = float(b['high'] + b['low']) * 0.5
960 # Extend the list with 'count' copies of the average for that bucket.
961 values_list.extend([average_for_bucket] * b['count'])
962 except Exception:
963 pass
965 return values_list
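# Illustrative example with an assumed log line, for the metric ['blob', 'alloc']:
#   HISTOGRAM blob: alloc= {"buckets": [{"low": 1, "high": 3, "count": 2}]}
# yields [2.0, 2.0]: each bucket contributes its midpoint, repeated 'count'
# times.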
968 def _TryParseResultValuesFromOutput(metric, text):
969 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
971 Args:
972 metric: The metric as a list of [<graph>, <trace>] strings.
973 text: The text to parse the metric values from.
975 Returns:
976 A list of floating point numbers found.
978 # Format is: RESULT <graph>: <trace>= <value> <units>
979 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
981 # The log will be parsed looking for format:
982 # <*>RESULT <graph_name>: <trace_name>= <value>
983 single_result_re = re.compile(
984 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
986 # The log will be parsed looking for format:
987 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
988 multi_results_re = re.compile(
989 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
991 # The log will be parsed looking for format:
992 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
993 mean_stddev_re = re.compile(
994 metric_re +
995 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
997 text_lines = text.split('\n')
998 values_list = []
999 for current_line in text_lines:
1000 # Parse the output from the performance test for the metric we're
1001 # interested in.
1002 single_result_match = single_result_re.search(current_line)
1003 multi_results_match = multi_results_re.search(current_line)
1004 mean_stddev_match = mean_stddev_re.search(current_line)
1005 if (not single_result_match is None and
1006 single_result_match.group('VALUE')):
1007 values_list += [single_result_match.group('VALUE')]
1008 elif (not multi_results_match is None and
1009 multi_results_match.group('VALUES')):
1010 metric_values = multi_results_match.group('VALUES')
1011 values_list += metric_values.split(',')
1012 elif (not mean_stddev_match is None and
1013 mean_stddev_match.group('MEAN')):
1014 values_list += [mean_stddev_match.group('MEAN')]
1016 values_list = [float(v) for v in values_list
1017 if bisect_utils.IsStringFloat(v)]
1019 # If the metric is times/t, we need to sum the timings in order to get
1020 # similar regression results as the try-bots.
1021 metrics_to_sum = [
1022 ['times', 't'],
1023 ['times', 'page_load_time'],
1024 ['cold_times', 'page_load_time'],
1025 ['warm_times', 'page_load_time'],
1028 if metric in metrics_to_sum:
1029 if values_list:
1030 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1032 return values_list
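# Illustrative examples with assumed log lines, for the metric ['times', 't']:
#   'RESULT times: t= 11.5 ms'         -> [11.5]
#   'RESULT times: t= [10.0,11.0] ms'  -> [21.0]  (times/t values are summed)
#   'RESULT times: t= {11.0, 0.5} ms'  -> [11.0]  (the mean from a {mean, stddev} pair)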
1035 def _ParseMetricValuesFromOutput(metric, text):
1036 """Parses output from performance_ui_tests and retrieves the results for
1037 a given metric.
1039 Args:
1040 metric: The metric as a list of [<graph>, <trace>] strings.
1041 text: The text to parse the metric values from.
1043 Returns:
1044 A list of floating point numbers found.
1046 metric_values = _TryParseResultValuesFromOutput(metric, text)
1048 if not metric_values:
1049 metric_values = _TryParseHistogramValuesFromOutput(metric, text)
1051 return metric_values
1054 def _GenerateProfileIfNecessary(command_args):
1055 """Checks the command line of the performance test for dependencies on
1056 profile generation, and runs tools/perf/generate_profile as necessary.
1058 Args:
1059 command_args: Command line being passed to performance test, as a list.
1061 Returns:
1062 False if profile generation was necessary and failed, otherwise True.
1064 if '--profile-dir' in ' '.join(command_args):
1065 # If we were using python 2.7+, we could just use the argparse
1066 # module's parse_known_args to grab --profile-dir. Since some of the
1067 # bots still run 2.6, have to grab the arguments manually.
1068 arg_dict = {}
1069 args_to_parse = ['--profile-dir', '--browser']
1071 for arg_to_parse in args_to_parse:
1072 for i, current_arg in enumerate(command_args):
1073 if arg_to_parse in current_arg:
1074 current_arg_split = current_arg.split('=')
1076 # Check 2 cases, --arg=<val> and --arg <val>
1077 if len(current_arg_split) == 2:
1078 arg_dict[arg_to_parse] = current_arg_split[1]
1079 elif i + 1 < len(command_args):
1080 arg_dict[arg_to_parse] = command_args[i+1]
1082 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1084 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1085 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1086 return not bisect_utils.RunProcess(['python', path_to_generate,
1087 '--profile-type-to-generate', profile_type,
1088 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1089 return False
1090 return True
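# Illustrative example with an assumed command line:
#   ['tools/perf/run_benchmark', '--browser=release',
#    '--profile-dir=out/Release/generated_profiles/small_profile']
# would invoke tools/perf/generate_profile with
#   --profile-type-to-generate small_profile --browser release
#   --output-dir out/Release/generated_profiles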
1093 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data):
1094 """Adds new revisions to the revision_data dict and initializes them.
1096 Args:
1097 revisions: List of revisions to add.
1098 depot: Depot that's currently in use (src, webkit, etc...)
1099 sort: Sorting key for displaying revisions.
1100 revision_data: A dict to add the new revisions into. Existing revisions
1101 will have their sort keys offset.
1103 num_depot_revisions = len(revisions)
1105 for _, v in revision_data.iteritems():
1106 if v['sort'] > sort:
1107 v['sort'] += num_depot_revisions
1109 for i in xrange(num_depot_revisions):
1110 r = revisions[i]
1111 revision_data[r] = {
1112 'revision' : r,
1113 'depot' : depot,
1114 'value' : None,
1115 'perf_time' : 0,
1116 'build_time' : 0,
1117 'passed' : '?',
1118 'sort' : i + sort + 1,
1122 def _PrintThankYou():
1123 print RESULTS_THANKYOU
1126 def _PrintTableRow(column_widths, row_data):
1127 """Prints out a row in a formatted table that has columns aligned.
1129 Args:
1130 column_widths: A list of column width numbers.
1131 row_data: A list of items for each column in this row.
1133 assert len(column_widths) == len(row_data)
1134 text = ''
1135 for i in xrange(len(column_widths)):
1136 current_row_data = row_data[i].center(column_widths[i], ' ')
1137 text += ('%%%ds' % column_widths[i]) % current_row_data
1138 print text
1141 def _PrintStepTime(revision_data_sorted):
1142 """Prints information about how long various steps took.
1144 Args:
1145 revision_data_sorted: The sorted list of revision data dictionaries."""
1146 step_perf_time_avg = 0.0
1147 step_build_time_avg = 0.0
1148 step_count = 0.0
1149 for _, current_data in revision_data_sorted:
1150 if current_data['value']:
1151 step_perf_time_avg += current_data['perf_time']
1152 step_build_time_avg += current_data['build_time']
1153 step_count += 1
1154 if step_count:
1155 step_perf_time_avg = step_perf_time_avg / step_count
1156 step_build_time_avg = step_build_time_avg / step_count
1157 print
1158 print 'Average build time : %s' % datetime.timedelta(
1159 seconds=int(step_build_time_avg))
1160 print 'Average test time : %s' % datetime.timedelta(
1161 seconds=int(step_perf_time_avg))
1163 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
1164 """Compiles a list of other possible regressions from the revision data.
1166 Args:
1167 revision_data_sorted: Sorted list of (revision, revision data dict) pairs.
1168 bad_greater_than_good: Whether the result value at the "bad" revision is
1169 numerically greater than the result value at the "good" revision.
1171 Returns:
1172 A list of [current_rev, previous_rev, confidence] for other places where
1173 there may have been a regression.
1175 other_regressions = []
1176 previous_values = []
1177 previous_id = None
1178 for current_id, current_data in revision_data_sorted:
1179 current_values = current_data['value']
1180 if current_values:
1181 current_values = current_values['values']
1182 if previous_values:
1183 confidence = ConfidenceScore(previous_values, [current_values])
1184 mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
1185 mean_of_current_runs = math_utils.Mean(current_values)
1187 # Check that the potential regression is in the same direction as
1188 # the overall regression. If the mean of the previous runs < the
1189 # mean of the current runs, this local regression is in same
1190 # direction.
1191 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
1192 is_same_direction = (prev_less_than_current if
1193 bad_greater_than_good else not prev_less_than_current)
1195 # Only report potential regressions with high confidence.
1196 if is_same_direction and confidence > 50:
1197 other_regressions.append([current_id, previous_id, confidence])
1198 previous_values.append(current_values)
1199 previous_id = current_id
1200 return other_regressions
1202 class BisectPerformanceMetrics(object):
1203 """This class contains functionality to perform a bisection of a range of
1204 revisions to narrow down where performance regressions may have occurred.
1206 The main entry-point is the Run method.
1209 def __init__(self, source_control, opts):
1210 super(BisectPerformanceMetrics, self).__init__()
1212 self.opts = opts
1213 self.source_control = source_control
1214 self.src_cwd = os.getcwd()
1215 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1216 self.depot_cwd = {}
1217 self.cleanup_commands = []
1218 self.warnings = []
1219 self.builder = Builder.FromOpts(opts)
1221 # This always starts true since the script grabs latest first.
1222 self.was_blink = True
1224 for d in DEPOT_NAMES:
1225 # The working directory of each depot is just the path to the depot, but
1226 # since we're already in 'src', we can skip that part.
1228 self.depot_cwd[d] = os.path.join(
1229 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1231 def PerformCleanup(self):
1232 """Performs cleanup when script is finished."""
1233 os.chdir(self.src_cwd)
1234 for c in self.cleanup_commands:
1235 if c[0] == 'mv':
1236 shutil.move(c[1], c[2])
1237 else:
1238 assert False, 'Invalid cleanup command.'
1240 def GetRevisionList(self, depot, bad_revision, good_revision):
1241 """Retrieves a list of all the commits between the bad revision and
1242 last known good revision."""
1244 revision_work_list = []
1246 if depot == 'cros':
1247 revision_range_start = good_revision
1248 revision_range_end = bad_revision
1250 cwd = os.getcwd()
1251 self.ChangeToDepotWorkingDirectory('cros')
1253 # Print the commit timestamps for every commit in the revision time
1254 # range. We'll sort them and bisect by that. There is a remote chance that
1255 # 2 (or more) commits will share the exact same timestamp, but it's
1256 # probably safe to ignore that case.
1257 cmd = ['repo', 'forall', '-c',
1258 'git log --format=%%ct --before=%d --after=%d' % (
1259 revision_range_end, revision_range_start)]
1260 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
1262 assert not return_code, ('An error occurred while running '
1263 '"%s"' % ' '.join(cmd))
1265 os.chdir(cwd)
1267 revision_work_list = list(set(
1268 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
1269 revision_work_list = sorted(revision_work_list, reverse=True)
1270 else:
1271 cwd = self._GetDepotDirectory(depot)
1272 revision_work_list = self.source_control.GetRevisionList(bad_revision,
1273 good_revision, cwd=cwd)
1275 return revision_work_list
1277 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
1278 svn_revision = self.source_control.SVNFindRev(revision)
1280 if bisect_utils.IsStringInt(svn_revision):
1281 # V8 is tricky to bisect, in that there are only a few instances when
1282 # we can dive into bleeding_edge and get back a meaningful result.
1283 # Try to detect a V8 "business as usual" case, which is when:
1284 # 1. trunk revision N has description "Version X.Y.Z"
1285 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1286 # trunk. Now working on X.Y.(Z+1)."
1288 # As of 01/24/2014, V8 trunk descriptions are formatted:
1289 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1290 # So we can just try parsing that out first and fall back to the old way.
1291 v8_dir = self._GetDepotDirectory('v8')
1292 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
1294 revision_info = self.source_control.QueryRevisionInfo(revision,
1295 cwd=v8_dir)
1297 version_re = re.compile("Version (?P<values>[0-9,.]+)")
1299 regex_results = version_re.search(revision_info['subject'])
1301 if regex_results:
1302 git_revision = None
1304 # Look for "based on bleeding_edge" and parse out revision
1305 if 'based on bleeding_edge' in revision_info['subject']:
1306 try:
1307 bleeding_edge_revision = revision_info['subject'].split(
1308 'bleeding_edge revision r')[1]
1309 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1310 git_revision = self.source_control.ResolveToRevision(
1311 bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
1312 cwd=v8_bleeding_edge_dir)
1313 return git_revision
1314 except (IndexError, ValueError):
1315 pass
1317 if not git_revision:
1318 # Wasn't successful, try the old way of looking for "Prepare push to"
1319 git_revision = self.source_control.ResolveToRevision(
1320 int(svn_revision) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
1321 cwd=v8_bleeding_edge_dir)
1323 if git_revision:
1324 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1325 cwd=v8_bleeding_edge_dir)
1327 if 'Prepare push to trunk' in revision_info['subject']:
1328 return git_revision
1329 return None
1331 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1332 cwd = self._GetDepotDirectory('v8')
1333 cmd = ['log', '--format=%ct', '-1', revision]
1334 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1335 commit_time = int(output)
1336 commits = []
1338 if search_forward:
1339 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1340 'origin/master']
1341 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1342 output = output.split()
1343 commits = output
1344 commits = reversed(commits)
1345 else:
1346 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1347 'origin/master']
1348 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1349 output = output.split()
1350 commits = output
1352 bleeding_edge_revision = None
1354 for c in commits:
1355 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1356 if bleeding_edge_revision:
1357 break
1359 return bleeding_edge_revision
1361 def _ParseRevisionsFromDEPSFile(self, depot):
1362 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1363 be needed if the bisect recurses into those depots later.
1365 Args:
1366 depot: Name of depot being bisected.
1368 Returns:
1369 A dict in the format {depot:revision} if successful, otherwise None.
1371 try:
1372 deps_data = {
1373 'Var': lambda _: deps_data["vars"][_],
1374 'From': lambda *args: None,
1376 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1377 deps_data = deps_data['deps']
1379 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1380 results = {}
1381 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1382 if (depot_data.get('platform') and
1383 depot_data.get('platform') != os.name):
1384 continue
1386 if (depot_data.get('recurse') and depot in depot_data.get('from')):
1387 depot_data_src = depot_data.get('src') or depot_data.get('src_old')
1388 src_dir = deps_data.get(depot_data_src)
1389 if src_dir:
1390 self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
1391 depot_data_src[4:])
1392 re_results = rxp.search(src_dir)
1393 if re_results:
1394 results[depot_name] = re_results.group('revision')
1395 else:
1396 warning_text = ('Couldn\'t parse revision for %s while bisecting '
1397 '%s' % (depot_name, depot))
1398 if not warning_text in self.warnings:
1399 self.warnings.append(warning_text)
1400 else:
1401 results[depot_name] = None
1402 return results
1403 except ImportError:
1404 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1405 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
1406 results = {}
1407 for depot_name, depot_revision in parse_results.iteritems():
1408 depot_revision = depot_revision.strip('@')
1409 print depot_name, depot_revision
1410 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1411 if (current_data.has_key('deps_var') and
1412 current_data['deps_var'] == depot_name):
1413 src_name = current_name
1414 results[src_name] = depot_revision
1415 break
1416 return results
1418 def _Get3rdPartyRevisions(self, depot):
1419 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1421 Returns:
1422 A dict in the format {depot:revision} if successful, otherwise None.
1424 cwd = os.getcwd()
1425 self.ChangeToDepotWorkingDirectory(depot)
1427 results = {}
1429 if depot == 'chromium' or depot == 'android-chrome':
1430 results = self._ParseRevisionsFromDEPSFile(depot)
1431 os.chdir(cwd)
1432 elif depot == 'cros':
1433 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1434 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1435 CROS_CHROMEOS_PATTERN]
1436 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
1438 assert not return_code, ('An error occurred while running '
1439 '"%s"' % ' '.join(cmd))
1441 if len(output) > len(CROS_CHROMEOS_PATTERN):
1442 output = output[len(CROS_CHROMEOS_PATTERN):]
1444 if len(output) > 1:
1445 output = output.split('_')[0]
1447 if len(output) > 3:
1448 contents = output.split('.')
1450 version = contents[2]
1452 if contents[3] != '0':
1453 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' %
1454 (version, contents[3], version))
1455 if not warningText in self.warnings:
1456 self.warnings.append(warningText)
1458 cwd = os.getcwd()
1459 self.ChangeToDepotWorkingDirectory('chromium')
1460 cmd = ['log', '-1', '--format=%H',
1461 '--author=chrome-release@google.com',
1462 '--grep=to %s' % version, 'origin/master']
1463 return_code = bisect_utils.CheckRunGit(cmd)
1464 os.chdir(cwd)
1466 results['chromium'] = output.strip()
1467 elif depot == 'v8':
1468 # We can't try to map the trunk revision to bleeding edge yet, because
1469 # we don't know which direction to try to search in. Have to wait until
1470 # the bisect has narrowed the results down to 2 v8 rolls.
1471 results['v8_bleeding_edge'] = None
1473 return results
1475 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1476 """Backs up or restores build output directory based on restore argument.
1478 Args:
1479 restore: Indicates whether to restore or back up. Default is False (backup).
1480 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1482 Returns:
1483 Path to the backup or restored location as a string, or None on failure.
1485 build_dir = os.path.abspath(
1486 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1487 source_dir = os.path.join(build_dir, build_type)
1488 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1489 if restore:
1490 source_dir, destination_dir = destination_dir, source_dir
1491 if os.path.exists(source_dir):
1492 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1493 shutil.move(source_dir, destination_dir)
1494 return destination_dir
1495 return None
1497 def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
1498 patch_sha, out_dir):
1499 """Checks and downloads build archive for a given revision.
1501 Checks for a build archive under the Git hash or the SVN revision; if
1502 either file exists, the archive is downloaded.
1504 Args:
1505 revision: A Git hash revision.
1506 gs_bucket: Cloud storage bucket name.
1507 target_arch: 32 or 64 bit build target.
1508 patch_sha: SHA1 hex digest of the DEPS patch (for 3rd-party bisects).
1509 out_dir: Build output directory where downloaded file is stored.
1511 Returns:
1512 Downloaded archive file path if exists, otherwise None.
1514 # Source archive file path on cloud storage using Git revision.
1515 source_file = GetRemoteBuildPath(
1516 revision, self.opts.target_platform, target_arch, patch_sha)
1517 downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1518 if not downloaded_archive:
1519 # Get SVN revision for the given SHA.
1520 svn_revision = self.source_control.SVNFindRev(revision)
1521 if svn_revision:
1522 # Source archive file path on cloud storage using SVN revision.
1523 source_file = GetRemoteBuildPath(
1524 svn_revision, self.opts.target_platform, target_arch, patch_sha)
1525 return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1526 return downloaded_archive
1528 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1529 """Downloads the build archive for the given revision.
1531 Args:
1532 revision: The Git revision to download or build.
1533 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1534 patch: A DEPS patch (used while bisecting 3rd party repositories).
1536 Returns:
1537 True if download succeeds, otherwise False.
1539 patch_sha = None
1540 if patch:
1541 # Get the SHA of the DEPS changes patch.
1542 patch_sha = GetSHA1HexDigest(patch)
1544 # Update the DEPS changes patch with a patch to create a new file named
1545 # 'DEPS.sha' and add patch_sha evaluated above to it.
1546 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1548 # Get Build output directory
1549 abs_build_dir = os.path.abspath(
1550 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1552 fetch_build_func = lambda: self.GetBuildArchiveForRevision(
1553 revision, self.opts.gs_bucket, self.opts.target_arch,
1554 patch_sha, abs_build_dir)
1556 # Download the build archive for the given revision.
1557 downloaded_file = fetch_build_func()
1559 # When the build archive doesn't exist, post a build request to the
1560 # tryserver and wait for the build to be produced.
1561 if not downloaded_file:
1562 downloaded_file = self.PostBuildRequestAndWait(
1563 revision, fetch_build=fetch_build_func, patch=patch)
1564 if not downloaded_file:
1565 return False
1567 # Generic name for the archive, created when archive file is extracted.
1568 output_dir = os.path.join(
1569 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1570 # Unzip build archive directory.
1571 try:
1572 RmTreeAndMkDir(output_dir, skip_makedir=True)
1573 self.BackupOrRestoreOutputdirectory(restore=False)
1574 # Build output directory based on target (e.g. out/Release, out/Debug).
1575 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1576 ExtractZip(downloaded_file, abs_build_dir)
1577 if not os.path.exists(output_dir):
1578 # Due to recipe changes, the build's extracted folder contains
1579 # out/Release instead of full-build-<platform>/Release.
1580 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
1581 output_dir = os.path.join(abs_build_dir, 'out', build_type)
1582 else:
1583 raise IOError('Missing extracted folder %s ' % output_dir)
1585 print 'Moving build from %s to %s' % (
1586 output_dir, target_build_output_dir)
1587 shutil.move(output_dir, target_build_output_dir)
1588 return True
1589 except Exception as e:
1590 print 'Something went wrong while extracting archive file: %s' % e
1591 self.BackupOrRestoreOutputdirectory(restore=True)
1592 # Cleanup any leftovers from unzipping.
1593 if os.path.exists(output_dir):
1594 RmTreeAndMkDir(output_dir, skip_makedir=True)
1595 finally:
1596 # Delete downloaded archive
1597 if os.path.exists(downloaded_file):
1598 os.remove(downloaded_file)
1599 return False
1601 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
1602 """POSTs the build request job to the tryserver instance.
1604 A try job build request is posted to tryserver.chromium.perf master,
1605 and waits for the binaries to be produced and archived on cloud storage.
1606 Once the build is ready and stored onto cloud, build archive is downloaded
1607 into the output folder.
1609 Args:
1610 revision: A Git hash revision.
1611 fetch_build: Function to check and download build from cloud storage.
1612 patch: A DEPS patch (used while bisecting 3rd party repositories).
1614 Returns:
1615 Downloaded archive file path when requested build exists and download is
1616 successful, otherwise None.
1618 # Get SVN revision for the given SHA.
1619 svn_revision = self.source_control.SVNFindRev(revision)
1620 if not svn_revision:
1621 raise RuntimeError(
1622 'Failed to determine SVN revision for %s' % revision)
1624 def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
1625 """Gets builder bot name and build time in seconds based on platform."""
1626 # Bot names should match the one listed in tryserver.chromium's
1627 # master.cfg which produces builds for bisect.
1628 if bisect_utils.IsWindowsHost():
1629 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
1630 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1631 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1632 if bisect_utils.IsLinuxHost():
1633 if target_platform == 'android':
1634 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1635 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1636 if bisect_utils.IsMacHost():
1637 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1638 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1639 if not fetch_build:
1640 return False
1642 bot_name, build_timeout = GetBuilderNameAndBuildTime(
1643 self.opts.target_platform, self.opts.target_arch)
1644 builder_host = self.opts.builder_host
1645 builder_port = self.opts.builder_port
1646 # Create a unique ID for each build request posted to tryserver builders.
1647 # This ID is added to the "Reason" property in the build's JSON.
1648 build_request_id = GetSHA1HexDigest(
1649 '%s-%s-%s' % (svn_revision, patch, time.time()))
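# For illustration, the digest input looks like '282461-None-1404867322.49'
# (svn revision, patch text or None, and a timestamp), so the resulting ID is
# unique per build request.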
1651 # Creates a try job description.
1652 job_args = {
1653 'host': builder_host,
1654 'port': builder_port,
1655 'revision': 'src@%s' % svn_revision,
1656 'bot': bot_name,
1657 'name': build_request_id,
1659 # Update patch information if supplied.
1660 if patch:
1661 job_args['patch'] = patch
1662 # Posts job to build the revision on the server.
1663 if bisect_builder.PostTryJob(job_args):
1664 target_file, error_msg = _WaitUntilBuildIsReady(
1665 fetch_build, bot_name, builder_host, builder_port, build_request_id,
1666 build_timeout)
1667 if not target_file:
1668 print '%s [revision: %s]' % (error_msg, svn_revision)
1669 return None
1670 return target_file
1671 print 'Failed to post build request for revision: [%s]' % svn_revision
1672 return None
1674 def IsDownloadable(self, depot):
1675 """Checks if build is downloadable based on target platform and depot."""
1676 if (self.opts.target_platform in ['chromium', 'android'] and
1677 self.opts.gs_bucket):
1678 return (depot == 'chromium' or
1679 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1680 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1681 return False
1683 def UpdateDeps(self, revision, depot, deps_file):
1684 """Updates DEPS file with new revision of dependency repository.
1686 This method searches DEPS for a particular pattern in which the depot
1687 revision is specified (e.g. "webkit_revision": "123456"). If a match is found,
1688 it resolves the given git hash to an SVN revision and replaces it in the DEPS file.
1690 Args:
1691 revision: A git hash revision of the dependency repository.
1692 depot: Current depot being bisected.
1693 deps_file: Path to DEPS file.
1695 Returns:
1696 True if DEPS file is modified successfully, otherwise False.
1698 if not os.path.exists(deps_file):
1699 return False
1701 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1702 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1703 if not deps_var:
1704 print 'DEPS update not supported for Depot: %s' % depot
1705 return False
1707 # Hack for the Angle repository. In the DEPS file, the "vars" dictionary
1708 # contains an "angle_revision" key that holds a git hash instead of an SVN
1709 # revision. Sometimes the "angle_revision" key is not specified in "vars"
1710 # at all; in such cases, check the "deps" dictionary for an entry matching
1711 # angle.git@[a-fA-F0-9]{40}$ and replace the git hash there.
1712 if depot == 'angle':
1713 return _UpdateDEPSForAngle(revision, depot, deps_file)
1715 try:
1716 deps_contents = ReadStringFromFile(deps_file)
1717 # Check whether the depot and revision pattern is in the DEPS file vars,
1718 # e.g. for webkit the format is "webkit_revision": "12345".
1719 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1720 re.MULTILINE)
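# For illustration: with deps_var 'webkit_revision', the compiled pattern is
# (?<="webkit_revision": ")([0-9]+)(?=") and it matches only the digits in a
# line like '"webkit_revision": "172000"'.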
1721 match = re.search(deps_revision, deps_contents)
1722 if match:
1723 svn_revision = self.source_control.SVNFindRev(
1724 revision, self._GetDepotDirectory(depot))
1725 if not svn_revision:
1726 print 'Could not determine SVN revision for %s' % revision
1727 return False
1728 # Update the revision information for the given depot
1729 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1731 # For v8_bleeding_edge revisions, change the V8 branch in order
1732 # to fetch the bleeding edge revision.
1733 if depot == 'v8_bleeding_edge':
1734 new_data = _UpdateV8Branch(new_data)
1735 if not new_data:
1736 return False
1737 # Write changes to DEPS file
1738 WriteStringToFile(new_data, deps_file)
1739 return True
1740 except IOError, e:
1741 print 'Something went wrong while updating DEPS file. [%s]' % e
1742 return False
1744 def CreateDEPSPatch(self, depot, revision):
1745 """Modifies DEPS and returns diff as text.
1747 Args:
1748 depot: Current depot being bisected.
1749 revision: A git hash revision of the dependency repository.
1751 Returns:
1752 A tuple with git hash of chromium revision and DEPS patch text.
1754 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1755 if not os.path.exists(deps_file_path):
1756 raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
1757 # Get current chromium revision (git hash).
1758 cmd = ['rev-parse', 'HEAD']
1759 chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
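# For reference: 'git rev-parse HEAD' prints the full 40-character SHA1 of the
# commit currently checked out in src/, which is used below to check out the
# matching DEPS file.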
1760 if not chromium_sha:
1761 raise RuntimeError('Failed to determine Chromium revision for %s' %
1762 revision)
1763 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1764 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1765 # Check out the DEPS file at the current chromium revision.
1766 if self.source_control.CheckoutFileAtRevision(
1767 bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
1768 if self.UpdateDeps(revision, depot, deps_file_path):
1769 diff_command = [
1770 'diff',
1771 '--src-prefix=src/',
1772 '--dst-prefix=src/',
1773 '--no-ext-diff',
1774 bisect_utils.FILE_DEPS,
1776 diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
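# For illustration: because of the --src-prefix/--dst-prefix flags above, the
# patch header reads 'diff --git src/DEPS src/DEPS' rather than the default
# 'a/DEPS b/DEPS', presumably so it applies against a checkout rooted above src/.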
1777 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1778 else:
1779 raise RuntimeError(
1780 'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
1781 else:
1782 raise RuntimeError(
1783 'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
1784 return (None, None)
1786 def BuildCurrentRevision(self, depot, revision=None):
1787 """Builds chrome and performance_ui_tests on the current revision.
1789 Returns:
1790 True if the build was successful.
1792 if self.opts.debug_ignore_build:
1793 return True
1794 cwd = os.getcwd()
1795 os.chdir(self.src_cwd)
1796 # Fetch the build archive for the given revision from cloud storage when
1797 # a storage bucket is specified.
1798 if self.IsDownloadable(depot) and revision:
1799 deps_patch = None
1800 if depot != 'chromium':
1801 # Create a DEPS patch with new revision for dependency repository.
1802 revision, deps_patch = self.CreateDEPSPatch(depot, revision)
1803 if self.DownloadCurrentBuild(revision, patch=deps_patch):
1804 os.chdir(cwd)
1805 if deps_patch:
1806 # Reverts the changes to DEPS file.
1807 self.source_control.CheckoutFileAtRevision(
1808 bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
1809 return True
1810 return False
1812 # This code is executed when the bisect bot builds binaries locally.
1813 build_success = self.builder.Build(depot, self.opts)
1814 os.chdir(cwd)
1815 return build_success
1817 def RunGClientHooks(self):
1818 """Runs gclient with runhooks command.
1820 Returns:
1821 True if gclient reports no errors.
1823 if self.opts.debug_ignore_build:
1824 return True
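# A note on the call below: it runs 'gclient runhooks' in the source directory
# and RunGClient presumably returns the process exit status (as with RunProcess
# elsewhere in this script), so a zero status means success here.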
1825 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1827 def _IsBisectModeUsingMetric(self):
1828 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
1830 def _IsBisectModeReturnCode(self):
1831 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
1833 def _IsBisectModeStandardDeviation(self):
1834 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
1836 def GetCompatibleCommand(self, command_to_run, revision, depot):
1837 # Prior to crrev.com/274857, *only* android-chromium-testshell works.
1838 # Then until crrev.com/276628, *both* (android-chromium-testshell and
1839 # android-chrome-shell) work. After rev 276628, *only*
1840 # android-chrome-shell works. The bisect-perf-regression.py script should
1841 # handle these cases and set the appropriate browser type based on revision.
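# For example, a command containing '--browser=android-chrome-shell' run against
# a revision at or before r274857 is rewritten below to use
# 'android-chromium-testshell', and the reverse substitution applies at or
# after r276628.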
1842 if self.opts.target_platform in ['android']:
1844 # When it's a third_party depot, get the chromium revision.
1844 if depot != 'chromium':
1845 revision = bisect_utils.CheckRunGit(
1846 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
1847 svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd)
1848 if not svn_revision:
1849 return command_to_run
1850 cmd_re = re.compile(r'--browser=(?P<browser_type>\S+)')
1851 matches = cmd_re.search(command_to_run)
1852 if bisect_utils.IsStringInt(svn_revision) and matches:
1853 cmd_browser = matches.group('browser_type')
1854 if svn_revision <= 274857 and cmd_browser == 'android-chrome-shell':
1855 return command_to_run.replace(cmd_browser,
1856 'android-chromium-testshell')
1857 elif (svn_revision >= 276628 and
1858 cmd_browser == 'android-chromium-testshell'):
1859 return command_to_run.replace(cmd_browser,
1860 'android-chrome-shell')
1861 return command_to_run
1863 def RunPerformanceTestAndParseResults(
1864 self, command_to_run, metric, reset_on_first_run=False,
1865 upload_on_last_run=False, results_label=None):
1866 """Runs a performance test on the current revision and parses the results.
1868 Args:
1869 command_to_run: The command to be run to execute the performance test.
1870 metric: The metric to parse out from the results of the performance test.
1871 This is the result chart name and trace name, separated by slash.
1872 reset_on_first_run: If True, pass the flag --reset-results on first run.
1873 upload_on_last_run: If True, pass the flag --upload-results on last run.
1874 results_label: A value for the option flag --results-label.
1875 The arguments reset_on_first_run, upload_on_last_run and results_label
1876 are all ignored if the test is not a Telemetry test.
1878 Returns:
1879 (values dict, 0) if --debug_ignore_perf_test was passed.
1880 (values dict, 0, test output) if the test was run successfully.
1881 (error message, -1) if the test couldn't be run.
1882 (error message, -1, test output) if the test ran but there was an error.
1884 success_code, failure_code = 0, -1
1886 if self.opts.debug_ignore_perf_test:
1887 fake_results = {
1888 'mean': 0.0,
1889 'std_err': 0.0,
1890 'std_dev': 0.0,
1891 'values': [0.0]
1893 return (fake_results, success_code)
1895 # For the Windows platform, set posix=False to parse Windows paths correctly.
1896 # On Windows, path separators '\' or '\\' are replaced by '' when posix=True
1897 # (refer to http://bugs.python.org/issue1724822). By default posix=True.
1898 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
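# For illustration (hypothetical command): shlex.split(
#     'tools/perf/run_benchmark -v --browser=release sunspider', posix=True)
# yields ['tools/perf/run_benchmark', '-v', '--browser=release', 'sunspider'].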
1900 if not _GenerateProfileIfNecessary(args):
1901 err_text = 'Failed to generate profile for performance test.'
1902 return (err_text, failure_code)
1904 # If running a Telemetry test for Chrome OS, insert the remote IP and
1905 # identity parameters.
1906 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
1907 if self.opts.target_platform == 'cros' and is_telemetry:
1908 args.append('--remote=%s' % self.opts.cros_remote_ip)
1909 args.append('--identity=%s' % CROS_TEST_KEY_PATH)
1911 start_time = time.time()
1913 metric_values = []
1914 output_of_all_runs = ''
1915 for i in xrange(self.opts.repeat_test_count):
1916 # We can ignore the return code here; if the tests fail, they won't return 0.
1917 current_args = copy.copy(args)
1918 if is_telemetry:
1919 if i == 0 and reset_on_first_run:
1920 current_args.append('--reset-results')
1921 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
1922 current_args.append('--upload-results')
1923 if results_label:
1924 current_args.append('--results-label=%s' % results_label)
1925 try:
1926 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
1927 current_args, cwd=self.src_cwd)
1928 except OSError, e:
1929 if e.errno == errno.ENOENT:
1930 err_text = ('Something went wrong running the performance test. '
1931 'Please review the command line:\n\n')
1932 if 'src/' in ' '.join(args):
1933 err_text += ('Check that you haven\'t accidentally specified a '
1934 'path with src/ in the command.\n\n')
1935 err_text += ' '.join(args)
1936 err_text += '\n'
1938 return (err_text, failure_code)
1939 raise
1941 output_of_all_runs += output
1942 if self.opts.output_buildbot_annotations:
1943 print output
1945 if self._IsBisectModeUsingMetric():
1946 metric_values += _ParseMetricValuesFromOutput(metric, output)
1947 # If we're bisecting on a metric (i.e., changes in the mean or
1948 # standard deviation) and no metric values are produced, bail out.
1949 if not metric_values:
1950 break
1951 elif self._IsBisectModeReturnCode():
1952 metric_values.append(return_code)
1954 elapsed_minutes = (time.time() - start_time) / 60.0
1955 if elapsed_minutes >= self.opts.max_time_minutes:
1956 break
1958 if len(metric_values) == 0:
1959 err_text = 'Metric %s was not found in the test output.' % metric
1960 # TODO(qyearsley): Consider also getting and displaying a list of metrics
1961 # that were found in the output here.
1962 return (err_text, failure_code, output_of_all_runs)
1964 # If we're bisecting on return codes, we're really just looking for zero vs
1965 # non-zero.
1966 if self._IsBisectModeReturnCode():
1967 # If any of the return codes is non-zero, output 1.
1968 overall_return_code = 0 if (
1969 all(current_value == 0 for current_value in metric_values)) else 1
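# For illustration: metric_values of [0, 0, 1] give an overall_return_code of 1,
# while [0, 0, 0] give 0.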
1971 values = {
1972 'mean': overall_return_code,
1973 'std_err': 0.0,
1974 'std_dev': 0.0,
1975 'values': metric_values,
1978 print 'Results of performance test: Command returned with %d' % (
1979 overall_return_code)
1980 print
1981 else:
1982 # Need to get the average value if there were multiple values.
1983 truncated_mean = math_utils.TruncatedMean(
1984 metric_values, self.opts.truncate_percent)
1985 standard_err = math_utils.StandardError(metric_values)
1986 standard_dev = math_utils.StandardDeviation(metric_values)
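# A note on the statistics: TruncatedMean discards a fraction of the highest and
# lowest samples (controlled by opts.truncate_percent) before averaging, so a
# single outlier run should have limited influence on the reported mean.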
1988 if self._IsBisectModeStandardDeviation():
1989 metric_values = [standard_dev]
1991 values = {
1992 'mean': truncated_mean,
1993 'std_err': standard_err,
1994 'std_dev': standard_dev,
1995 'values': metric_values,
1998 print 'Results of performance test: %12f %12f' % (
1999 truncated_mean, standard_err)
2000 print
2001 return (values, success_code, output_of_all_runs)
2003 def FindAllRevisionsToSync(self, revision, depot):
2004 """Finds all dependant revisions and depots that need to be synced for a
2005 given revision. This is only useful in the git workflow, as an svn depot
2006 may be split into multiple mirrors.
2008 e.g. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2009 skia/include. To sync skia/src properly, one has to find the proper
2010 revisions in skia/gyp and skia/include.
2012 Args:
2013 revision: The revision to sync to.
2014 depot: The depot in use at the moment (probably skia).
2016 Returns:
2017 A list of [depot, revision] pairs that need to be synced.
2019 revisions_to_sync = [[depot, revision]]
2021 is_base = ((depot == 'chromium') or (depot == 'cros') or
2022 (depot == 'android-chrome'))
2024 # Some SVN depots were split into multiple git depots, so we need to
2025 # figure out for each mirror which git revision to grab. There's no
2026 # guarantee that the SVN revision will exist for each of the dependent
2027 # depots, so we have to grep the git logs and grab the next earlier one.
2028 if (not is_base
2029 and DEPOT_DEPS_NAME[depot]['depends']
2030 and self.source_control.IsGit()):
2031 svn_rev = self.source_control.SVNFindRev(revision)
2033 for d in DEPOT_DEPS_NAME[depot]['depends']:
2034 self.ChangeToDepotWorkingDirectory(d)
2036 dependant_rev = self.source_control.ResolveToRevision(
2037 svn_rev, d, DEPOT_DEPS_NAME, -1000)
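# ResolveToRevision (bounded here by -1000) presumably walks backwards through
# depot |d| for the nearest commit at or before svn_rev, matching the note above
# about grabbing the next earlier revision when no exact match exists.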
2039 if dependant_rev:
2040 revisions_to_sync.append([d, dependant_rev])
2042 num_resolved = len(revisions_to_sync)
2043 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
2045 self.ChangeToDepotWorkingDirectory(depot)
2047 if (num_resolved - 1) != num_needed:
2048 return None
2050 return revisions_to_sync
2052 def PerformPreBuildCleanup(self):
2053 """Performs necessary cleanup between runs."""
2054 print 'Cleaning up between runs.'
2055 print
2057 # Having these pyc files around between runs can confuse the
2058 # perf tests and cause them to crash.
2059 for (path, _, files) in os.walk(self.src_cwd):
2060 for cur_file in files:
2061 if cur_file.endswith('.pyc'):
2062 path_to_file = os.path.join(path, cur_file)
2063 os.remove(path_to_file)
2065 def PerformWebkitDirectoryCleanup(self, revision):
2066 """If the script is switching between Blink and WebKit during bisect,
2067 it's faster to just delete the directory rather than leave it up to git
2068 to sync.
2070 Returns:
2071 True if successful.
2073 if not self.source_control.CheckoutFileAtRevision(
2074 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
2075 return False
2077 cwd = os.getcwd()
2078 os.chdir(self.src_cwd)
2080 is_blink = bisect_utils.IsDepsFileBlink()
2082 os.chdir(cwd)
2084 if not self.source_control.RevertFileToHead(
2085 bisect_utils.FILE_DEPS_GIT):
2086 return False
2088 if self.was_blink != is_blink:
2089 self.was_blink = is_blink
2090 # Removes third_party/Webkit directory.
2091 return bisect_utils.RemoveThirdPartyDirectory('Webkit')
2092 return True
2094 def PerformCrosChrootCleanup(self):
2095 """Deletes the chroot.
2097 Returns:
2098 True if successful.
2100 cwd = os.getcwd()
2101 self.ChangeToDepotWorkingDirectory('cros')
2102 cmd = [CROS_SDK_PATH, '--delete']
2103 return_code = bisect_utils.RunProcess(cmd)
2104 os.chdir(cwd)
2105 return not return_code
2107 def CreateCrosChroot(self):
2108 """Creates a new chroot.
2110 Returns:
2111 True if successful.
2113 cwd = os.getcwd()
2114 self.ChangeToDepotWorkingDirectory('cros')
2115 cmd = [CROS_SDK_PATH, '--create']
2116 return_code = bisect_utils.RunProcess(cmd)
2117 os.chdir(cwd)
2118 return not return_code
2120 def PerformPreSyncCleanup(self, revision, depot):
2121 """Performs any necessary cleanup before syncing.
2123 Returns:
2124 True if successful.
2126 if depot == 'chromium' or depot == 'android-chrome':
2127 # Removes third_party/libjingle. At some point, libjingle was causing
2128 # issues syncing when using the git workflow (crbug.com/266324).
2129 os.chdir(self.src_cwd)
2130 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
2131 return False
2132 # Removes third_party/skia. At some point, skia was causing
2133 # issues syncing when using the git workflow (crbug.com/377951).
2134 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
2135 return False
2136 if depot == 'chromium':
2137 # The fast webkit cleanup doesn't work for android_chrome.
2138 # The switch from WebKit to Blink that this deals with happened
2139 # quite a long time ago, so this is unlikely to be a problem.
2140 return self.PerformWebkitDirectoryCleanup(revision)
2141 elif depot == 'cros':
2142 return self.PerformCrosChrootCleanup()
2143 return True
2145 def RunPostSync(self, depot):
2146 """Performs any work after syncing.
2148 Returns:
2149 True if successful.
2151 if self.opts.target_platform == 'android':
2152 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
2153 path_to_src=self.src_cwd):
2154 return False
2156 if depot == 'cros':
2157 return self.CreateCrosChroot()
2158 else:
2159 return self.RunGClientHooks()
2160 return True
2162 def ShouldSkipRevision(self, depot, revision):
2163 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2164 is git based those changes would have no effect.
2166 Args:
2167 depot: The depot being bisected.
2168 revision: Current revision we're synced to.
2170 Returns:
2171 True if we should skip building/testing this revision.
2173 if depot == 'chromium':
2174 if self.source_control.IsGit():
2175 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
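# For illustration: 'git diff-tree --no-commit-id --name-only -r <sha>' prints
# one changed path per line, so an automated DEPS roll shows just 'DEPS'.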
2176 output = bisect_utils.CheckRunGit(cmd)
2178 files = output.splitlines()
2180 if len(files) == 1 and files[0] == 'DEPS':
2181 return True
2183 return False
2185 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
2186 skippable=False):
2187 """Performs a full sync/build/run of the specified revision.
2189 Args:
2190 revision: The revision to sync to.
2191 depot: The depot that's being used at the moment (src, webkit, etc.)
2192 command_to_run: The command to execute the performance test.
2193 metric: The performance metric being tested.
2195 Returns:
2196 On success, a tuple containing the results of the performance test.
2197 Otherwise, a tuple with the error message.
2199 sync_client = None
2200 if depot == 'chromium' or depot == 'android-chrome':
2201 sync_client = 'gclient'
2202 elif depot == 'cros':
2203 sync_client = 'repo'
2205 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
2207 if not revisions_to_sync:
2208 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
2210 if not self.PerformPreSyncCleanup(revision, depot):
2211 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
2213 success = True
2215 if not self.opts.debug_ignore_sync:
2216 for r in revisions_to_sync:
2217 self.ChangeToDepotWorkingDirectory(r[0])
2219 if sync_client:
2220 self.PerformPreBuildCleanup()
2222 # If you're using gclient to sync, you need to specify the depot you
2223 # want so that all the dependencies sync properly as well.
2224 # e.g. gclient sync src@<SHA1>
2225 current_revision = r[1]
2226 if sync_client == 'gclient':
2227 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
2228 current_revision)
2229 if not self.source_control.SyncToRevision(current_revision,
2230 sync_client):
2231 success = False
2233 break
2235 if success:
2236 success = self.RunPostSync(depot)
2237 if success:
2238 if skippable and self.ShouldSkipRevision(depot, revision):
2239 return ('Skipped revision: [%s]' % str(revision),
2240 BUILD_RESULT_SKIPPED)
2242 start_build_time = time.time()
2243 if self.BuildCurrentRevision(depot, revision):
2244 after_build_time = time.time()
2245 # Hack to support things that got changed.
2246 command_to_run = self.GetCompatibleCommand(
2247 command_to_run, revision, depot)
2248 results = self.RunPerformanceTestAndParseResults(command_to_run,
2249 metric)
2250 # Restore build output directory once the tests are done, to avoid
2251 # any discrepancy.
2252 if self.IsDownloadable(depot) and revision:
2253 self.BackupOrRestoreOutputdirectory(restore=True)
2255 if results[1] == 0:
2256 external_revisions = self._Get3rdPartyRevisions(depot)
2258 if external_revisions is not None:
2259 return (results[0], results[1], external_revisions,
2260 time.time() - after_build_time, after_build_time -
2261 start_build_time)
2262 else:
2263 return ('Failed to parse DEPS file for external revisions.',
2264 BUILD_RESULT_FAIL)
2265 else:
2266 return results
2267 else:
2268 return ('Failed to build revision: [%s]' % str(revision),
2269 BUILD_RESULT_FAIL)
2270 else:
2271 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
2272 else:
2273 return ('Failed to sync revision: [%s]' % str(revision),
2274 BUILD_RESULT_FAIL)
2276 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2277 """Given known good and bad values, decide if the current_value passed
2278 or failed.
2280 Args:
2281 current_value: The value of the metric being checked.
2282 known_bad_value: The reference value for a "failed" run.
2283 known_good_value: The reference value for a "passed" run.
2285 Returns:
2286 True if the current_value is closer to the known_good_value than the
2287 known_bad_value.
2289 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2290 dist_to_good_value = abs(current_value['std_dev'] -
2291 known_good_value['std_dev'])
2292 dist_to_bad_value = abs(current_value['std_dev'] -
2293 known_bad_value['std_dev'])
2294 else:
2295 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2296 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2298 return dist_to_good_value < dist_to_bad_value
2300 def _GetDepotDirectory(self, depot_name):
2301 if depot_name == 'chromium':
2302 return self.src_cwd
2303 elif depot_name == 'cros':
2304 return self.cros_cwd
2305 elif depot_name in DEPOT_NAMES:
2306 return self.depot_cwd[depot_name]
2307 else:
2308 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
2309 'was added without proper support?' % depot_name)
2311 def ChangeToDepotWorkingDirectory(self, depot_name):
2312 """Given a depot, changes to the appropriate working directory.
2314 Args:
2315 depot_name: The name of the depot (see DEPOT_NAMES).
2317 os.chdir(self._GetDepotDirectory(depot_name))
2319 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2320 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2321 search_forward=True)
2322 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2323 search_forward=False)
2324 min_revision_data['external']['v8_bleeding_edge'] = r1
2325 max_revision_data['external']['v8_bleeding_edge'] = r2
2327 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2328 min_revision_data['revision'])
2329 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2330 max_revision_data['revision'])):
2331 self.warnings.append(
2332 'Trunk revisions in V8 did not map directly to bleeding_edge. '
2333 'Attempted to expand the range to find V8 rolls which did map '
2334 'directly to bleeding_edge revisions, but results might not be '
2335 'valid.')
2337 def _FindNextDepotToBisect(
2338 self, current_depot, min_revision_data, max_revision_data):
2339 """Decides which depot the script should dive into next (if any).
2341 Args:
2342 current_depot: Current depot being bisected.
2343 min_revision_data: Data about the earliest revision in the bisect range.
2344 max_revision_data: Data about the latest revision in the bisect range.
2346 Returns:
2347 Name of the depot to bisect next, or None.
2349 external_depot = None
2350 for next_depot in DEPOT_NAMES:
2351 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2352 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2353 continue
2355 if not (DEPOT_DEPS_NAME[next_depot]['recurse']
2356 and min_revision_data['depot']
2357 in DEPOT_DEPS_NAME[next_depot]['from']):
2358 continue
2360 if current_depot == 'v8':
2361 # We grab the bleeding_edge info here rather than earlier because we
2362 # finally have the revision range. From that we can search forwards and
2363 # backwards to try to match trunk revisions to bleeding_edge.
2364 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2366 if (min_revision_data['external'].get(next_depot) ==
2367 max_revision_data['external'].get(next_depot)):
2368 continue
2370 if (min_revision_data['external'].get(next_depot) and
2371 max_revision_data['external'].get(next_depot)):
2372 external_depot = next_depot
2373 break
2375 return external_depot
2377 def PrepareToBisectOnDepot(
2378 self, current_depot, end_revision, start_revision, previous_revision):
2379 """Changes to the appropriate directory and gathers a list of revisions
2380 to bisect between |start_revision| and |end_revision|.
2382 Args:
2383 current_depot: The depot we want to bisect.
2384 end_revision: End of the revision range.
2385 start_revision: Start of the revision range.
2386 previous_revision: The last revision we synced to on |previous_depot|.
2388 Returns:
2389 A list containing the revisions between |start_revision| and
2390 |end_revision| inclusive.
2392 # Change into working directory of external library to run
2393 # subsequent commands.
2394 self.ChangeToDepotWorkingDirectory(current_depot)
2396 # V8 (and possibly others) is merged in periodically. Bisecting
2397 # this directory directly won't give much good info.
2398 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
2399 config_path = os.path.join(self.src_cwd, '..')
2400 if bisect_utils.RunGClientAndCreateConfig(self.opts,
2401 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2402 return []
2403 if bisect_utils.RunGClient(
2404 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
2405 return []
2407 if current_depot == 'v8_bleeding_edge':
2408 self.ChangeToDepotWorkingDirectory('chromium')
2410 shutil.move('v8', 'v8.bak')
2411 shutil.move('v8_bleeding_edge', 'v8')
2413 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2414 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
2416 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2417 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
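# After this swap, src/v8 holds the bleeding_edge checkout and src/v8.bak holds
# the original v8 checkout; the cleanup commands recorded above restore the
# original layout later.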
2419 self.ChangeToDepotWorkingDirectory(current_depot)
2421 depot_revision_list = self.GetRevisionList(current_depot,
2422 end_revision,
2423 start_revision)
2425 self.ChangeToDepotWorkingDirectory('chromium')
2427 return depot_revision_list
2429 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2430 """Gathers reference values by running the performance tests on the
2431 known good and bad revisions.
2433 Args:
2434 good_rev: The last known good revision where the performance regression
2435 has not occurred yet.
2436 bad_rev: A revision where the performance regression has already occurred.
2437 cmd: The command to execute the performance test.
2438 metric: The metric being tested for regression.
2440 Returns:
2441 A tuple with the results of building and running each revision.
2443 bad_run_results = self.SyncBuildAndRunRevision(
2444 bad_rev, target_depot, cmd, metric)
2446 good_run_results = None
2448 if not bad_run_results[1]:
2449 good_run_results = self.SyncBuildAndRunRevision(
2450 good_rev, target_depot, cmd, metric)
2452 return (bad_run_results, good_run_results)
2454 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2455 if self.opts.output_buildbot_annotations:
2456 step_name = 'Bisection Range: [%s - %s]' % (
2457 revision_list[len(revision_list)-1], revision_list[0])
2458 bisect_utils.OutputAnnotationStepStart(step_name)
2460 print
2461 print 'Revisions to bisect on [%s]:' % depot
2462 for revision_id in revision_list:
2463 print ' -> %s' % (revision_id, )
2464 print
2466 if self.opts.output_buildbot_annotations:
2467 bisect_utils.OutputAnnotationStepClosed()
2469 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
2470 """Checks to see if changes to DEPS file occurred, and that the revision
2471 range also includes the change to .DEPS.git. If it doesn't, attempts to
2472 expand the revision range to include it.
2474 Args:
2475 bad_revision: First known bad revision.
2476 good_revision: Last known good revision.
2478 Returns:
2479 A tuple with the new bad and good revisions.
2481 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
2482 changes_to_deps = self.source_control.QueryFileRevisionHistory(
2483 'DEPS', good_revision, bad_revision)
2485 if changes_to_deps:
2486 # DEPS file was changed, search from the oldest change to DEPS file to
2487 # bad_revision to see if there are matching .DEPS.git changes.
2488 oldest_deps_change = changes_to_deps[-1]
2489 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
2490 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
2492 if len(changes_to_deps) != len(changes_to_gitdeps):
2493 # Grab the timestamp of the last DEPS change
2494 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
2495 output = bisect_utils.CheckRunGit(cmd)
2496 commit_time = int(output)
2498 # Try looking for a commit that touches the .DEPS.git file in the
2499 # next 15 minutes after the DEPS file change.
2500 cmd = ['log', '--format=%H', '-1',
2501 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
2502 'origin/master', bisect_utils.FILE_DEPS_GIT]
2503 output = bisect_utils.CheckRunGit(cmd)
2504 output = output.strip()
2505 if output:
2506 self.warnings.append('Detected change to DEPS and modified '
2507 'revision range to include change to .DEPS.git')
2508 return (output, good_revision)
2509 else:
2510 self.warnings.append('Detected change to DEPS but couldn\'t find '
2511 'matching change to .DEPS.git')
2512 return (bad_revision, good_revision)
2514 def CheckIfRevisionsInProperOrder(
2515 self, target_depot, good_revision, bad_revision):
2516 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2518 Args:
2519 good_revision: Number/tag of the known good revision.
2520 bad_revision: Number/tag of the known bad revision.
2522 Returns:
2523 True if the revisions are in the proper order (good earlier than bad).
2525 if self.source_control.IsGit() and target_depot != 'cros':
2526 cmd = ['log', '--format=%ct', '-1', good_revision]
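# 'git log --format=%ct -1 <rev>' prints the committer timestamp as Unix
# seconds, so the two revisions are compared by commit time below.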
2527 cwd = self._GetDepotDirectory(target_depot)
2529 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2530 good_commit_time = int(output)
2532 cmd = ['log', '--format=%ct', '-1', bad_revision]
2533 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2534 bad_commit_time = int(output)
2536 return good_commit_time <= bad_commit_time
2537 else:
2538 # Cros/svn use integers
2539 return int(good_revision) <= int(bad_revision)
2541 def CanPerformBisect(self, revision_to_check):
2542 """Checks whether a given revision is bisectable.
2544 Note: At present it checks whether a given revision is bisectable on
2545 android bots (refer to crbug.com/385324).
2547 Args:
2548 revision_to_check: Known good revision.
2550 Returns:
2551 A dictionary with the field "error" if the revision is not bisectable;
2552 otherwise None.
2554 if self.opts.target_platform == 'android':
2555 revision_to_check = self.source_control.SVNFindRev(revision_to_check)
2556 if (bisect_utils.IsStringInt(revision_to_check)
2557 and revision_to_check < 265549):
2558 return {'error': (
2559 'Bisect cannot continue for the given revision range.\n'
2560 'It is impossible to bisect Android regressions '
2561 'prior to r265549, which allows the bisect bot to '
2562 'rely on Telemetry to do apk installation of the most recently '
2563 'built local ChromeShell (refer to crbug.com/385324).\n'
2564 'Please try bisecting revisions greater than or equal to r265549.')}
2565 return None
2567 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2568 """Given known good and bad revisions, run a binary search on all
2569 intermediate revisions to determine the CL where the performance regression
2570 occurred.
2572 Args:
2573 command_to_run: Specify the command to execute the performance test.
2574 bad_revision_in: Number/tag of the known bad revision.
2575 good_revision_in: Number/tag of the known good revision.
2576 metric: The performance metric to monitor.
2578 Returns:
2579 A dict with 2 members, 'revision_data' and 'error'. On success,
2580 'revision_data' will contain a dict mapping revision ids to
2581 data about that revision. Each piece of revision data consists of a
2582 dict with the following keys:
2584 'passed': Represents whether the performance test was successful at
2585 that revision. Possible values include: 1 (passed), 0 (failed),
2586 '?' (skipped), 'F' (build failed).
2587 'depot': The depot that this revision is from (e.g. WebKit).
2588 'external': If the revision is a 'src' revision, 'external' contains
2589 the revisions of each of the external libraries.
2590 'sort': A sort value for sorting the dict in order of commits.
2592 For example:
2594 'error':None,
2595 'revision_data':
2597 'CL #1':
2599 'passed':False,
2600 'depot':'chromium',
2601 'external':None,
2602 'sort':0
2607 If an error occurred, the 'error' field will contain the message and
2608 'revision_data' will be empty.
2610 results = {
2611 'revision_data' : {},
2612 'error' : None,
2615 # Choose depot to bisect first
2616 target_depot = 'chromium'
2617 if self.opts.target_platform == 'cros':
2618 target_depot = 'cros'
2619 elif self.opts.target_platform == 'android-chrome':
2620 target_depot = 'android-chrome'
2622 cwd = os.getcwd()
2623 self.ChangeToDepotWorkingDirectory(target_depot)
2625 # If they passed SVN CLs, etc., we can try to match them to git SHA1s.
2626 bad_revision = self.source_control.ResolveToRevision(
2627 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
2628 good_revision = self.source_control.ResolveToRevision(
2629 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)
2631 os.chdir(cwd)
2633 if bad_revision is None:
2634 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2635 return results
2637 if good_revision is None:
2638 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
2639 return results
2641 # Check that they didn't accidentally swap good and bad revisions.
2642 if not self.CheckIfRevisionsInProperOrder(
2643 target_depot, good_revision, bad_revision):
2644 results['error'] = ('bad_revision < good_revision, did you swap these '
2645 'by mistake?')
2646 return results
2648 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
2649 bad_revision, good_revision)
2651 if self.opts.output_buildbot_annotations:
2652 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2654 cannot_bisect = self.CanPerformBisect(good_revision)
2655 if cannot_bisect:
2656 results['error'] = cannot_bisect.get('error')
2657 return results
2659 print 'Gathering revision range for bisection.'
2660 # Retrieve a list of revisions to do bisection on.
2661 src_revision_list = self.GetRevisionList(
2662 target_depot, bad_revision, good_revision)
2664 if self.opts.output_buildbot_annotations:
2665 bisect_utils.OutputAnnotationStepClosed()
2667 if src_revision_list:
2668 # revision_data will store information about a revision such as the
2669 # depot it came from, the webkit/V8 revision at that time,
2670 # performance timing, build state, etc...
2671 revision_data = results['revision_data']
2673 # revision_list is the list we're binary searching through at the moment.
2674 revision_list = []
2676 sort_key_ids = 0
2678 for current_revision_id in src_revision_list:
2679 sort_key_ids += 1
2681 revision_data[current_revision_id] = {
2682 'value' : None,
2683 'passed' : '?',
2684 'depot' : target_depot,
2685 'external' : None,
2686 'perf_time' : 0,
2687 'build_time' : 0,
2688 'sort' : sort_key_ids,
2690 revision_list.append(current_revision_id)
2692 min_revision = 0
2693 max_revision = len(revision_list) - 1
2695 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2697 if self.opts.output_buildbot_annotations:
2698 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2700 print 'Gathering reference values for bisection.'
2702 # Perform the performance tests on the good and bad revisions, to get
2703 # reference values.
2704 bad_results, good_results = self.GatherReferenceValues(good_revision,
2705 bad_revision,
2706 command_to_run,
2707 metric,
2708 target_depot)
2710 if self.opts.output_buildbot_annotations:
2711 bisect_utils.OutputAnnotationStepClosed()
2713 if bad_results[1]:
2714 results['error'] = ('An error occurred while building and running '
2715 'the \'bad\' reference value. The bisect cannot continue without '
2716 'a working \'bad\' revision to start from.\n\nError: %s' %
2717 bad_results[0])
2718 return results
2720 if good_results[1]:
2721 results['error'] = ('An error occurred while building and running '
2722 'the \'good\' reference value. The bisect cannot continue without '
2723 'a working \'good\' revision to start from.\n\nError: %s' %
2724 good_results[0])
2725 return results
2728 # We need these reference values to determine if later runs should be
2729 # classified as pass or fail.
2730 known_bad_value = bad_results[0]
2731 known_good_value = good_results[0]
2733 # Can just mark the good and bad revisions explicitly here since we
2734 # already know the results.
2735 bad_revision_data = revision_data[revision_list[0]]
2736 bad_revision_data['external'] = bad_results[2]
2737 bad_revision_data['perf_time'] = bad_results[3]
2738 bad_revision_data['build_time'] = bad_results[4]
2739 bad_revision_data['passed'] = False
2740 bad_revision_data['value'] = known_bad_value
2742 good_revision_data = revision_data[revision_list[max_revision]]
2743 good_revision_data['external'] = good_results[2]
2744 good_revision_data['perf_time'] = good_results[3]
2745 good_revision_data['build_time'] = good_results[4]
2746 good_revision_data['passed'] = True
2747 good_revision_data['value'] = known_good_value
2749 next_revision_depot = target_depot
2751 while True:
2752 if not revision_list:
2753 break
2755 min_revision_data = revision_data[revision_list[min_revision]]
2756 max_revision_data = revision_data[revision_list[max_revision]]
2758 if max_revision - min_revision <= 1:
2759 current_depot = min_revision_data['depot']
2760 if min_revision_data['passed'] == '?':
2761 next_revision_index = min_revision
2762 elif max_revision_data['passed'] == '?':
2763 next_revision_index = max_revision
2764 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2765 previous_revision = revision_list[min_revision]
2766 # If there were changes to any of the external libraries we track,
2767 # we should bisect the changes there as well.
2768 external_depot = self._FindNextDepotToBisect(
2769 current_depot, min_revision_data, max_revision_data)
2771 # If there was no change in any of the external depots, the search
2772 # is over.
2773 if not external_depot:
2774 if current_depot == 'v8':
2775 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2776 'continue any further. The script can only bisect into '
2777 'V8\'s bleeding_edge repository if both the current and '
2778 'previous revisions in trunk map directly to revisions in '
2779 'bleeding_edge.')
2780 break
2782 earliest_revision = max_revision_data['external'][external_depot]
2783 latest_revision = min_revision_data['external'][external_depot]
2785 new_revision_list = self.PrepareToBisectOnDepot(
2786 external_depot, latest_revision, earliest_revision,
2787 previous_revision)
2789 if not new_revision_list:
2790 results['error'] = ('An error occurred attempting to retrieve '
2791 'revision range: [%s..%s]' %
2792 (earliest_revision, latest_revision))
2793 return results
2795 _AddRevisionsIntoRevisionData(
2796 new_revision_list, external_depot, min_revision_data['sort'],
2797 revision_data)
2799 # Reset the bisection and perform it on the newly inserted
2800 # changelists.
2801 revision_list = new_revision_list
2802 min_revision = 0
2803 max_revision = len(revision_list) - 1
2804 sort_key_ids += len(revision_list)
2806 print ('Regression in metric %s appears to be the result of '
2807 'changes in [%s].' % (metric, external_depot))
2809 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2811 continue
2812 else:
2813 break
2814 else:
2815 next_revision_index = (int((max_revision - min_revision) / 2) +
2816 min_revision)
2818 next_revision_id = revision_list[next_revision_index]
2819 next_revision_data = revision_data[next_revision_id]
2820 next_revision_depot = next_revision_data['depot']
2822 self.ChangeToDepotWorkingDirectory(next_revision_depot)
2824 if self.opts.output_buildbot_annotations:
2825 step_name = 'Working on [%s]' % next_revision_id
2826 bisect_utils.OutputAnnotationStepStart(step_name)
2828 print 'Working on revision: [%s]' % next_revision_id
2830 run_results = self.SyncBuildAndRunRevision(next_revision_id,
2831 next_revision_depot,
2832 command_to_run,
2833 metric, skippable=True)
2835 # If the build is successful, check whether or not the metric
2836 # had regressed.
2837 if not run_results[1]:
2838 if len(run_results) > 2:
2839 next_revision_data['external'] = run_results[2]
2840 next_revision_data['perf_time'] = run_results[3]
2841 next_revision_data['build_time'] = run_results[4]
2843 passed_regression = self._CheckIfRunPassed(run_results[0],
2844 known_good_value,
2845 known_bad_value)
2847 next_revision_data['passed'] = passed_regression
2848 next_revision_data['value'] = run_results[0]
2850 if passed_regression:
2851 max_revision = next_revision_index
2852 else:
2853 min_revision = next_revision_index
2854 else:
2855 if run_results[1] == BUILD_RESULT_SKIPPED:
2856 next_revision_data['passed'] = 'Skipped'
2857 elif run_results[1] == BUILD_RESULT_FAIL:
2858 next_revision_data['passed'] = 'Build Failed'
2860 print run_results[0]
2862 # If the build is broken, remove it and redo search.
2863 revision_list.pop(next_revision_index)
2865 max_revision -= 1
2867 if self.opts.output_buildbot_annotations:
2868 self._PrintPartialResults(results)
2869 bisect_utils.OutputAnnotationStepClosed()
2870 else:
2871 # Weren't able to sync and retrieve the revision range.
2872 results['error'] = ('An error occurred attempting to retrieve revision '
2873 'range: [%s..%s]' % (good_revision, bad_revision))
2875 return results
2877 def _PrintPartialResults(self, results_dict):
2878 revision_data = results_dict['revision_data']
2879 revision_data_sorted = sorted(revision_data.iteritems(),
2880 key = lambda x: x[1]['sort'])
2881 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2883 self._PrintTestedCommitsTable(revision_data_sorted,
2884 results_dict['first_working_revision'],
2885 results_dict['last_broken_revision'],
2886 100, final_step=False)
2888 def _ConfidenceLevelStatus(self, results_dict):
2889 if not results_dict['confidence']:
2890 return None
2891 confidence_status = 'Successful with %(level)s confidence%(warning)s.'
2892 if results_dict['confidence'] >= 95:
2893 level = 'high'
2894 else:
2895 level = 'low'
2896 warning = ' and warnings'
2897 if not self.warnings:
2898 warning = ''
2899 return confidence_status % {'level': level, 'warning': warning}
2901 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2902 info = self.source_control.QueryRevisionInfo(cl,
2903 self._GetDepotDirectory(depot))
2904 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
2905 try:
2906 # Format is "git-svn-id: svn://....@123456 <other data>"
2907 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2908 svn_revision = svn_line[0].split('@')
2909 svn_revision = svn_revision[1].split(' ')[0]
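# For illustration: a body line such as
# 'git-svn-id: svn://svn.chromium.org/chrome/trunk/src@282461 0039d316-...'
# (an assumed example) yields '282461', which is appended to the depot's
# 'viewvc' URL below.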
2910 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2911 except IndexError:
2912 return ''
2913 return ''
2915 def _PrintRevisionInfo(self, cl, info, depot=None):
2916 email_info = ''
2917 if not info['email'].startswith(info['author']):
2918 email_info = '\nEmail : %s' % info['email']
2919 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
2920 if commit_link:
2921 commit_info = '\nLink : %s' % commit_link
2922 else:
2923 commit_info = ('\nFailed to parse svn revision from body:\n%s' %
2924 info['body'])
2925 print RESULTS_REVISION_INFO % {
2926 'subject': info['subject'],
2927 'author': info['author'],
2928 'email_info': email_info,
2929 'commit_info': commit_info,
2930 'cl': cl,
2931 'cl_date': info['date']
2934 def _PrintTestedCommitsHeader(self):
2935 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2936 _PrintTableRow(
2937 [20, 70, 14, 12, 13],
2938 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
2939 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2940 _PrintTableRow(
2941 [20, 70, 14, 12, 13],
2942 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
2943 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2944 _PrintTableRow(
2945 [20, 70, 14, 13],
2946 ['Depot', 'Commit SHA', 'Return Code', 'State'])
2947 else:
2948 assert False, 'Invalid bisect_mode specified.'
2950 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
2951 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2952 std_error = '+-%.02f' % current_data['value']['std_err']
2953 mean = '%.02f' % current_data['value']['mean']
2954 _PrintTableRow(
2955 [20, 70, 12, 14, 13],
2956 [current_data['depot'], cl_link, mean, std_error, state_str])
2957 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2958 std_error = '+-%.02f' % current_data['value']['std_err']
2959 mean = '%.02f' % current_data['value']['mean']
2960 _PrintTableRow(
2961 [20, 70, 12, 14, 13],
2962 [current_data['depot'], cl_link, std_error, mean, state_str])
2963 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2964 mean = '%d' % current_data['value']['mean']
2965 _PrintTableRow(
2966 [20, 70, 14, 13],
2967 [current_data['depot'], cl_link, mean, state_str])
2969 def _PrintTestedCommitsTable(
2970 self, revision_data_sorted, first_working_revision, last_broken_revision,
2971 confidence, final_step=True):
2972 print
2973 if final_step:
2974 print '===== TESTED COMMITS ====='
2975 else:
2976 print '===== PARTIAL RESULTS ====='
2977 self._PrintTestedCommitsHeader()
2978 state = 0
2979 for current_id, current_data in revision_data_sorted:
2980 if current_data['value']:
2981 if (current_id == last_broken_revision or
2982 current_id == first_working_revision):
2983 # If confidence is too low, don't add this empty line since it's
2984 # used to put focus on a suspected CL.
2985 if confidence and final_step:
2986 print
2987 state += 1
2988 if state == 2 and not final_step:
2989 # Just want a separation between "bad" and "good" cl's.
2990 print
2992 state_str = 'Bad'
2993 if state == 1 and final_step:
2994 state_str = 'Suspected CL'
2995 elif state == 2:
2996 state_str = 'Good'
2998 # If confidence is too low, don't bother outputting good/bad.
2999 if not confidence:
3000 state_str = ''
3001 state_str = state_str.center(13, ' ')
3003 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3004 current_data['depot'])
3005 if not cl_link:
3006 cl_link = current_id
3007 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3009 def _PrintReproSteps(self):
3010 """Prints out a section of the results explaining how to run the test.
3012 This message includes the command used to run the test.
3014 command = '$ ' + self.opts.command
3015 if bisect_utils.IsTelemetryCommand(self.opts.command):
3016 command += ('\nAlso consider passing --profiler=list to see available '
3017 'profilers.')
3018 print REPRO_STEPS_LOCAL % {'command': command}
3019 print REPRO_STEPS_TRYJOB % {'command': command}
3021 def _PrintOtherRegressions(self, other_regressions, revision_data):
3022 """Prints a section of the results about other potential regressions."""
3023 print
3024 print 'Other regressions may have occurred:'
3025 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3026 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3027 for regression in other_regressions:
3028 current_id, previous_id, confidence = regression
3029 current_data = revision_data[current_id]
3030 previous_data = revision_data[previous_id]
3032 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3033 current_data['depot'])
3034 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3035 previous_data['depot'])
3037 # If we can't map it to a viewable URL, at least show the original hash.
3038 if not current_link:
3039 current_link = current_id
3040 if not previous_link:
3041 previous_link = previous_id
3043 print ' %8s %70s %s' % (
3044 current_data['depot'], current_link,
3045 ('%d%%' % confidence).center(10, ' '))
3046 print ' %8s %70s' % (
3047 previous_data['depot'], previous_link)
3048 print
3050 def _GetResultsDict(self, revision_data, revision_data_sorted):
3051 # Find range where it possibly broke.
3052 first_working_revision = None
3053 first_working_revision_index = -1
3054 last_broken_revision = None
3055 last_broken_revision_index = -1
3057 for i in xrange(len(revision_data_sorted)):
3058 k, v = revision_data_sorted[i]
3059 if v['passed'] == 1:
3060 if not first_working_revision:
3061 first_working_revision = k
3062 first_working_revision_index = i
3064 if not v['passed']:
3065 last_broken_revision = k
3066 last_broken_revision_index = i
3068 if last_broken_revision is not None and first_working_revision is not None:
3069 broken_means = []
3070 for i in xrange(0, last_broken_revision_index + 1):
3071 if revision_data_sorted[i][1]['value']:
3072 broken_means.append(revision_data_sorted[i][1]['value']['values'])
3074 working_means = []
3075 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3076 if revision_data_sorted[i][1]['value']:
3077 working_means.append(revision_data_sorted[i][1]['value']['values'])
3079 # Flatten the lists to calculate mean of all values.
3080 working_mean = sum(working_means, [])
3081 broken_mean = sum(broken_means, [])
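# Note: despite their names, working_mean and broken_mean at this point are the
# flattened lists of individual sample values (e.g. sum([[1, 2], [3]], []) ==
# [1, 2, 3]); their means are computed just below.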
3083 # Calculate the approximate size of the regression
3084 mean_of_bad_runs = math_utils.Mean(broken_mean)
3085 mean_of_good_runs = math_utils.Mean(working_mean)
3087 regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
3088 mean_of_bad_runs)
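# For illustration (assuming RelativeChange is |bad - good| / good): a good mean
# of 200 and a bad mean of 220 give 0.10, reported as a 10% regression.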
3089 if math.isnan(regression_size):
3090 regression_size = 'zero-to-nonzero'
3092 regression_std_err = math.fabs(math_utils.PooledStandardError(
3093 [working_mean, broken_mean]) /
3094 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
3096 # Give a "confidence" in the bisect. At the moment we use how distinct the
3097 # values are before and after the last broken revision, and how noisy the
3098 # overall graph is.
3099 confidence = ConfidenceScore(working_means, broken_means)
3101 culprit_revisions = []
3103 cwd = os.getcwd()
3104 self.ChangeToDepotWorkingDirectory(
3105 revision_data[last_broken_revision]['depot'])
3107 if revision_data[last_broken_revision]['depot'] == 'cros':
3108 # Want to get a list of all the commits and what depots they belong
3109 # to so that we can grab info about each.
3110 cmd = ['repo', 'forall', '-c',
3111 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3112 last_broken_revision, first_working_revision + 1)]
3113 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
3115 changes = []
3116 assert not return_code, ('An error occurred while running '
3117 '"%s"' % ' '.join(cmd))
3118 last_depot = None
3119 cwd = os.getcwd()
3120 for l in output.split('\n'):
3121 if l:
3122 # Output will be in form:
3123 # /path_to_depot
3124 # /path_to_other_depot
3125 # <SHA1>
3126 # /path_again
3127 # <SHA1>
3128 # etc.
3129 if l[0] == '/':
3130 last_depot = l
3131 else:
3132 contents = l.split(' ')
3133 if len(contents) > 1:
3134 changes.append([last_depot, contents[0]])
3135 for c in changes:
3136 os.chdir(c[0])
3137 info = self.source_control.QueryRevisionInfo(c[1])
3138 culprit_revisions.append((c[1], info, None))
3139 else:
3140 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3141 k, v = revision_data_sorted[i]
3142 if k == first_working_revision:
3143 break
3144 self.ChangeToDepotWorkingDirectory(v['depot'])
3145 info = self.source_control.QueryRevisionInfo(k)
3146 culprit_revisions.append((k, info, v['depot']))
3147 os.chdir(cwd)
3149 # Check for any other possible regression ranges.
3150 other_regressions = _FindOtherRegressions(
3151 revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)
3153 return {
3154 'first_working_revision': first_working_revision,
3155 'last_broken_revision': last_broken_revision,
3156 'culprit_revisions': culprit_revisions,
3157 'other_regressions': other_regressions,
3158 'regression_size': regression_size,
3159 'regression_std_err': regression_std_err,
3160 'confidence': confidence,
3163 def _CheckForWarnings(self, results_dict):
3164 if len(results_dict['culprit_revisions']) > 1:
3165 self.warnings.append('Due to build errors, regression range could '
3166 'not be narrowed down to a single commit.')
3167 if self.opts.repeat_test_count == 1:
3168 self.warnings.append('Tests were only set to run once. This may '
3169 'be insufficient to get meaningful results.')
3170 if results_dict['confidence'] < 100:
3171 if results_dict['confidence']:
3172 self.warnings.append(
3173 'Confidence is less than 100%. There could be other candidates '
3174 'for this regression. Try bisecting again with increased '
3175 'repeat_count or on a sub-metric that shows the regression more '
3176 'clearly.')
3177 else:
3178 self.warnings.append(
3179 'Confidence is 0%. Try bisecting again on another platform, with '
3180 'increased repeat_count or on a sub-metric that shows the '
3181 'regression more clearly.')
3183 def FormatAndPrintResults(self, bisect_results):
3184 """Prints the results from a bisection run in a readable format.
3186 Args:
3187 bisect_results: The results from a bisection test run.
3189 revision_data = bisect_results['revision_data']
3190 revision_data_sorted = sorted(revision_data.iteritems(),
3191 key = lambda x: x[1]['sort'])
3192 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3194 self._CheckForWarnings(results_dict)
3196 if self.opts.output_buildbot_annotations:
3197 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
3199 print
3200 print 'Full results of bisection:'
3201 for current_id, current_data in revision_data_sorted:
3202 build_status = current_data['passed']
3204 if type(build_status) is bool:
3205 if build_status:
3206 build_status = 'Good'
3207 else:
3208 build_status = 'Bad'
3210 print ' %20s %40s %s' % (current_data['depot'],
3211 current_id, build_status)
3212 print
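# Editorial sketch (hypothetical revisions): each iteration above prints a
# ' %20s %40s %s' row, e.g.
#
#               chromium a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2 Good
#                 webkit f00dcafef00dcafef00dcafef00dcafef00dcafe Bad
#
# The third column is 'Good'/'Bad' for boolean results; otherwise the raw
# value of current_data['passed'] is printed unchanged.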
3214 if self.opts.output_buildbot_annotations:
3215 bisect_utils.OutputAnnotationStepClosed()
3216 # The perf dashboard scrapes the "results" step in order to comment on
3217 # bugs. If you change this, please update the perf dashboard as well.
3218 bisect_utils.OutputAnnotationStepStart('Results')
3220 self._PrintBanner(results_dict)
3221 self._PrintWarnings()
3223 if results_dict['culprit_revisions'] and results_dict['confidence']:
3224 for culprit in results_dict['culprit_revisions']:
3225 cl, info, depot = culprit
3226 self._PrintRevisionInfo(cl, info, depot)
3227 if results_dict['other_regressions']:
3228 self._PrintOtherRegressions(results_dict['other_regressions'],
3229 revision_data)
3230 self._PrintTestedCommitsTable(revision_data_sorted,
3231 results_dict['first_working_revision'],
3232 results_dict['last_broken_revision'],
3233 results_dict['confidence'])
3234 _PrintStepTime(revision_data_sorted)
3235 self._PrintReproSteps()
3236 _PrintThankYou()
3237 if self.opts.output_buildbot_annotations:
3238 bisect_utils.OutputAnnotationStepClosed()
3240 def _PrintBanner(self, results_dict):
3241 if self._IsBisectModeReturnCode():
3242 metrics = 'N/A'
3243 change = 'Yes'
3244 else:
3245 metrics = '/'.join(self.opts.metric)
3246 change = '%.02f%% (+/-%.02f%%)' % (
3247 results_dict['regression_size'], results_dict['regression_std_err'])
3249 if results_dict['culprit_revisions'] and results_dict['confidence']:
3250 status = self._ConfidenceLevelStatus(results_dict)
3251 else:
3252 status = 'Failure, could not reproduce.'
3253 change = 'Bisect could not reproduce a change.'
3255 print RESULTS_BANNER % {
3256 'status': status,
3257 'command': self.opts.command,
3258 'metrics': metrics,
3259 'change': change,
3260 'confidence': results_dict['confidence'],
3261 }
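# Editorial note (worked example with made-up numbers): given
# regression_size=5.07 and regression_std_err=0.37, the 'change' string
# formatted above is '5.07% (+/-0.37%)'; in return-code mode it is simply
# 'Yes' and metrics is 'N/A'. RESULTS_BANNER itself is defined earlier in
# this file.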
3263 def _PrintWarnings(self):
3264 """Prints a list of warning strings if there are any."""
3265 if not self.warnings:
3266 return
3267 print
3268 print 'WARNINGS:'
3269 for w in set(self.warnings):
3270 print ' ! %s' % w
3273 def _IsPlatformSupported():
3274 """Checks that this platform and build system are supported.
3276 Args:
3277 opts: The options parsed from the command line.
3279 Returns:
3280 True if the platform and build system are supported.
3282 # Haven't tested the script out on any other platforms yet.
3283 supported = ['posix', 'nt']
3284 return os.name in supported
3287 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3288 """Removes the directory tree specified, and then creates an empty
3289 directory in the same location (if not specified to skip).
3291 Args:
3292 path_to_dir: Path to the directory tree.
3293 skip_makedir: Whether to skip creating empty directory, default is False.
3295 Returns:
3296 True if successful, False if an error occurred.
3297 """
3298 try:
3299 if os.path.exists(path_to_dir):
3300 shutil.rmtree(path_to_dir)
3301 except OSError, e:
3302 if e.errno != errno.ENOENT:
3303 return False
3305 if not skip_makedir:
3306 return MaybeMakeDirectory(path_to_dir)
3308 return True
3311 def RemoveBuildFiles(build_type):
3312 """Removes build files from previous runs."""
3313 if RmTreeAndMkDir(os.path.join('out', build_type)):
3314 if RmTreeAndMkDir(os.path.join('build', build_type)):
3315 return True
3316 return False
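# Editorial usage sketch (not part of the original flow): with the default
# Release build type, main() below effectively calls
#
#   RemoveBuildFiles('Release')
#   # -> RmTreeAndMkDir(os.path.join('out', 'Release')) and
#   #    RmTreeAndMkDir(os.path.join('build', 'Release'))
#
# where each call removes the existing tree (tolerating a missing
# directory) and then recreates it via MaybeMakeDirectory().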
3319 class BisectOptions(object):
3320 """Options to be used when running bisection."""
3321 def __init__(self):
3322 super(BisectOptions, self).__init__()
3324 self.target_platform = 'chromium'
3325 self.build_preference = None
3326 self.good_revision = None
3327 self.bad_revision = None
3328 self.use_goma = None
3329 self.goma_dir = None
3330 self.cros_board = None
3331 self.cros_remote_ip = None
3332 self.repeat_test_count = 20
3333 self.truncate_percent = 25
3334 self.max_time_minutes = 20
3335 self.metric = None
3336 self.command = None
3337 self.output_buildbot_annotations = None
3338 self.no_custom_deps = False
3339 self.working_directory = None
3340 self.extra_src = None
3341 self.debug_ignore_build = None
3342 self.debug_ignore_sync = None
3343 self.debug_ignore_perf_test = None
3344 self.gs_bucket = None
3345 self.target_arch = 'ia32'
3346 self.target_build_type = 'Release'
3347 self.builder_host = None
3348 self.builder_port = None
3349 self.bisect_mode = BISECT_MODE_MEAN
3351 @staticmethod
3352 def _CreateCommandLineParser():
3353 """Creates a parser with bisect options.
3355 Returns:
3356 An instance of optparse.OptionParser.
3357 """
3358 usage = ('%prog [options] [-- chromium-options]\n'
3359 'Perform binary search on revision history to find a minimal '
3360 'range of revisions where a performance metric regressed.\n')
3362 parser = optparse.OptionParser(usage=usage)
3364 group = optparse.OptionGroup(parser, 'Bisect options')
3365 group.add_option('-c', '--command',
3366 type='str',
3367 help='A command to execute your performance test at' +
3368 ' each point in the bisection.')
3369 group.add_option('-b', '--bad_revision',
3370 type='str',
3371 help='A bad revision to start bisection. ' +
3372 'Must be later than good revision. May be either a git' +
3373 ' or svn revision.')
3374 group.add_option('-g', '--good_revision',
3375 type='str',
3376 help='A revision to start bisection where performance' +
3377 ' test is known to pass. Must be earlier than the ' +
3378 'bad revision. May be either a git or svn revision.')
3379 group.add_option('-m', '--metric',
3380 type='str',
3381 help='The desired metric to bisect on. For example ' +
3382 '"vm_rss_final_b/vm_rss_f_b"')
3383 group.add_option('-r', '--repeat_test_count',
3384 type='int',
3385 default=20,
3386 help='The number of times to repeat the performance '
3387 'test. Values will be clamped to range [1, 100]. '
3388 'Default value is 20.')
3389 group.add_option('--max_time_minutes',
3390 type='int',
3391 default=20,
3392 help='The maximum time (in minutes) to take running the '
3393 'performance tests. The script will run the performance '
3394 'tests according to --repeat_test_count, so long as it '
3395 'doesn\'t exceed --max_time_minutes. Values will be '
3396 'clamped to range [1, 60]. '
3397 'Default value is 20.')
3398 group.add_option('-t', '--truncate_percent',
3399 type='int',
3400 default=25,
3401 help='The highest/lowest % are discarded to form a '
3402 'truncated mean. Values will be clamped to range [0, '
3403 '25]. Default value is 25 (highest/lowest 25% will be '
3404 'discarded).')
3405 group.add_option('--bisect_mode',
3406 type='choice',
3407 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3408 BISECT_MODE_RETURN_CODE],
3409 default=BISECT_MODE_MEAN,
3410 help='The bisect mode. Choices are to bisect on the '
3411 'difference in mean, std_dev, or return_code.')
3412 parser.add_option_group(group)
3414 group = optparse.OptionGroup(parser, 'Build options')
3415 group.add_option('-w', '--working_directory',
3416 type='str',
3417 help='Path to the working directory where the script '
3418 'will do an initial checkout of the chromium depot. The '
3419 'files will be placed in a subdirectory "bisect" under '
3420 'working_directory and that will be used to perform the '
3421 'bisection. This parameter is optional; if it is not '
3422 'supplied, the script will work from the current depot.')
3423 group.add_option('--build_preference',
3424 type='choice',
3425 choices=['msvs', 'ninja', 'make'],
3426 help='The preferred build system to use. On linux/mac '
3427 'the options are make/ninja. On Windows, the options '
3428 'are msvs/ninja.')
3429 group.add_option('--target_platform',
3430 type='choice',
3431 choices=['chromium', 'cros', 'android', 'android-chrome'],
3432 default='chromium',
3433 help='The target platform. Choices are "chromium" '
3434 '(current platform), "cros", or "android". If you '
3435 'specify something other than "chromium", you must be '
3436 'properly set up to build that platform.')
3437 group.add_option('--no_custom_deps',
3438 dest='no_custom_deps',
3439 action='store_true',
3440 default=False,
3441 help='Run the script without setting custom_deps.')
3442 group.add_option('--extra_src',
3443 type='str',
3444 help='Path to a script which can be used to modify '
3445 'the bisect script\'s behavior.')
3446 group.add_option('--cros_board',
3447 type='str',
3448 help='The cros board type to build.')
3449 group.add_option('--cros_remote_ip',
3450 type='str',
3451 help='The remote machine to image to.')
3452 group.add_option('--use_goma',
3453 action='store_true',
3454 help='Enable goma and add extra build threads to make '
3455 'use of it.')
3456 group.add_option('--goma_dir',
3457 help='Path to goma tools (or system default if not '
3458 'specified).')
3459 group.add_option('--output_buildbot_annotations',
3460 action='store_true',
3461 help='Add extra annotation output for buildbot.')
3462 group.add_option('--gs_bucket',
3463 default='',
3464 dest='gs_bucket',
3465 type='str',
3466 help=('Name of the Google Storage bucket used to upload '
3467 'or download builds, e.g. chrome-perf.'))
3468 group.add_option('--target_arch',
3469 type='choice',
3470 choices=['ia32', 'x64', 'arm'],
3471 default='ia32',
3472 dest='target_arch',
3473 help=('The target build architecture. Choices are "ia32" '
3474 '(default), "x64" or "arm".'))
3475 group.add_option('--target_build_type',
3476 type='choice',
3477 choices=['Release', 'Debug'],
3478 default='Release',
3479 help='The target build type. Choices are "Release" '
3480 '(default), or "Debug".')
3481 group.add_option('--builder_host',
3482 dest='builder_host',
3483 type='str',
3484 help=('Host address of the server that produces builds '
3485 'from posted try job requests.'))
3486 group.add_option('--builder_port',
3487 dest='builder_port',
3488 type='int',
3489 help=('HTTP port of the server that produces builds '
3490 'from posted try job requests.'))
3491 parser.add_option_group(group)
3493 group = optparse.OptionGroup(parser, 'Debug options')
3494 group.add_option('--debug_ignore_build',
3495 action='store_true',
3496 help='DEBUG: Don\'t perform builds.')
3497 group.add_option('--debug_ignore_sync',
3498 action='store_true',
3499 help='DEBUG: Don\'t perform syncs.')
3500 group.add_option('--debug_ignore_perf_test',
3501 action='store_true',
3502 help='DEBUG: Don\'t perform performance tests.')
3503 parser.add_option_group(group)
3504 return parser
3506 def ParseCommandLine(self):
3507 """Parses the command line for bisect options."""
3508 parser = self._CreateCommandLineParser()
3509 opts, _ = parser.parse_args()
3511 try:
3512 if not opts.command:
3513 raise RuntimeError('missing required parameter: --command')
3515 if not opts.good_revision:
3516 raise RuntimeError('missing required parameter: --good_revision')
3518 if not opts.bad_revision:
3519 raise RuntimeError('missing required parameter: --bad_revision')
3521 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3522 raise RuntimeError('missing required parameter: --metric')
3524 if opts.gs_bucket:
3525 if not cloud_storage.List(opts.gs_bucket):
3526 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3527 if not opts.builder_host:
3528 raise RuntimeError('Must specify try server hostname when '
3529 'gs_bucket is used: --builder_host')
3530 if not opts.builder_port:
3531 raise RuntimeError('Must specify try server port number when '
3532 'gs_bucket is used: --builder_port')
3533 if opts.target_platform == 'cros':
3534 # Run sudo up front to make sure credentials are cached for later.
3535 print 'Sudo is required to build cros:'
3536 print
3537 bisect_utils.RunProcess(['sudo', 'true'])
3539 if not opts.cros_board:
3540 raise RuntimeError('missing required parameter: --cros_board')
3542 if not opts.cros_remote_ip:
3543 raise RuntimeError('missing required parameter: --cros_remote_ip')
3545 if not opts.working_directory:
3546 raise RuntimeError('missing required parameter: --working_directory')
3548 metric_values = opts.metric.split('/')
3549 if (len(metric_values) != 2 and
3550 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3551 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3553 opts.metric = metric_values
3554 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3555 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3556 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3557 opts.truncate_percent = opts.truncate_percent / 100.0
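# Editorial note (worked example): the clamping above means that, e.g.,
# --repeat_test_count=0 is stored as 1, --max_time_minutes=90 becomes 60,
# and --truncate_percent=30 is clamped to 25 and then stored as 0.25.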
3559 for k, v in opts.__dict__.iteritems():
3560 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
3561 setattr(self, k, v)
3562 except RuntimeError, e:
3563 output_string = StringIO.StringIO()
3564 parser.print_help(file=output_string)
3565 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3566 output_string.close()
3567 raise RuntimeError(error_message)
3569 @staticmethod
3570 def FromDict(values):
3571 """Creates an instance of BisectOptions with the values parsed from a
3572 .cfg file.
3574 Args:
3575 values: a dict containing options to set.
3577 Returns:
3578 An instance of BisectOptions.
3579 """
3580 opts = BisectOptions()
3581 for k, v in values.iteritems():
3582 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
3583 setattr(opts, k, v)
3585 metric_values = opts.metric.split('/')
3586 if len(metric_values) != 2:
3587 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3589 opts.metric = metric_values
3590 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3591 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3592 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3593 opts.truncate_percent = opts.truncate_percent / 100.0
3595 return opts
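# Editorial usage sketch (all values hypothetical): FromDict() is typically
# fed the parsed contents of a bisect .cfg file, e.g.
#
#   opts = BisectOptions.FromDict({
#       'command': 'out/Release/performance_ui_tests '
#                  '--gtest_filter=Foo.Bar',
#       'good_revision': 'a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2',
#       'bad_revision': 'f00dcafef00dcafef00dcafef00dcafef00dcafe',
#       'metric': 'my_chart/my_trace',
#       'working_directory': '/tmp/bisect',
#   })
#
# After the call, opts.metric == ['my_chart', 'my_trace'] and the repeat,
# max-time and truncate values are clamped to their documented ranges.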
3598 def main():
3600 try:
3601 opts = BisectOptions()
3602 opts.ParseCommandLine()
3604 if opts.extra_src:
3605 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3606 if not extra_src:
3607 raise RuntimeError('Invalid or missing --extra_src.')
3608 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
3610 if opts.working_directory:
3611 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3612 if opts.no_custom_deps:
3613 custom_deps = None
3614 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3616 os.chdir(os.path.join(os.getcwd(), 'src'))
3618 if not RemoveBuildFiles(opts.target_build_type):
3619 raise RuntimeError('Something went wrong removing the build files.')
3621 if not _IsPlatformSupported():
3622 raise RuntimeError('Sorry, this platform isn\'t supported yet.')
3624 # Check what source control method is being used, and create a
3625 # SourceControl object if possible.
3626 source_control = source_control_module.DetermineAndCreateSourceControl(opts)
3628 if not source_control:
3629 raise RuntimeError(
3630 'Sorry, only the git workflow is supported at the moment.')
3632 # gclient sync seems to fail if you're not on the master branch.
3633 if (not source_control.IsInProperBranch() and
3634 not opts.debug_ignore_sync and
3635 not opts.working_directory):
3636 raise RuntimeError('You must switch to master branch to run bisection.')
3637 bisect_test = BisectPerformanceMetrics(source_control, opts)
3638 try:
3639 bisect_results = bisect_test.Run(opts.command,
3640 opts.bad_revision,
3641 opts.good_revision,
3642 opts.metric)
3643 if bisect_results['error']:
3644 raise RuntimeError(bisect_results['error'])
3645 bisect_test.FormatAndPrintResults(bisect_results)
3646 return 0
3647 finally:
3648 bisect_test.PerformCleanup()
3649 except RuntimeError, e:
3650 if opts.output_buildbot_annotations:
3651 # The perf dashboard scrapes the "results" step in order to comment on
3652 # bugs. If you change this, please update the perf dashboard as well.
3653 bisect_utils.OutputAnnotationStepStart('Results')
3654 print 'Error: %s' % e.message
3655 if opts.output_buildbot_annotations:
3656 bisect_utils.OutputAnnotationStepClosed()
3657 return 1
3660 if __name__ == '__main__':
3661 sys.exit(main())