1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn CLs):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import errno
39 import imp
40 import math
41 import optparse
42 import os
43 import re
44 import shlex
45 import shutil
46 import subprocess
47 import sys
48 import threading
49 import time
51 import bisect_utils
54 # The additional repositories that might need to be bisected.
55 # If the repository has any dependent repositories (e.g. skia/src needs
56 # skia/include and skia/gyp to be updated), specify them in the 'depends'
57 # list so that they're synced appropriately.
58 # Format is:
59 # src: path to the working directory.
60 # recurse: True if this repository will get bisected.
61 # depends: A list of other repositories that are actually part of the same
62 # repository in svn.
63 # svn: Needed for git workflow to resolve hashes to svn revisions.
64 # from: Parent depot that must be bisected before this is bisected.
65 DEPOT_DEPS_NAME = {
66 'chromium' : {
67 "src" : "src/",
68 "recurse" : True,
69 "depends" : None,
70 "from" : 'cros'
72 'webkit' : {
73 "src" : "src/third_party/WebKit",
74 "recurse" : True,
75 "depends" : None,
76 "from" : 'chromium'
78 'angle' : {
79 "src" : "src/third_party/angle_dx11",
80 "recurse" : True,
81 "depends" : None,
82 "from" : 'chromium'
84 'v8' : {
85 "src" : "src/v8",
86 "recurse" : True,
87 "depends" : None,
88 "from" : 'chromium',
89 "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8
91 'v8_bleeding_edge' : {
92 "src" : "src/v8_bleeding_edge",
93 "recurse" : True,
94 "depends" : None,
95 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
96 "from" : 'v8',
98 'skia/src' : {
99 "src" : "src/third_party/skia/src",
100 "recurse" : True,
101 "svn" : "http://skia.googlecode.com/svn/trunk/src",
102 "depends" : ['skia/include', 'skia/gyp'],
103 "from" : 'chromium'
105 'skia/include' : {
106 "src" : "src/third_party/skia/include",
107 "recurse" : False,
108 "svn" : "http://skia.googlecode.com/svn/trunk/include",
109 "depends" : None,
110 "from" : 'chromium'
112 'skia/gyp' : {
113 "src" : "src/third_party/skia/gyp",
114 "recurse" : False,
115 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
116 "depends" : None,
117 "from" : 'chromium'
121 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
122 CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
123 CROS_VERSION_PATTERN = 'new version number from %s'
124 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
125 CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
126 'testing_rsa')
127 CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
128 'mod_for_test_scripts', 'ssh_keys',
129 'testing_rsa')
131 BUILD_RESULT_SUCCEED = 0
132 BUILD_RESULT_FAIL = 1
133 BUILD_RESULT_SKIPPED = 2
135 def CalculateTruncatedMean(data_set, truncate_percent):
136 """Calculates the truncated mean of a set of values.
138 Args:
139 data_set: Set of values to use in calculation.
140 truncate_percent: The % from the upper/lower portions of the data set to
141 discard, expressed as a value in [0, 1].
143 Returns:
144 The truncated mean as a float.
146 if len(data_set) > 2:
147 data_set = sorted(data_set)
149 discard_num_float = len(data_set) * truncate_percent
150 discard_num_int = int(math.floor(discard_num_float))
151 kept_weight = len(data_set) - discard_num_float * 2
153 data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
155 weight_left = 1.0 - (discard_num_float - discard_num_int)
157 if weight_left < 1:
158 # If the % to discard leaves a fractional portion, need to weight those
159 # values.
160 unweighted_vals = data_set[1:len(data_set)-1]
161 weighted_vals = [data_set[0], data_set[len(data_set)-1]]
162 weighted_vals = [w * weight_left for w in weighted_vals]
163 data_set = weighted_vals + unweighted_vals
164 else:
165 kept_weight = len(data_set)
167 truncated_mean = reduce(lambda x, y: float(x) + float(y),
168 data_set) / kept_weight
170 return truncated_mean
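# Illustrative doctest-style examples for CalculateTruncatedMean (the sample
# values are made up): with five samples and truncate_percent=0.2, one
# sample's worth of weight is trimmed from each end of the sorted data.
#
#   >>> CalculateTruncatedMean([1.0, 2.0, 3.0, 4.0, 100.0], 0.2)
#   3.0
#   >>> CalculateTruncatedMean([1.0, 2.0, 3.0], 0.0)
#   2.0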
173 def CalculateStandardDeviation(v):
174 if len(v) == 1:
175 return 0.0
177 mean = CalculateTruncatedMean(v, 0.0)
178 variances = [float(x) - mean for x in v]
179 variances = [x * x for x in variances]
180 variance = reduce(lambda x, y: float(x) + float(y), variances) / (len(v) - 1)
181 std_dev = math.sqrt(variance)
183 return std_dev
186 def CalculateStandardError(v):
187 if len(v) <= 1:
188 return 0.0
190 std_dev = CalculateStandardDeviation(v)
192 return std_dev / math.sqrt(len(v))
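# Illustrative example for the two helpers above (the sample values are made
# up): [1.0, 2.0, 3.0] has an (n-1) standard deviation of 1.0, so its
# standard error is 1.0 / sqrt(3), roughly 0.577.
#
#   >>> CalculateStandardDeviation([1.0, 2.0, 3.0])
#   1.0
#   >>> round(CalculateStandardError([1.0, 2.0, 3.0]), 3)
#   0.577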
195 def IsStringFloat(string_to_check):
196 """Checks whether or not the given string can be converted to a floating
197 point number.
199 Args:
200 string_to_check: Input string to check if it can be converted to a float.
202 Returns:
203 True if the string can be converted to a float.
205 try:
206 float(string_to_check)
208 return True
209 except ValueError:
210 return False
213 def IsStringInt(string_to_check):
214 """Checks whether or not the given string can be converted to a integer.
216 Args:
217 string_to_check: Input string to check if it can be converted to an int.
219 Returns:
220 True if the string can be converted to an int.
222 try:
223 int(string_to_check)
225 return True
226 except ValueError:
227 return False
230 def IsWindows():
231 """Checks whether or not the script is running on Windows.
233 Returns:
234 True if running on Windows.
236 return os.name == 'nt'
239 def RunProcess(command):
240 """Run an arbitrary command. If output from the call is needed, use
241 RunProcessAndRetrieveOutput instead.
243 Args:
244 command: A list containing the command and args to execute.
246 Returns:
247 The return code of the call.
249 # On Windows, use shell=True to get PATH interpretation.
250 shell = IsWindows()
251 return subprocess.call(command, shell=shell)
254 def RunProcessAndRetrieveOutput(command):
255 """Run an arbitrary command, returning its output and return code. Since
256 output is collected via communicate(), there will be no output until the
257 call terminates. If you need output while the program runs (ie. so
258 that the buildbot doesn't terminate the script), consider RunProcess().
260 Args:
261 command: A list containing the command and args to execute.
265 Returns:
266 A tuple of the output and return code.
268 # On Windows, use shell=True to get PATH interpretation.
269 shell = IsWindows()
270 proc = subprocess.Popen(command,
271 shell=shell,
272 stdout=subprocess.PIPE)
274 (output, _) = proc.communicate()
276 return (output, proc.returncode)
279 def RunGit(command):
280 """Run a git subcommand, returning its output and return code.
282 Args:
283 command: A list containing the args to git.
285 Returns:
286 A tuple of the output and return code.
288 command = ['git'] + command
290 return RunProcessAndRetrieveOutput(command)
293 def CheckRunGit(command):
294 """Run a git subcommand, returning its output and return code. Asserts if
295 the return code of the call is non-zero.
297 Args:
298 command: A list containing the args to git.
300 Returns:
301 The output of the git command.
303 (output, return_code) = RunGit(command)
305 assert not return_code, 'An error occurred while running'\
306 ' "git %s"' % ' '.join(command)
307 return output
310 def BuildWithMake(threads, targets):
311 cmd = ['make', 'BUILDTYPE=Release']
313 if threads:
314 cmd.append('-j%d' % threads)
316 cmd += targets
318 return_code = RunProcess(cmd)
320 return not return_code
323 def BuildWithNinja(threads, targets):
324 cmd = ['ninja', '-C', os.path.join('out', 'Release')]
326 if threads:
327 cmd.append('-j%d' % threads)
329 cmd += targets
331 return_code = RunProcess(cmd)
333 return not return_code
336 def BuildWithVisualStudio(targets):
337 path_to_devenv = os.path.abspath(
338 os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
339 path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
340 cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
342 for t in targets:
343 cmd.extend(['/Project', t])
345 return_code = RunProcess(cmd)
347 return not return_code
350 class Builder(object):
351 """Builder is used by the bisect script to build relevant targets and deploy.
353 def Build(self, depot, opts):
354 raise NotImplementedError()
357 class DesktopBuilder(Builder):
358 """DesktopBuilder is used to build Chromium on linux/mac/windows."""
359 def Build(self, depot, opts):
360 """Builds chrome and performance_ui_tests using options passed into
361 the script.
363 Args:
364 depot: Current depot being bisected.
365 opts: The options parsed from the command line.
367 Returns:
368 True if build was successful.
370 targets = ['chrome', 'performance_ui_tests']
372 threads = None
373 if opts.use_goma:
374 threads = 64
376 build_success = False
377 if opts.build_preference == 'make':
378 build_success = BuildWithMake(threads, targets)
379 elif opts.build_preference == 'ninja':
380 if IsWindows():
381 targets = [t + '.exe' for t in targets]
382 build_success = BuildWithNinja(threads, targets)
383 elif opts.build_preference == 'msvs':
384 assert IsWindows(), 'msvs is only supported on Windows.'
385 build_success = BuildWithVisualStudio(targets)
386 else:
387 assert False, 'No build system defined.'
388 return build_success
391 class AndroidBuilder(Builder):
392 """AndroidBuilder is used to build on android."""
393 def InstallAPK(self, opts):
394 """Installs apk to device.
396 Args:
397 opts: The options parsed from the command line.
399 Returns:
400 True if successful.
402 path_to_tool = os.path.join('build', 'android', 'adb_install_apk.py')
403 cmd = [path_to_tool, '--apk', 'ChromiumTestShell.apk', '--apk_package',
404 'org.chromium.chrome.testshell', '--release']
405 return_code = RunProcess(cmd)
407 return not return_code
409 def Build(self, depot, opts):
410 """Builds the android content shell and other necessary tools using options
411 passed into the script.
413 Args:
414 depot: Current depot being bisected.
415 opts: The options parsed from the command line.
417 Returns:
418 True if build was successful.
420 targets = ['chromium_testshell', 'forwarder2', 'md5sum']
421 threads = None
422 if opts.use_goma:
423 threads = 64
425 build_success = False
426 if opts.build_preference == 'ninja':
427 build_success = BuildWithNinja(threads, targets)
428 else:
429 assert False, 'No build system defined.'
431 if build_success:
432 build_success = self.InstallAPK(opts)
434 return build_success
437 class CrosBuilder(Builder):
438 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
439 target platform."""
440 def ImageToTarget(self, opts):
441 """Installs latest image to target specified by opts.cros_remote_ip.
443 Args:
444 opts: Program options containing cros_board and cros_remote_ip.
446 Returns:
447 True if successful.
449 try:
450 # Keys will most likely be set to 0640 after wiping the chroot.
451 os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
452 os.chmod(CROS_TEST_KEY_PATH, 0600)
453 cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
454 '--remote=%s' % opts.cros_remote_ip,
455 '--board=%s' % opts.cros_board, '--test', '--verbose']
457 return_code = RunProcess(cmd)
458 return not return_code
459 except OSError, e:
460 return False
462 def BuildPackages(self, opts, depot):
463 """Builds packages for cros.
465 Args:
466 opts: Program options containing cros_board.
467 depot: The depot being bisected.
469 Returns:
470 True if successful.
472 cmd = [CROS_SDK_PATH]
474 if depot != 'cros':
475 path_to_chrome = os.path.join(os.getcwd(), '..')
476 cmd += ['--chrome_root=%s' % path_to_chrome]
478 cmd += ['--']
480 if depot != 'cros':
481 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
483 cmd += ['BUILDTYPE=Release', './build_packages',
484 '--board=%s' % opts.cros_board]
485 return_code = RunProcess(cmd)
487 return not return_code
489 def BuildImage(self, opts, depot):
490 """Builds test image for cros.
492 Args:
493 opts: Program options containing cros_board.
494 depot: The depot being bisected.
496 Returns:
497 True if successful.
499 cmd = [CROS_SDK_PATH]
501 if depot != 'cros':
502 path_to_chrome = os.path.join(os.getcwd(), '..')
503 cmd += ['--chrome_root=%s' % path_to_chrome]
505 cmd += ['--']
507 if depot != 'cros':
508 cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
510 cmd += ['BUILDTYPE=Release', '--', './build_image',
511 '--board=%s' % opts.cros_board, 'test']
513 return_code = RunProcess(cmd)
515 return not return_code
517 def Build(self, depot, opts):
518 """Builds targets using options passed into the script.
520 Args:
521 depot: Current depot being bisected.
522 opts: The options parsed from the command line.
524 Returns:
525 True if build was successful.
527 if self.BuildPackages(opts, depot):
528 if self.BuildImage(opts, depot):
529 return self.ImageToTarget(opts)
530 return False
533 class SourceControl(object):
534 """SourceControl is an abstraction over the underlying source control
535 system used for chromium. For now only git is supported, but in the
536 future, the svn workflow could be added as well."""
537 def __init__(self):
538 super(SourceControl, self).__init__()
540 def SyncToRevisionWithGClient(self, revision):
541 """Uses gclient to sync to the specified revision.
543 ie. gclient sync --revision <revision>
545 Args:
546 revision: The git SHA1 or svn CL (depending on workflow).
548 Returns:
549 The return code of the call.
551 return bisect_utils.RunGClient(['sync', '--revision',
552 revision, '--verbose', '--nohooks', '--reset', '--force'])
554 def SyncToRevisionWithRepo(self, timestamp):
555 """Uses repo to sync all the underlying git depots to the specified
556 time.
558 Args:
559 timestamp: The unix timestamp to sync to.
561 Returns:
562 The return code of the call.
564 return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
567 class GitSourceControl(SourceControl):
568 """GitSourceControl is used to query the underlying source control. """
569 def __init__(self, opts):
570 super(GitSourceControl, self).__init__()
571 self.opts = opts
573 def IsGit(self):
574 return True
576 def GetRevisionList(self, revision_range_end, revision_range_start):
577 """Retrieves a list of revisions between |revision_range_start| and
578 |revision_range_end|.
580 Args:
581 revision_range_end: The SHA1 for the end of the range.
582 revision_range_start: The SHA1 for the beginning of the range.
584 Returns:
585 A list of the revisions between |revision_range_start| and
586 |revision_range_end| (inclusive).
588 revision_range = '%s..%s' % (revision_range_start, revision_range_end)
589 cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
590 log_output = CheckRunGit(cmd)
592 revision_hash_list = log_output.split()
593 revision_hash_list.append(revision_range_start)
595 return revision_hash_list
597 def SyncToRevision(self, revision, sync_client=None):
598 """Syncs to the specified revision.
600 Args:
601 revision: The revision to sync to.
602 sync_client: None to sync directly with source control, or 'gclient' /
603 'repo' to sync using that tool instead.
605 Returns:
606 True if successful.
609 if not sync_client:
610 results = RunGit(['checkout', revision])[1]
611 elif sync_client == 'gclient':
612 results = self.SyncToRevisionWithGClient(revision)
613 elif sync_client == 'repo':
614 results = self.SyncToRevisionWithRepo(revision)
616 return not results
618 def ResolveToRevision(self, revision_to_check, depot, search):
619 """If an SVN revision is supplied, try to resolve it to a git SHA1.
621 Args:
622 revision_to_check: The user supplied revision string that may need to be
623 resolved to a git SHA1.
624 depot: The depot the revision_to_check is from.
625 search: The number of changelists to try if the first fails to resolve
626 to a git hash. If the value is negative, the function will search
627 backwards chronologically, otherwise it will search forward.
629 Returns:
630 A string containing a git SHA1 hash, otherwise None.
632 if depot != 'cros':
633 if not IsStringInt(revision_to_check):
634 return revision_to_check
636 depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
638 if depot != 'chromium':
639 depot_svn = DEPOT_DEPS_NAME[depot]['svn']
641 svn_revision = int(revision_to_check)
642 git_revision = None
644 if search > 0:
645 search_range = xrange(svn_revision, svn_revision + search, 1)
646 else:
647 search_range = xrange(svn_revision, svn_revision + search, -1)
649 for i in search_range:
650 svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
651 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
652 'origin/master']
654 (log_output, return_code) = RunGit(cmd)
656 assert not return_code, 'An error occurred while running'\
657 ' "git %s"' % ' '.join(cmd)
659 if not return_code:
660 log_output = log_output.strip()
662 if log_output:
663 git_revision = log_output
665 break
667 return git_revision
668 else:
669 if IsStringInt(revision_to_check):
670 return int(revision_to_check)
671 else:
672 cwd = os.getcwd()
673 os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
674 'chromiumos-overlay'))
675 pattern = CROS_VERSION_PATTERN % revision_to_check
676 cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
678 git_revision = None
680 log_output = CheckRunGit(cmd)
681 if log_output:
682 git_revision = log_output
683 git_revision = int(log_output.strip())
684 os.chdir(cwd)
686 return git_revision
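# Illustrative call (the changelist number is borrowed from the usage example
# in the module docstring): ResolveToRevision('168222', 'chromium', 100) greps
# origin/master for a commit message containing
#   git-svn-id: svn://svn.chromium.org/chrome/trunk/src@168222
# or one of the next 99 changelists, and returns that commit's SHA1, or None
# if nothing in the searched range matches.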
688 def IsInProperBranch(self):
689 """Confirms they're in the master branch for performing the bisection.
690 This is needed or gclient will fail to sync properly.
692 Returns:
693 True if the current branch on src is 'master'
695 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
696 log_output = CheckRunGit(cmd)
697 log_output = log_output.strip()
699 return log_output == "master"
701 def SVNFindRev(self, revision):
702 """Maps directly to the 'git svn find-rev' command.
704 Args:
705 revision: The git SHA1 to use.
707 Returns:
708 An integer changelist #, otherwise None.
711 cmd = ['svn', 'find-rev', revision]
713 output = CheckRunGit(cmd)
714 svn_revision = output.strip()
716 if IsStringInt(svn_revision):
717 return int(svn_revision)
719 return None
721 def QueryRevisionInfo(self, revision):
722 """Gathers information on a particular revision, such as author's name,
723 email, subject, and date.
725 Args:
726 revision: Revision you want to gather information on.
727 Returns:
728 A dict in the following format:
730 'author': %s,
731 'email': %s,
732 'date': %s,
733 'subject': %s,
736 commit_info = {}
738 formats = ['%cN', '%cE', '%s', '%cD']
739 targets = ['author', 'email', 'subject', 'date']
741 for i in xrange(len(formats)):
742 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
743 output = CheckRunGit(cmd)
744 commit_info[targets[i]] = output.rstrip()
746 return commit_info
748 def CheckoutFileAtRevision(self, file_name, revision):
749 """Performs a checkout on a file at the given revision.
751 Returns:
752 True if successful.
754 return not RunGit(['checkout', revision, file_name])[1]
756 def RevertFileToHead(self, file_name):
757 """Unstages a file and returns it to HEAD.
759 Returns:
760 True if successful.
762 # Reset doesn't seem to return 0 on success.
763 RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])
765 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
767 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
768 """Returns a list of commits that modified this file.
770 Args:
771 filename: Name of file.
772 revision_start: Start of revision range.
773 revision_end: End of revision range.
775 Returns:
776 Returns a list of commits that touched this file.
778 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
779 filename]
780 output = CheckRunGit(cmd)
782 return [o for o in output.split('\n') if o]
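# Illustrative call (revision placeholders): QueryFileRevisionHistory('DEPS',
# <good_sha1>, <bad_sha1>) runs
#   git log --format=%H <good_sha1>~1..<bad_sha1> DEPS
# and returns the SHA1s of the commits in that range that touched DEPS,
# newest first.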
784 class BisectPerformanceMetrics(object):
785 """BisectPerformanceMetrics performs a bisection against a list of range
786 of revisions to narrow down where performance regressions may have
787 occurred."""
789 def __init__(self, source_control, opts):
790 super(BisectPerformanceMetrics, self).__init__()
792 self.opts = opts
793 self.source_control = source_control
794 self.src_cwd = os.getcwd()
795 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
796 self.depot_cwd = {}
797 self.cleanup_commands = []
798 self.warnings = []
799 self.builder = None
801 if opts.target_platform == 'cros':
802 self.builder = CrosBuilder()
803 elif opts.target_platform == 'android':
804 self.builder = AndroidBuilder()
805 else:
806 self.builder = DesktopBuilder()
808 # This always starts true since the script grabs latest first.
809 self.was_blink = True
811 for d in DEPOT_NAMES:
812 # The working directory of each depot is just the path to the depot, but
813 # since we're already in 'src', we can skip that part.
815 self.depot_cwd[d] = os.path.join(
816 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
818 def PerformCleanup(self):
819 """Performs cleanup when script is finished."""
820 os.chdir(self.src_cwd)
821 for c in self.cleanup_commands:
822 if c[0] == 'mv':
823 shutil.move(c[1], c[2])
824 else:
825 assert False, 'Invalid cleanup command.'
827 def GetRevisionList(self, depot, bad_revision, good_revision):
828 """Retrieves a list of all the commits between the bad revision and
829 last known good revision."""
831 revision_work_list = []
833 if depot == 'cros':
834 revision_range_start = good_revision
835 revision_range_end = bad_revision
837 cwd = os.getcwd()
838 self.ChangeToDepotWorkingDirectory('cros')
840 # Print the commit timestamps for every commit in the revision time
841 # range. We'll sort them and bisect by that. There is a remote chance that
842 # 2 (or more) commits will share the exact same timestamp, but it's
843 # probably safe to ignore that case.
844 cmd = ['repo', 'forall', '-c',
845 'git log --format=%%ct --before=%d --after=%d' % (
846 revision_range_end, revision_range_start)]
847 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
849 assert not return_code, 'An error occurred while running'\
850 ' "%s"' % ' '.join(cmd)
852 os.chdir(cwd)
854 revision_work_list = list(set(
855 [int(o) for o in output.split('\n') if IsStringInt(o)]))
856 revision_work_list = sorted(revision_work_list, reverse=True)
857 else:
858 revision_work_list = self.source_control.GetRevisionList(bad_revision,
859 good_revision)
861 return revision_work_list
863 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
864 """Parses the DEPS file to determine WebKit/v8/etc... versions.
866 Returns:
867 A dict in the format {depot:revision} if successful, otherwise None.
870 cwd = os.getcwd()
871 self.ChangeToDepotWorkingDirectory(depot)
873 results = {}
875 if depot == 'chromium':
876 locals = {'Var': lambda _: locals["vars"][_],
877 'From': lambda *args: None}
878 execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)
880 os.chdir(cwd)
882 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
884 for d in DEPOT_NAMES:
885 if DEPOT_DEPS_NAME[d]['recurse'] and\
886 DEPOT_DEPS_NAME[d]['from'] == depot:
887 if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
888 re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
890 if re_results:
891 results[d] = re_results.group('revision')
892 else:
893 return None
894 else:
895 return None
896 elif depot == 'cros':
897 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
898 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
899 CROS_CHROMEOS_PATTERN]
900 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
902 assert not return_code, 'An error occurred while running'\
903 ' "%s"' % ' '.join(cmd)
905 if len(output) > CROS_CHROMEOS_PATTERN:
906 output = output[len(CROS_CHROMEOS_PATTERN):]
908 if len(output) > 1:
909 output = output.split('_')[0]
911 if len(output) > 3:
912 contents = output.split('.')
914 version = contents[2]
916 if contents[3] != '0':
917 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
918 (version, contents[3], version)
919 if not warningText in self.warnings:
920 self.warnings.append(warningText)
922 cwd = os.getcwd()
923 self.ChangeToDepotWorkingDirectory('chromium')
924 return_code = CheckRunGit(['log', '-1', '--format=%H',
925 '--author=chrome-release@google.com', '--grep=to %s' % version,
926 'origin/master'])
927 os.chdir(cwd)
929 results['chromium'] = output.strip()
930 elif depot == 'v8':
931 results['v8_bleeding_edge'] = None
933 svn_revision = self.source_control.SVNFindRev(revision)
935 if IsStringInt(svn_revision):
936 # V8 is tricky to bisect, in that there are only a few instances when
937 # we can dive into bleeding_edge and get back a meaningful result.
938 # Try to detect a V8 "business as usual" case, which is when:
939 # 1. trunk revision N has description "Version X.Y.Z"
940 # 2. bleeding_edge revision (N-1) has description "Prepare push to
941 # trunk. Now working on X.Y.(Z+1)."
942 self.ChangeToDepotWorkingDirectory(depot)
944 revision_info = self.source_control.QueryRevisionInfo(revision)
946 version_re = re.compile("Version (?P<values>[0-9,.]+)")
948 regex_results = version_re.search(revision_info['subject'])
950 if regex_results:
951 version = regex_results.group('values')
953 self.ChangeToDepotWorkingDirectory('v8_bleeding_edge')
955 git_revision = self.source_control.ResolveToRevision(
956 int(svn_revision) - 1, 'v8_bleeding_edge', -1)
958 if git_revision:
959 revision_info = self.source_control.QueryRevisionInfo(git_revision)
961 if 'Prepare push to trunk' in revision_info['subject']:
962 results['v8_bleeding_edge'] = git_revision
964 return results
966 def BuildCurrentRevision(self, depot):
967 """Builds chrome and performance_ui_tests on the current revision.
969 Returns:
970 True if the build was successful.
972 if self.opts.debug_ignore_build:
973 return True
975 cwd = os.getcwd()
976 os.chdir(self.src_cwd)
978 build_success = self.builder.Build(depot, self.opts)
980 os.chdir(cwd)
982 return build_success
984 def RunGClientHooks(self):
985 """Runs gclient with runhooks command.
987 Returns:
988 True if gclient reports no errors.
991 if self.opts.debug_ignore_build:
992 return True
994 return not bisect_utils.RunGClient(['runhooks'])
996 def TryParseHistogramValuesFromOutput(self, metric, text):
997 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
999 Args:
1000 metric: The metric as a list of [<trace>, <value>] strings.
1001 text: The text to parse the metric values from.
1003 Returns:
1004 A list of floating point numbers found.
1006 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1008 text_lines = text.split('\n')
1009 values_list = []
1011 for current_line in text_lines:
1012 if metric_formatted in current_line:
1013 current_line = current_line[len(metric_formatted):]
1015 try:
1016 histogram_values = eval(current_line)
1018 for b in histogram_values['buckets']:
1019 average_for_bucket = float(b['high'] + b['low']) * 0.5
1020 # Extends the list with N-elements with the average for that bucket.
1021 values_list.extend([average_for_bucket] * b['count'])
1022 except:
1023 pass
1025 return values_list
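# Illustrative input for the histogram parser above (metric names and numbers
# are made up). Given metric = ['my_graph', 'my_trace'], a line such as
#   HISTOGRAM my_graph: my_trace= {'buckets': [{'low': 0, 'high': 2, 'count': 2}]}
# yields [1.0, 1.0]: each bucket contributes its midpoint once per count.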
1027 def TryParseResultValuesFromOutput(self, metric, text):
1028 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1030 Args:
1031 metric: The metric as a list of [<trace>, <value>] strings.
1032 text: The text to parse the metric values from.
1034 Returns:
1035 A list of floating point numbers found.
1037 # Format is: RESULT <graph>: <trace>= <value> <units>
1038 metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1040 text_lines = text.split('\n')
1041 values_list = []
1043 for current_line in text_lines:
1044 # Parse the output from the performance test for the metric we're
1045 # interested in.
1046 metric_re = metric_formatted +\
1047 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1048 metric_re = re.compile(metric_re)
1049 regex_results = metric_re.search(current_line)
1051 if not regex_results is None:
1052 values_list += [regex_results.group('values')]
1053 else:
1054 metric_re = metric_formatted +\
1055 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1056 metric_re = re.compile(metric_re)
1057 regex_results = metric_re.search(current_line)
1059 if not regex_results is None:
1060 metric_values = regex_results.group('values')
1062 values_list += metric_values.split(',')
1064 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1066 # If the metric is times/t, we need to sum the timings in order to get
1067 # regression results similar to those of the try-bots.
1068 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1069 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1071 if metric in metrics_to_sum:
1072 if values_list:
1073 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1075 return values_list
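# Illustrative inputs for the RESULT parser above, using the metric from the
# module docstring (the numbers are made up):
#   RESULT shutdown: simple-user-quit= 345.0 ms        -> [345.0]
#   RESULT shutdown: simple-user-quit= [340.0,350.0]   -> [340.0, 350.0]
# For metrics listed in metrics_to_sum (e.g. times/t), the parsed values are
# then collapsed into their sum so results line up with the try-bots.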
1077 def ParseMetricValuesFromOutput(self, metric, text):
1078 """Parses output from performance_ui_tests and retrieves the results for
1079 a given metric.
1081 Args:
1082 metric: The metric as a list of [<trace>, <value>] strings.
1083 text: The text to parse the metric values from.
1085 Returns:
1086 A list of floating point numbers found.
1088 metric_values = self.TryParseResultValuesFromOutput(metric, text)
1090 if not metric_values:
1091 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1093 return metric_values
1095 def RunPerformanceTestAndParseResults(self, command_to_run, metric):
1096 """Runs a performance test on the current revision by executing the
1097 'command_to_run' and parses the results.
1099 Args:
1100 command_to_run: The command to be run to execute the performance test.
1101 metric: The metric to parse out from the results of the performance test.
1103 Returns:
1104 On success, it will return a tuple of the average value of the metric,
1105 and a success code of 0.
1108 if self.opts.debug_ignore_perf_test:
1109 return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)
1111 if IsWindows():
1112 command_to_run = command_to_run.replace('/', r'\\')
1114 args = shlex.split(command_to_run)
1116 # If running a telemetry test for cros, insert the remote ip, and
1117 # identity parameters.
1118 if self.opts.target_platform == 'cros':
1119 if 'tools/perf/run_' in args[0]:
1120 args.append('--remote=%s' % self.opts.cros_remote_ip)
1121 args.append('--identity=%s' % CROS_TEST_KEY_PATH)
1123 cwd = os.getcwd()
1124 os.chdir(self.src_cwd)
1126 start_time = time.time()
1128 metric_values = []
1129 for i in xrange(self.opts.repeat_test_count):
1130 # Can ignore the return code since if the tests fail, it won't return 0.
1131 try:
1132 (output, return_code) = RunProcessAndRetrieveOutput(args)
1133 except OSError, e:
1134 if e.errno == errno.ENOENT:
1135 err_text = ("Something went wrong running the performance test. "
1136 "Please review the command line:\n\n")
1137 if 'src/' in ' '.join(args):
1138 err_text += ("Check that you haven't accidentally specified a path "
1139 "with src/ in the command.\n\n")
1140 err_text += ' '.join(args)
1141 err_text += '\n'
1143 return (err_text, -1)
1144 raise
1146 if self.opts.output_buildbot_annotations:
1147 print output
1149 metric_values += self.ParseMetricValuesFromOutput(metric, output)
1151 elapsed_minutes = (time.time() - start_time) / 60.0
1153 if elapsed_minutes >= self.opts.repeat_test_max_time or not metric_values:
1154 break
1156 os.chdir(cwd)
1158 # Need to get the average value if there were multiple values.
1159 if metric_values:
1160 truncated_mean = CalculateTruncatedMean(metric_values,
1161 self.opts.truncate_percent)
1162 standard_err = CalculateStandardError(metric_values)
1163 standard_dev = CalculateStandardDeviation(metric_values)
1165 values = {
1166 'mean': truncated_mean,
1167 'std_err': standard_err,
1168 'std_dev': standard_dev,
1169 'values': metric_values,
1172 print 'Results of performance test: %12f %12f' % (
1173 truncated_mean, standard_err)
1174 print
1175 return (values, 0)
1176 else:
1177 return ('Invalid metric specified, or no values returned from '
1178 'performance test.', -1)
1180 def FindAllRevisionsToSync(self, revision, depot):
1181 """Finds all dependant revisions and depots that need to be synced for a
1182 given revision. This is only useful in the git workflow, as an svn depot
1183 may be split into multiple mirrors.
1185 e.g. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
1186 skia/include. To sync skia/src properly, one has to find the proper
1187 revisions in skia/gyp and skia/include.
1189 Args:
1190 revision: The revision to sync to.
1191 depot: The depot in use at the moment (probably skia).
1193 Returns:
1194 A list of [depot, revision] pairs that need to be synced.
1196 revisions_to_sync = [[depot, revision]]
1198 is_base = (depot == 'chromium') or (depot == 'cros')
1200 # Some SVN depots were split into multiple git depots, so we need to
1201 # figure out for each mirror which git revision to grab. There's no
1202 # guarantee that the SVN revision will exist for each of the dependent
1203 # depots, so we have to grep the git logs and grab the next earlier one.
1204 if not is_base and\
1205 DEPOT_DEPS_NAME[depot]['depends'] and\
1206 self.source_control.IsGit():
1207 svn_rev = self.source_control.SVNFindRev(revision)
1209 for d in DEPOT_DEPS_NAME[depot]['depends']:
1210 self.ChangeToDepotWorkingDirectory(d)
1212 dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
1214 if dependant_rev:
1215 revisions_to_sync.append([d, dependant_rev])
1217 num_resolved = len(revisions_to_sync)
1218 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1220 self.ChangeToDepotWorkingDirectory(depot)
1222 if not ((num_resolved - 1) == num_needed):
1223 return None
1225 return revisions_to_sync
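# Illustrative return value (the SHA1s are placeholders): when bisecting
# skia/src, the result might look like
#   [['skia/src', <sha1>], ['skia/include', <sha1>], ['skia/gyp', <sha1>]]
# i.e. the requested depot plus one resolved revision per entry in its
# 'depends' list, or None if any dependent revision could not be resolved.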
1227 def PerformPreBuildCleanup(self):
1228 """Performs necessary cleanup between runs."""
1229 print 'Cleaning up between runs.'
1230 print
1232 # Having these pyc files around between runs can confuse the
1233 # perf tests and cause them to crash.
1234 for (path, dir, files) in os.walk(self.src_cwd):
1235 for cur_file in files:
1236 if cur_file.endswith('.pyc'):
1237 path_to_file = os.path.join(path, cur_file)
1238 os.remove(path_to_file)
1240 def PerformWebkitDirectoryCleanup(self, revision):
1241 """If the script is switching between Blink and WebKit during bisect,
1242 it's faster to just delete the directory rather than leave it up to git
1243 to sync.
1245 Returns:
1246 True if successful.
1248 if not self.source_control.CheckoutFileAtRevision(
1249 bisect_utils.FILE_DEPS_GIT, revision):
1250 return False
1252 cwd = os.getcwd()
1253 os.chdir(self.src_cwd)
1255 is_blink = bisect_utils.IsDepsFileBlink()
1257 os.chdir(cwd)
1259 if not self.source_control.RevertFileToHead(
1260 bisect_utils.FILE_DEPS_GIT):
1261 return False
1263 if self.was_blink != is_blink:
1264 self.was_blink = is_blink
1265 return bisect_utils.RemoveThirdPartyWebkitDirectory()
1266 return True
1268 def PerformCrosChrootCleanup(self):
1269 """Deletes the chroot.
1271 Returns:
1272 True if successful.
1274 cwd = os.getcwd()
1275 self.ChangeToDepotWorkingDirectory('cros')
1276 cmd = [CROS_SDK_PATH, '--delete']
1277 return_code = RunProcess(cmd)
1278 os.chdir(cwd)
1279 return not return_code
1281 def CreateCrosChroot(self):
1282 """Creates a new chroot.
1284 Returns:
1285 True if successful.
1287 cwd = os.getcwd()
1288 self.ChangeToDepotWorkingDirectory('cros')
1289 cmd = [CROS_SDK_PATH, '--create']
1290 return_code = RunProcess(cmd)
1291 os.chdir(cwd)
1292 return not return_code
1294 def PerformPreSyncCleanup(self, revision, depot):
1295 """Performs any necessary cleanup before syncing.
1297 Returns:
1298 True if successful.
1300 if depot == 'chromium':
1301 if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
1302 return False
1303 return self.PerformWebkitDirectoryCleanup(revision)
1304 elif depot == 'cros':
1305 return self.PerformCrosChrootCleanup()
1306 return True
1308 def RunPostSync(self, depot):
1309 """Performs any work after syncing.
1311 Returns:
1312 True if successful.
1314 if self.opts.target_platform == 'android':
1315 cwd = os.getcwd()
1316 os.chdir(os.path.join(self.src_cwd, '..'))
1317 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts):
1318 return False
1319 os.chdir(cwd)
1321 if depot == 'cros':
1322 return self.CreateCrosChroot()
1323 else:
1324 return self.RunGClientHooks()
1325 return True
1327 def ShouldSkipRevision(self, depot, revision):
1328 """Some commits can be safely skipped (such as a DEPS roll), since the tool
1329 is git based those changes would have no effect.
1331 Args:
1332 depot: The depot being bisected.
1333 revision: Current revision we're synced to.
1335 Returns:
1336 True if we should skip building/testing this revision.
1338 if depot == 'chromium':
1339 if self.source_control.IsGit():
1340 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1341 output = CheckRunGit(cmd)
1343 files = output.splitlines()
1345 if len(files) == 1 and files[0] == 'DEPS':
1346 return True
1348 return False
1350 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
1351 skippable=False):
1352 """Performs a full sync/build/run of the specified revision.
1354 Args:
1355 revision: The revision to sync to.
1356 depot: The depot that's being used at the moment (src, webkit, etc.)
1357 command_to_run: The command to execute the performance test.
1358 metric: The performance metric being tested.
1360 Returns:
1361 On success, a tuple containing the results of the performance test.
1362 Otherwise, a tuple with the error message.
1364 sync_client = None
1365 if depot == 'chromium':
1366 sync_client = 'gclient'
1367 elif depot == 'cros':
1368 sync_client = 'repo'
1370 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
1372 if not revisions_to_sync:
1373 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
1375 if not self.PerformPreSyncCleanup(revision, depot):
1376 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
1378 success = True
1380 if not self.opts.debug_ignore_sync:
1381 for r in revisions_to_sync:
1382 self.ChangeToDepotWorkingDirectory(r[0])
1384 if sync_client:
1385 self.PerformPreBuildCleanup()
1387 if not self.source_control.SyncToRevision(r[1], sync_client):
1388 success = False
1390 break
1392 if success:
1393 success = self.RunPostSync(depot)
1395 if success:
1396 if skippable and self.ShouldSkipRevision(depot, revision):
1397 return ('Skipped revision: [%s]' % str(revision),
1398 BUILD_RESULT_SKIPPED)
1400 if self.BuildCurrentRevision(depot):
1401 results = self.RunPerformanceTestAndParseResults(command_to_run,
1402 metric)
1404 if results[1] == 0:
1405 external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
1406 depot, revision)
1408 if not external_revisions is None:
1409 return (results[0], results[1], external_revisions)
1410 else:
1411 return ('Failed to parse DEPS file for external revisions.',
1412 BUILD_RESULT_FAIL)
1413 else:
1414 return results
1415 else:
1416 return ('Failed to build revision: [%s]' % (str(revision, )),
1417 BUILD_RESULT_FAIL)
1418 else:
1419 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
1420 else:
1421 return ('Failed to sync revision: [%s]' % (str(revision, )),
1422 BUILD_RESULT_FAIL)
1424 def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
1425 """Given known good and bad values, decide if the current_value passed
1426 or failed.
1428 Args:
1429 current_value: The value of the metric being checked.
1430 known_bad_value: The reference value for a "failed" run.
1431 known_good_value: The reference value for a "passed" run.
1433 Returns:
1434 True if the current_value is closer to the known_good_value than the
1435 known_bad_value.
1437 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
1438 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
1440 return dist_to_good_value < dist_to_bad_value
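# Illustrative example (the means are made up): with a known good mean of
# 100.0 and a known bad mean of 200.0, a run with mean 120.0 is closer to the
# good value and is treated as passing, while a run with mean 180.0 is
# treated as failing.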
1442 def ChangeToDepotWorkingDirectory(self, depot_name):
1443 """Given a depot, changes to the appropriate working directory.
1445 Args:
1446 depot_name: The name of the depot (see DEPOT_NAMES).
1448 if depot_name == 'chromium':
1449 os.chdir(self.src_cwd)
1450 elif depot_name == 'cros':
1451 os.chdir(self.cros_cwd)
1452 elif depot_name in DEPOT_NAMES:
1453 os.chdir(self.depot_cwd[depot_name])
1454 else:
1455 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
1456 ' was added without proper support?' %\
1457 (depot_name,)
1459 def FindNextDepotToBisect(self, current_revision, min_revision_data,
1460 max_revision_data):
1461 """Given the state of the bisect, decides which depot the script should
1462 dive into next (if any).
1464 Args:
1465 current_revision: Current revision synced to.
1466 min_revision_data: Data about the earliest revision in the bisect range.
1467 max_revision_data: Data about the latest revision in the bisect range.
1469 Returns:
1470 The depot to bisect next, or None.
1472 external_depot = None
1473 for current_depot in DEPOT_NAMES:
1474 if not (DEPOT_DEPS_NAME[current_depot]["recurse"] and
1475 DEPOT_DEPS_NAME[current_depot]['from'] ==
1476 min_revision_data['depot']):
1477 continue
1479 if (min_revision_data['external'][current_depot] ==
1480 max_revision_data['external'][current_depot]):
1481 continue
1483 if (min_revision_data['external'][current_depot] and
1484 max_revision_data['external'][current_depot]):
1485 external_depot = current_depot
1486 break
1488 return external_depot
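# Illustrative example (revisions are hypothetical): if the bisect on
# 'chromium' has narrowed to two adjacent revisions whose recorded 'webkit'
# external revisions differ, this returns 'webkit' so the search can continue
# inside that depot; if no tracked external depot changed, it returns None.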
1490 def PrepareToBisectOnDepot(self,
1491 current_depot,
1492 end_revision,
1493 start_revision,
1494 previous_depot,
1495 previous_revision):
1496 """Changes to the appropriate directory and gathers a list of revisions
1497 to bisect between |start_revision| and |end_revision|.
1499 Args:
1500 current_depot: The depot we want to bisect.
1501 end_revision: End of the revision range.
1502 start_revision: Start of the revision range.
1503 previous_depot: The depot we were previously bisecting.
1504 previous_revision: The last revision we synced to on |previous_depot|.
1506 Returns:
1507 A list containing the revisions between |start_revision| and
1508 |end_revision| inclusive.
1510 # Change into working directory of external library to run
1511 # subsequent commands.
1512 self.ChangeToDepotWorkingDirectory(current_depot)
1514 # V8 (and possibly others) is merged in periodically. Bisecting
1515 # this directory directly won't give much good info.
1516 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
1517 config_path = os.path.join(self.src_cwd, '..')
1518 if bisect_utils.RunGClientAndCreateConfig(self.opts,
1519 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
1520 return []
1521 if bisect_utils.RunGClient(
1522 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
1523 return []
1525 if current_depot == 'v8_bleeding_edge':
1526 self.ChangeToDepotWorkingDirectory('chromium')
1528 shutil.move('v8', 'v8.bak')
1529 shutil.move('v8_bleeding_edge', 'v8')
1531 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
1532 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
1534 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
1535 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
1537 self.ChangeToDepotWorkingDirectory(current_depot)
1539 depot_revision_list = self.GetRevisionList(current_depot,
1540 end_revision,
1541 start_revision)
1543 self.ChangeToDepotWorkingDirectory('chromium')
1545 return depot_revision_list
1547 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
1548 """Gathers reference values by running the performance tests on the
1549 known good and bad revisions.
1551 Args:
1552 good_rev: The last known good revision where the performance regression
1553 has not occurred yet.
1554 bad_rev: A revision where the performance regression has already occurred.
1555 cmd: The command to execute the performance test.
1556 metric: The metric being tested for regression.
1558 Returns:
1559 A tuple with the results of building and running each revision.
1561 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
1562 target_depot,
1563 cmd,
1564 metric)
1566 good_run_results = None
1568 if not bad_run_results[1]:
1569 good_run_results = self.SyncBuildAndRunRevision(good_rev,
1570 target_depot,
1571 cmd,
1572 metric)
1574 return (bad_run_results, good_run_results)
1576 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
1577 """Adds new revisions to the revision_data dict and initializes them.
1579 Args:
1580 revisions: List of revisions to add.
1581 depot: Depot that's currently in use (src, webkit, etc...)
1582 sort: Sorting key for displaying revisions.
1583 revision_data: A dict to add the new revisions into. Existing revisions
1584 will have their sort keys offset.
1587 num_depot_revisions = len(revisions)
1589 for k, v in revision_data.iteritems():
1590 if v['sort'] > sort:
1591 v['sort'] += num_depot_revisions
1593 for i in xrange(num_depot_revisions):
1594 r = revisions[i]
1596 revision_data[r] = {'revision' : r,
1597 'depot' : depot,
1598 'value' : None,
1599 'passed' : '?',
1600 'sort' : i + sort + 1}
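# Illustrative effect (the sort values are made up): inserting three webkit
# revisions at sort position 5 shifts every existing entry with sort > 5 up by
# three and gives the new entries sort keys 6, 7 and 8, keeping the overall
# ordering of revision_data consistent.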
1602 def PrintRevisionsToBisectMessage(self, revision_list, depot):
1603 if self.opts.output_buildbot_annotations:
1604 step_name = 'Bisection Range: [%s - %s]' % (
1605 revision_list[len(revision_list)-1], revision_list[0])
1606 bisect_utils.OutputAnnotationStepStart(step_name)
1608 print
1609 print 'Revisions to bisect on [%s]:' % depot
1610 for revision_id in revision_list:
1611 print ' -> %s' % (revision_id, )
1612 print
1614 if self.opts.output_buildbot_annotations:
1615 bisect_utils.OutputAnnotationStepClosed()
1617 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
1618 """Checks to see if changes to DEPS file occurred, and that the revision
1619 range also includes the change to .DEPS.git. If it doesn't, attempts to
1620 expand the revision range to include it.
1622 Args:
1623 bad_revision: First known bad revision.
1624 good_revision: Last known good revision.
1626 Returns:
1627 A tuple with the new bad and good revisions.
1629 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
1630 changes_to_deps = self.source_control.QueryFileRevisionHistory(
1631 'DEPS', good_revision, bad_revision)
1633 if changes_to_deps:
1634 # DEPS file was changed, search from the oldest change to DEPS file to
1635 # bad_revision to see if there are matching .DEPS.git changes.
1636 oldest_deps_change = changes_to_deps[-1]
1637 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
1638 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
1640 if len(changes_to_deps) != len(changes_to_gitdeps):
1641 # Grab the timestamp of the last DEPS change
1642 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
1643 output = CheckRunGit(cmd)
1644 commit_time = int(output)
1646 # Try looking for a commit that touches the .DEPS.git file in the
1647 # next 15 minutes after the DEPS file change.
1648 cmd = ['log', '--format=%H', '-1',
1649 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
1650 'origin/master', bisect_utils.FILE_DEPS_GIT]
1651 output = CheckRunGit(cmd)
1652 output = output.strip()
1653 if output:
1654 self.warnings.append('Detected change to DEPS and modified '
1655 'revision range to include change to .DEPS.git')
1656 return (output, good_revision)
1657 else:
1658 self.warnings.append('Detected change to DEPS but couldn\'t find '
1659 'matching change to .DEPS.git')
1660 return (bad_revision, good_revision)
1662 def CheckIfRevisionsInProperOrder(self,
1663 target_depot,
1664 good_revision,
1665 bad_revision):
1666 """Checks that |good_revision| is an earlier revision than |bad_revision|.
1668 Args:
1669 good_revision: Number/tag of the known good revision.
1670 bad_revision: Number/tag of the known bad revision.
1672 Returns:
1673 True if the revisions are in the proper order (good earlier than bad).
1675 if self.source_control.IsGit() and target_depot != 'cros':
1676 cmd = ['log', '--format=%ct', '-1', good_revision]
1677 output = CheckRunGit(cmd)
1678 good_commit_time = int(output)
1680 cmd = ['log', '--format=%ct', '-1', bad_revision]
1681 output = CheckRunGit(cmd)
1682 bad_commit_time = int(output)
1684 return good_commit_time <= bad_commit_time
1685 else:
1686 # Cros/svn use integers
1687 return int(good_revision) <= int(bad_revision)
1689 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
1690 """Given known good and bad revisions, run a binary search on all
1691 intermediate revisions to determine the CL where the performance regression
1692 occurred.
1694 Args:
1695 command_to_run: Specify the command to execute the performance test.
1696 good_revision: Number/tag of the known good revision.
1697 bad_revision: Number/tag of the known bad revision.
1698 metric: The performance metric to monitor.
1700 Returns:
1701 A dict with 2 members, 'revision_data' and 'error'. On success,
1702 'revision_data' will contain a dict mapping revision ids to
1703 data about that revision. Each piece of revision data consists of a
1704 dict with the following keys:
1706 'passed': Represents whether the performance test was successful at
1707 that revision. Possible values include: 1 (passed), 0 (failed),
1708 '?' (skipped), 'F' (build failed).
1709 'depot': The depot that this revision is from (ie. WebKit)
1710 'external': If the revision is a 'src' revision, 'external' contains
1711 the revisions of each of the external libraries.
1712 'sort': A sort value for sorting the dict in order of commits.
1714 For example:
1716 'error':None,
1717 'revision_data':
1719 'CL #1':
1721 'passed':False,
1722 'depot':'chromium',
1723 'external':None,
1724 'sort':0
1729 If an error occurred, the 'error' field will contain the message and
1730 'revision_data' will be empty.
1733 results = {'revision_data' : {},
1734 'error' : None}
1736 # Choose depot to bisect first
1737 target_depot = 'chromium'
1738 if self.opts.target_platform == 'cros':
1739 target_depot = 'cros'
1741 cwd = os.getcwd()
1742 self.ChangeToDepotWorkingDirectory(target_depot)
1744 # If they passed SVN CLs, etc... we can try to match them to git SHA1s.
1745 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
1746 target_depot, 100)
1747 good_revision = self.source_control.ResolveToRevision(good_revision_in,
1748 target_depot, -100)
1750 os.chdir(cwd)
1753 if bad_revision is None:
1754 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
1755 return results
1757 if good_revision is None:
1758 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
1759 return results
1761 # Check that they didn't accidentally swap good and bad revisions.
1762 if not self.CheckIfRevisionsInProperOrder(
1763 target_depot, good_revision, bad_revision):
1764 results['error'] = 'bad_revision < good_revision, did you swap these '\
1765 'by mistake?'
1766 return results
1768 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
1769 bad_revision, good_revision)
1771 if self.opts.output_buildbot_annotations:
1772 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
1774 print 'Gathering revision range for bisection.'
1776 # Retrieve a list of revisions to do bisection on.
1777 src_revision_list = self.GetRevisionList(target_depot,
1778 bad_revision,
1779 good_revision)
1781 if self.opts.output_buildbot_annotations:
1782 bisect_utils.OutputAnnotationStepClosed()
1784 if src_revision_list:
1785 # revision_data will store information about a revision such as the
1786 # depot it came from, the webkit/V8 revision at that time,
1787 # performance timing, build state, etc...
1788 revision_data = results['revision_data']
1790 # revision_list is the list we're binary searching through at the moment.
1791 revision_list = []
1793 sort_key_ids = 0
1795 for current_revision_id in src_revision_list:
1796 sort_key_ids += 1
1798 revision_data[current_revision_id] = {'value' : None,
1799 'passed' : '?',
1800 'depot' : target_depot,
1801 'external' : None,
1802 'sort' : sort_key_ids}
1803 revision_list.append(current_revision_id)
1805 min_revision = 0
1806 max_revision = len(revision_list) - 1
1808 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
1810 if self.opts.output_buildbot_annotations:
1811 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
1813 print 'Gathering reference values for bisection.'
1815 # Perform the performance tests on the good and bad revisions, to get
1816 # reference values.
1817 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
1818 bad_revision,
1819 command_to_run,
1820 metric,
1821 target_depot)
1823 if self.opts.output_buildbot_annotations:
1824 bisect_utils.OutputAnnotationStepClosed()
1826 if bad_results[1]:
1827 results['error'] = ('An error occurred while building and running '
1828 'the \'bad\' reference value. The bisect cannot continue without '
1829 'a working \'bad\' revision to start from.\n\nError: %s' %
1830 bad_results[0])
1831 return results
1833 if good_results[1]:
1834 results['error'] = ('An error occurred while building and running '
1835 'the \'good\' reference value. The bisect cannot continue without '
1836 'a working \'good\' revision to start from.\n\nError: %s' %
1837 good_results[0])
1838 return results
1841 # We need these reference values to determine if later runs should be
1842 # classified as pass or fail.
1843 known_bad_value = bad_results[0]
1844 known_good_value = good_results[0]
1846 # Can just mark the good and bad revisions explicitly here since we
1847 # already know the results.
1848 bad_revision_data = revision_data[revision_list[0]]
1849 bad_revision_data['external'] = bad_results[2]
1850 bad_revision_data['passed'] = False
1851 bad_revision_data['value'] = known_bad_value
1853 good_revision_data = revision_data[revision_list[max_revision]]
1854 good_revision_data['external'] = good_results[2]
1855 good_revision_data['passed'] = True
1856 good_revision_data['value'] = known_good_value
1858 next_revision_depot = target_depot
1860 while True:
1861 if not revision_list:
1862 break
1864 min_revision_data = revision_data[revision_list[min_revision]]
1865 max_revision_data = revision_data[revision_list[max_revision]]
1867 if max_revision - min_revision <= 1:
1868 current_depot = min_revision_data['depot']
1869 if min_revision_data['passed'] == '?':
1870 next_revision_index = min_revision
1871 elif max_revision_data['passed'] == '?':
1872 next_revision_index = max_revision
1873 elif current_depot in ['cros', 'chromium', 'v8']:
1874 previous_revision = revision_list[min_revision]
1875 # If there were changes to any of the external libraries we track,
1876 # we should bisect the changes there as well.
1877 external_depot = self.FindNextDepotToBisect(
1878 previous_revision, min_revision_data, max_revision_data)
1880 # If there was no change in any of the external depots, the search
1881 # is over.
1882 if not external_depot:
1883 if current_depot == 'v8':
1884 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
1885 'continue any further. The script can only bisect into '
1886 'V8\'s bleeding_edge repository if both the current and '
1887 'previous revisions in trunk map directly to revisions in '
1888 'bleeding_edge.')
1889 break
1891 earliest_revision = max_revision_data['external'][external_depot]
1892 latest_revision = min_revision_data['external'][external_depot]
1894 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
1895 latest_revision,
1896 earliest_revision,
1897 next_revision_depot,
1898 previous_revision)
1900 if not new_revision_list:
1901 results['error'] = 'An error occurred attempting to retrieve'\
1902 ' revision range: [%s..%s]' %\
1903 (earliest_revision, latest_revision)
1904 return results
1906 self.AddRevisionsIntoRevisionData(new_revision_list,
1907 external_depot,
1908 min_revision_data['sort'],
1909 revision_data)
1911 # Reset the bisection and perform it on the newly inserted
1912 # changelists.
1913 revision_list = new_revision_list
1914 min_revision = 0
1915 max_revision = len(revision_list) - 1
1916 sort_key_ids += len(revision_list)
1918 print 'Regression in metric: %s appears to be the result of changes'\
1919 ' in [%s].' % (metric, external_depot)
1921 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
1923 continue
1924 else:
1925 break
1926 else:
1927 next_revision_index = int((max_revision - min_revision) / 2) +\
1928 min_revision
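# Worked example of the midpoint step above (hypothetical indices): with
# min_revision = 0 and max_revision = 8, next_revision_index becomes
# int((8 - 0) / 2) + 0 = 4. After that revision is built and tested below,
# either max_revision or min_revision is moved to 4, so each iteration
# roughly halves the untested range.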
1930 next_revision_id = revision_list[next_revision_index]
1931 next_revision_data = revision_data[next_revision_id]
1932 next_revision_depot = next_revision_data['depot']
1934 self.ChangeToDepotWorkingDirectory(next_revision_depot)
1936 if self.opts.output_buildbot_annotations:
1937 step_name = 'Working on [%s]' % next_revision_id
1938 bisect_utils.OutputAnnotationStepStart(step_name)
1940 print 'Working on revision: [%s]' % next_revision_id
1942 run_results = self.SyncBuildAndRunRevision(next_revision_id,
1943 next_revision_depot,
1944 command_to_run,
1945 metric, skippable=True)
1947 # If the build is successful, check whether or not the metric
1948 # had regressed.
1949 if not run_results[1]:
1950 if len(run_results) > 2:
1951 next_revision_data['external'] = run_results[2]
1953 passed_regression = self.CheckIfRunPassed(run_results[0],
1954 known_good_value,
1955 known_bad_value)
1957 next_revision_data['passed'] = passed_regression
1958 next_revision_data['value'] = run_results[0]
1960 if passed_regression:
1961 max_revision = next_revision_index
1962 else:
1963 min_revision = next_revision_index
1964 else:
1965 if run_results[1] == BUILD_RESULT_SKIPPED:
1966 next_revision_data['passed'] = 'Skipped'
1967 elif run_results[1] == BUILD_RESULT_FAIL:
1968 next_revision_data['passed'] = 'Build Failed'
1970 print run_results[0]
1972 # If the build is broken, remove it and redo the search.
1973 revision_list.pop(next_revision_index)
1975 max_revision -= 1
1977 if self.opts.output_buildbot_annotations:
1978 bisect_utils.OutputAnnotationStepClosed()
1979 else:
1980 # Weren't able to sync and retrieve the revision range.
1981 results['error'] = 'An error occurred attempting to retrieve revision '\
1982 'range: [%s..%s]' % (good_revision, bad_revision)
1984 return results
1986 def FormatAndPrintResults(self, bisect_results):
1987 """Prints the results from a bisection run in a readable format.
1989 Args:
1990 bisect_results: The results from a bisection test run.
1991 """
1992 if bisect_results['error']:
1993 if self.opts.output_buildbot_annotations:
1994 bisect_utils.OutputAnnotationStepStart('Results - Bisect Failed')
1996 print
1997 print bisect_results['error']
1998 print
2000 if self.opts.output_buildbot_annotations:
2001 bisect_utils.OutputAnnotationStepClosed()
2002 return
2004 revision_data = bisect_results['revision_data']
2005 revision_data_sorted = sorted(revision_data.iteritems(),
2006 key = lambda x: x[1]['sort'])
2008 if self.opts.output_buildbot_annotations:
2009 bisect_utils.OutputAnnotationStepStart('Results')
2011 print
2012 print 'Full results of bisection:'
2013 for current_id, current_data in revision_data_sorted:
2014 build_status = current_data['passed']
2016 if type(build_status) is bool:
2017 if build_status:
2018 build_status = 'Good'
2019 else:
2020 build_status = 'Bad'
2022 print ' %20s %40s %s' % (current_data['depot'],
2023 current_id, build_status)
2024 print
2026 # Find the range where the regression may have occurred.
2027 first_working_revision = None
2028 first_working_revision_index = -1
2029 last_broken_revision = None
2030 last_broken_revision_index = -1
2032 for i in xrange(len(revision_data_sorted)):
2033 k, v = revision_data_sorted[i]
2034 if v['passed'] == 1:
2035 if not first_working_revision:
2036 first_working_revision = k
2037 first_working_revision_index = i
2039 if not v['passed']:
2040 last_broken_revision = k
2041 last_broken_revision_index = i
2043 print
2044 print 'Tested commits:'
2045 print ' %20s %40s %12s %14s %13s' % ('Depot'.center(20, ' '),
2046 'Commit SHA'.center(40, ' '), 'Mean'.center(12, ' '),
2047 'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
2048 state = 0
2049 for current_id, current_data in revision_data_sorted:
2050 if current_data['value']:
2051 if (current_id == last_broken_revision or
2052 current_id == first_working_revision):
2053 print
2054 state += 1
2056 state_str = 'Bad'
2057 if state == 1:
2058 state_str = 'Suspected CL'
2059 elif state == 2:
2060 state_str = 'Good'
2061 state_str = state_str.center(13, ' ')
2063 std_error = ('+-%.02f' %
2064 current_data['value']['std_err']).center(14, ' ')
2065 mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
2066 print ' %20s %40s %12s %14s %13s' % (
2067 current_data['depot'].center(20, ' '), current_id, mean,
2068 std_error, state_str)
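# Roughly, the loop above prints something like (illustrative values and
# hypothetical SHA1s only):
#         chromium     <sha1-of-broken-revision>    105.20     +-0.80      Bad
#         chromium     <sha1-of-working-revision>   100.10     +-0.75      Good
# A blank line is emitted and the State column advances
# (Bad -> Suspected CL -> Good) at the last broken / first working revisions.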
2070 if last_broken_revision is not None and first_working_revision is not None:
2071 # Give a "confidence" in the bisect. At the moment we use how distinct the
2072 # values are before and after the last broken revision, and how noisy the
2073 # overall graph is.
2074 bounds_broken = [revision_data[last_broken_revision]['value']['mean'],
2075 revision_data[last_broken_revision]['value']['mean']]
2076 broken_mean = []
2077 for i in xrange(0, last_broken_revision_index + 1):
2078 if revision_data_sorted[i][1]['value']:
2079 bounds_broken[0] = min(bounds_broken[0],
2080 revision_data_sorted[i][1]['value']['mean'])
2081 bounds_broken[1] = max(bounds_broken[1],
2082 revision_data_sorted[i][1]['value']['mean'])
2083 broken_mean.extend(revision_data_sorted[i][1]['value']['values'])
2085 bounds_working = [revision_data[first_working_revision]['value']['mean'],
2086 revision_data[first_working_revision]['value']['mean']]
2087 working_mean = []
2088 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
2089 if revision_data_sorted[i][1]['value']:
2090 bounds_working[0] = min(bounds_working[0],
2091 revision_data_sorted[i][1]['value']['mean'])
2092 bounds_working[1] = max(bounds_working[1],
2093 revision_data_sorted[i][1]['value']['mean'])
2094 working_mean.extend(revision_data_sorted[i][1]['value']['values'])
2096 dist_between_groups = min(math.fabs(bounds_broken[1] - bounds_working[0]),
2097 math.fabs(bounds_broken[0] - bounds_working[1]))
2098 len_working_group = CalculateStandardError(working_mean)
2099 len_broken_group = CalculateStandardError(broken_mean)
2101 confidence = (dist_between_groups / (
2102 max(0.0001, (len_broken_group + len_working_group ))))
2103 confidence = min(1.0, max(confidence, 0.0)) * 100.0
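# Worked example with hypothetical numbers: if the "broken" means span
# [108.0, 112.0] and the "working" means span [98.0, 102.0], then
# dist_between_groups = min(|112 - 98|, |108 - 102|) = 6.0. With standard
# errors of, say, 1.0 and 0.5 for the two groups, confidence = 6.0 / 1.5 =
# 4.0, clamped to 1.0 and reported as 100%. Noisier or overlapping groups
# drive the ratio (and the reported confidence) toward 0.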
2105 print
2106 print 'Confidence in Bisection Results: %d%%' % int(confidence)
2107 print
2109 print 'Results: Regression may have occurred in range:'
2110 print ' -> First Bad Revision: [%40s] [%s]' %\
2111 (last_broken_revision,
2112 revision_data[last_broken_revision]['depot'])
2113 print ' -> Last Good Revision: [%40s] [%s]' %\
2114 (first_working_revision,
2115 revision_data[first_working_revision]['depot'])
2117 cwd = os.getcwd()
2118 self.ChangeToDepotWorkingDirectory(
2119 revision_data[last_broken_revision]['depot'])
2121 if revision_data[last_broken_revision]['depot'] == 'cros':
2122 # We want to get a list of all the commits and which depots they belong
2123 # to, so that we can grab info about each.
2124 cmd = ['repo', 'forall', '-c',
2125 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2126 last_broken_revision, first_working_revision + 1)]
2127 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
2129 changes = []
2131 assert not return_code, 'An error occurred while running'\
2132 ' "%s"' % ' '.join(cmd)
2134 last_depot = None
2135 cwd = os.getcwd()
2136 for l in output.split('\n'):
2137 if l:
2138 # Output will be in the form:
2139 # /path_to_depot
2140 # /path_to_other_depot
2141 # <SHA1>
2142 # /path_again
2143 # <SHA1>
2144 # etc.
2145 if l[0] == '/':
2146 last_depot = l
2147 else:
2148 contents = l.split(' ')
2149 if len(contents) > 1:
2150 changes.append([last_depot, contents[0]])
2152 print
2153 for c in changes:
2154 os.chdir(c[0])
2155 info = self.source_control.QueryRevisionInfo(c[1])
2157 print
2158 print 'Commit : %s' % c[1]
2159 print 'Author : %s' % info['author']
2160 print 'Email : %s' % info['email']
2161 print 'Date : %s' % info['date']
2162 print 'Subject : %s' % info['subject']
2163 print
2164 else:
2165 multiple_commits = 0
2166 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
2167 k, v = revision_data_sorted[i]
2168 if k == first_working_revision:
2169 break
2171 self.ChangeToDepotWorkingDirectory(v['depot'])
2173 info = self.source_control.QueryRevisionInfo(k)
2175 print
2176 print 'Commit : %s' % k
2177 print 'Author : %s' % info['author']
2178 print 'Email : %s' % info['email']
2179 print 'Date : %s' % info['date']
2180 print 'Subject : %s' % info['subject']
2182 multiple_commits += 1
2183 if multiple_commits > 1:
2184 self.warnings.append('Due to build errors, regression range could'
2185 ' not be narrowed down to a single commit.')
2186 print
2187 os.chdir(cwd)
2189 # Give a warning if the values were very close together
2190 good_std_dev = revision_data[first_working_revision]['value']['std_err']
2191 good_mean = revision_data[first_working_revision]['value']['mean']
2192 bad_mean = revision_data[last_broken_revision]['value']['mean']
2194 # A standard deviation of 0 could indicate either insufficient runs
2195 # or a test that consistently returns the same value.
2196 if self.opts.repeat_test_count == 1:
2197 self.warnings.append('Tests were only set to run once. This '
2198 'may be insufficient to get meaningful results.')
2200 # Check for any other possible regression ranges
2201 prev_revision_data = revision_data_sorted[0][1]
2202 prev_revision_id = revision_data_sorted[0][0]
2203 possible_regressions = []
2204 for current_id, current_data in revision_data_sorted:
2205 if current_data['value']:
2206 prev_mean = prev_revision_data['value']['mean']
2207 cur_mean = current_data['value']['mean']
2209 if good_std_dev:
2210 deviations = math.fabs(prev_mean - cur_mean) / good_std_dev
2211 else:
2212 deviations = None
2214 if good_mean:
2215 percent_change = (prev_mean - cur_mean) / good_mean
2217 # If the "good" values are supposed to be higher than the "bad"
2218 # values (i.e. scores), flip the sign of the percent change so that
2219 # a positive value always represents a regression.
2220 if bad_mean < good_mean:
2221 percent_change *= -1.0
2222 else:
2223 percent_change = None
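# Illustrative numbers for the two branches above (walking from the bad end
# of the sorted list toward the good end): with good_std_dev = 2.0,
# good_mean = 100.0, bad_mean = 110.0, prev_mean = 110.0 and
# cur_mean = 100.0, deviations = |110 - 100| / 2.0 = 5.0 and
# percent_change = (110 - 100) / 100 = +0.10; since bad_mean > good_mean
# (lower is better here) the sign is left alone, so +10% reads as a
# regression and the 1.5-sigma / 1% thresholds below would flag this step
# (the pair at the already-reported first working revision is skipped).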
2225 if deviations >= 1.5 or percent_change > 0.01:
2226 if current_id != first_working_revision:
2227 possible_regressions.append(
2228 [current_id, prev_revision_id, percent_change, deviations])
2229 prev_revision_data = current_data
2230 prev_revision_id = current_id
2232 if possible_regressions:
2233 print
2234 print 'Other regressions may have occurred:'
2235 print
2236 for p in possible_regressions:
2237 current_id = p[0]
2238 percent_change = p[2]
2239 deviations = p[3]
2240 current_data = revision_data[current_id]
2241 previous_id = p[1]
2242 previous_data = revision_data[previous_id]
2244 if deviations is None:
2245 deviations = 'N/A'
2246 else:
2247 deviations = '%.2f' % deviations
2249 if percent_change is None:
2250 percent_change = 0
2252 print ' %8s %s [%.2f%%, %s x std.dev]' % (
2253 previous_data['depot'], previous_id, 100 * percent_change,
2254 deviations)
2255 print ' %8s %s' % (
2256 current_data['depot'], current_id)
2257 print
2259 if self.warnings:
2260 print
2261 print 'The following warnings were generated:'
2262 print
2263 for w in self.warnings:
2264 print ' - %s' % w
2265 print
2267 if self.opts.output_buildbot_annotations:
2268 bisect_utils.OutputAnnotationStepClosed()
2271 def DetermineAndCreateSourceControl(opts):
2272 """Attempts to determine the underlying source control workflow and returns
2273 a SourceControl object.
2275 Returns:
2276 An instance of a SourceControl object, or None if the current workflow
2277 is unsupported.
2278 """
2280 (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])
2282 if output.strip() == 'true':
2283 return GitSourceControl(opts)
2285 return None
2288 def SetNinjaBuildSystemDefault():
2289 """Makes ninja the default build system to be used by
2290 the bisection script."""
2291 gyp_var = os.getenv('GYP_GENERATORS')
2293 if not gyp_var or not 'ninja' in gyp_var:
2294 if gyp_var:
2295 os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
2296 else:
2297 os.environ['GYP_GENERATORS'] = 'ninja'
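# For example (illustrative): GYP_GENERATORS='make' becomes 'make,ninja',
# an unset GYP_GENERATORS becomes 'ninja', and a value that already
# mentions ninja is left untouched.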
2299 if IsWindows():
2300 os.environ['GYP_DEFINES'] = 'component=shared_library '\
2301 'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
2302 'chromium_win_pch=0'
2305 def SetMakeBuildSystemDefault():
2306 """Makes make the default build system to be used by
2307 the bisection script."""
2308 os.environ['GYP_GENERATORS'] = 'make'
2311 def CheckPlatformSupported(opts):
2312 """Checks that this platform and build system are supported.
2314 Args:
2315 opts: The options parsed from the command line.
2317 Returns:
2318 True if the platform and build system are supported.
2319 """
2320 # Haven't tested the script out on any other platforms yet.
2321 supported = ['posix', 'nt']
2322 if not os.name in supported:
2323 print "Sorry, this platform isn't supported yet."
2324 print
2325 return False
2327 if IsWindows():
2328 if not opts.build_preference:
2329 opts.build_preference = 'msvs'
2331 if opts.build_preference == 'msvs':
2332 if not os.getenv('VS100COMNTOOLS'):
2333 print 'Error: Path to Visual Studio could not be determined.'
2334 print
2335 return False
2336 elif opts.build_preference == 'ninja':
2337 SetNinjaBuildSystemDefault()
2338 else:
2339 assert False, 'Error: %s build not supported' % opts.build_preference
2340 else:
2341 if not opts.build_preference:
2342 if 'ninja' in os.getenv('GYP_GENERATORS', ''):
2343 opts.build_preference = 'ninja'
2344 else:
2345 opts.build_preference = 'make'
2347 if opts.build_preference == 'ninja':
2348 SetNinjaBuildSystemDefault()
2349 elif opts.build_preference == 'make':
2350 SetMakeBuildSystemDefault()
2351 else:
2352 assert False, 'Error: %s build not supported' % opts.build_preference
2354 bisect_utils.RunGClient(['runhooks'])
2356 return True
2359 def RmTreeAndMkDir(path_to_dir):
2360 """Removes the directory tree specified, and then creates an empty
2361 directory in the same location.
2363 Args:
2364 path_to_dir: Path to the directory tree.
2366 Returns:
2367 True if successful, False if an error occurred.
2368 """
2369 try:
2370 if os.path.exists(path_to_dir):
2371 shutil.rmtree(path_to_dir)
2372 except OSError, e:
2373 if e.errno != errno.ENOENT:
2374 return False
2376 try:
2377 os.makedirs(path_to_dir)
2378 except OSError, e:
2379 if e.errno != errno.EEXIST:
2380 return False
2382 return True
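# Illustrative usage, assuming the out/Release layout used below:
#   RmTreeAndMkDir(os.path.join('out', 'Release'))
# wipes any existing out/Release tree and recreates it empty, returning
# False only when removal or creation fails for a reason other than a
# missing (ENOENT) or already-existing (EEXIST) directory.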
2385 def RemoveBuildFiles():
2386 """Removes build files from previous runs."""
2387 if RmTreeAndMkDir(os.path.join('out', 'Release')):
2388 if RmTreeAndMkDir(os.path.join('build', 'Release')):
2389 return True
2390 return False
2393 def main():
2395 usage = ('%prog [options] [-- chromium-options]\n'
2396 'Perform binary search on revision history to find a minimal '
2397 'range of revisions where a performance metric regressed.\n')
2399 parser = optparse.OptionParser(usage=usage)
2401 parser.add_option('-c', '--command',
2402 type='str',
2403 help='A command to execute your performance test at' +
2404 ' each point in the bisection.')
2405 parser.add_option('-b', '--bad_revision',
2406 type='str',
2407 help='A bad revision to start bisection. ' +
2408 'Must be later than good revision. May be either a git' +
2409 ' or svn revision.')
2410 parser.add_option('-g', '--good_revision',
2411 type='str',
2412 help='A revision to start bisection where performance' +
2413 ' test is known to pass. Must be earlier than the ' +
2414 'bad revision. May be either a git or svn revision.')
2415 parser.add_option('-m', '--metric',
2416 type='str',
2417 help='The desired metric to bisect on. For example ' +
2418 '"vm_rss_final_b/vm_rss_f_b"')
2419 parser.add_option('-w', '--working_directory',
2420 type='str',
2421 help='Path to the working directory where the script will '
2422 'do an initial checkout of the chromium depot. The '
2423 'files will be placed in a subdirectory "bisect" under '
2424 'working_directory and that will be used to perform the '
2425 'bisection. This parameter is optional; if it is not '
2426 'supplied, the script will work from the current depot.')
2427 parser.add_option('-r', '--repeat_test_count',
2428 type='int',
2429 default=20,
2430 help='The number of times to repeat the performance test. '
2431 'Values will be clamped to range [1, 100]. '
2432 'Default value is 20.')
2433 parser.add_option('--repeat_test_max_time',
2434 type='int',
2435 default=20,
2436 help='The maximum time (in minutes) to take running the '
2437 'performance tests. The script will run the performance '
2438 'tests according to --repeat_test_count, so long as it '
2439 'doesn\'t exceed --repeat_test_max_time. Values will be '
2440 'clamped to range [1, 60]. '
2441 'Default value is 20.')
2442 parser.add_option('-t', '--truncate_percent',
2443 type='int',
2444 default=25,
2445 help='The highest/lowest % are discarded to form a '
2446 'truncated mean. Values will be clamped to range [0, 25]. '
2447 'Default value is 25 (highest/lowest 25% will be '
2448 'discarded).')
2449 parser.add_option('--build_preference',
2450 type='choice',
2451 choices=['msvs', 'ninja', 'make'],
2452 help='The preferred build system to use. On Linux/Mac '
2453 'the options are make/ninja. On Windows, the options '
2454 'are msvs/ninja.')
2455 parser.add_option('--target_platform',
2456 type='choice',
2457 choices=['chromium', 'cros', 'android'],
2458 default='chromium',
2459 help='The target platform. Choices are "chromium" (current '
2460 'platform), "cros", or "android". If you specify something '
2461 'other than "chromium", you must be properly set up to '
2462 'build that platform.')
2463 parser.add_option('--no_custom_deps',
2464 dest='no_custom_deps',
2465 action="store_true",
2466 default=False,
2467 help='Run the script without using custom_deps.')
2468 parser.add_option('--cros_board',
2469 type='str',
2470 help='The cros board type to build.')
2471 parser.add_option('--cros_remote_ip',
2472 type='str',
2473 help='The remote machine to image to.')
2474 parser.add_option('--use_goma',
2475 action="store_true",
2476 help='Add extra build threads for use with goma.')
2477 parser.add_option('--output_buildbot_annotations',
2478 action="store_true",
2479 help='Add extra annotation output for buildbot.')
2480 parser.add_option('--debug_ignore_build',
2481 action="store_true",
2482 help='DEBUG: Don\'t perform builds.')
2483 parser.add_option('--debug_ignore_sync',
2484 action="store_true",
2485 help='DEBUG: Don\'t perform syncs.')
2486 parser.add_option('--debug_ignore_perf_test',
2487 action="store_true",
2488 help='DEBUG: Don\'t perform performance tests.')
2489 (opts, args) = parser.parse_args()
2491 if not opts.command:
2492 print 'Error: missing required parameter: --command'
2493 print
2494 parser.print_help()
2495 return 1
2497 if not opts.good_revision:
2498 print 'Error: missing required parameter: --good_revision'
2499 print
2500 parser.print_help()
2501 return 1
2503 if not opts.bad_revision:
2504 print 'Error: missing required parameter: --bad_revision'
2505 print
2506 parser.print_help()
2507 return 1
2509 if not opts.metric:
2510 print 'Error: missing required parameter: --metric'
2511 print
2512 parser.print_help()
2513 return 1
2515 if opts.target_platform == 'cros':
2516 # Run sudo up front to make sure credentials are cached for later.
2517 print 'Sudo is required to build cros:'
2518 print
2519 RunProcess(['sudo', 'true'])
2521 if not opts.cros_board:
2522 print 'Error: missing required parameter: --cros_board'
2523 print
2524 parser.print_help()
2525 return 1
2527 if not opts.cros_remote_ip:
2528 print 'Error: missing required parameter: --cros_remote_ip'
2529 print
2530 parser.print_help()
2531 return 1
2533 if not opts.working_directory:
2534 print 'Error: missing required parameter: --working_directory'
2535 print
2536 parser.print_help()
2537 return 1
2539 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
2540 opts.repeat_test_max_time = min(max(opts.repeat_test_max_time, 1), 60)
2541 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
2542 opts.truncate_percent = opts.truncate_percent / 100.0
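# Worked example of the clamping above (hypothetical flags):
# '-r 500 --repeat_test_max_time 90 -t 30' ends up as repeat_test_count=100,
# repeat_test_max_time=60 and truncate_percent=0.25, i.e. the highest and
# lowest 25% of samples are discarded when computing the truncated mean.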
2544 metric_values = opts.metric.split('/')
2545 if len(metric_values) != 2:
2546 print "Invalid metric specified: [%s]" % (opts.metric,)
2547 print
2548 return 1
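# For example, '-m vm_rss_final_b/vm_rss_f_b' (the form shown in the --metric
# help above) splits into ['vm_rss_final_b', 'vm_rss_f_b']; any value that
# does not contain exactly one '/' is rejected here.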
2550 if opts.working_directory:
2551 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
2552 if opts.no_custom_deps:
2553 custom_deps = None
2554 if bisect_utils.CreateBisectDirectoryAndSetupDepot(opts,
2555 custom_deps):
2556 return 1
2559 if not bisect_utils.SetupPlatformBuildEnvironment(opts):
2560 print 'Error: Failed to set platform environment.'
2561 print
2562 return 1
2564 os.chdir(os.path.join(os.getcwd(), 'src'))
2566 if not RemoveBuildFiles():
2567 print "Something went wrong removing the build files."
2568 print
2569 return 1
2571 if not CheckPlatformSupported(opts):
2572 return 1
2574 # Check what source control method they're using. Only the git workflow is
2575 # supported at the moment.
2576 source_control = DetermineAndCreateSourceControl(opts)
2578 if not source_control:
2579 print "Sorry, only the git workflow is supported at the moment."
2580 print
2581 return 1
2583 # gclient sync seems to fail if you're not on the master branch.
2584 if not source_control.IsInProperBranch() and not opts.debug_ignore_sync:
2585 print "You must switch to the master branch to run the bisection."
2586 print
2587 return 1
2589 bisect_test = BisectPerformanceMetrics(source_control, opts)
2590 try:
2591 bisect_results = bisect_test.Run(opts.command,
2592 opts.bad_revision,
2593 opts.good_revision,
2594 metric_values)
2595 bisect_test.FormatAndPrintResults(bisect_results)
2596 finally:
2597 bisect_test.PerformCleanup()
2599 if not bisect_results['error']:
2600 return 0
2601 else:
2602 return 1
2604 if __name__ == '__main__':
2605 sys.exit(main())