# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.

An example usage (using svn cl's):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an svn revision,
the script will attempt to find the git SHA1 where svn changes up to that
revision were merged in.

An example usage (using git hashes):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""

import errno
import math
import optparse
import os
import re
import shlex
import shutil
import subprocess
import time

import bisect_utils

# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# field so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
  'webkit' : {
    "src" : "src/third_party/WebKit",
  },
  'v8' : {
    "build_with" : 'v8_bleeding_edge',
    "custom_deps" : bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "svn" : "https://v8.googlecode.com/svn/branches/bleeding_edge",
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  truncated_mean = reduce(lambda x, y: float(x) + float(y),
                          data_set) / kept_weight

  return truncated_mean
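
# Example: CalculateTruncatedMean([1, 2, 5, 100], 0.25) discards the single
# lowest and highest samples and returns (2 + 5) / 2 = 3.5, while a
# truncate_percent of 0.0 reduces to the plain arithmetic mean.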

def CalculateStandardDeviation(v):
  """Calculates the standard deviation of a set of values."""
  mean = CalculateTruncatedMean(v, 0.0)
  variances = [float(x) - mean for x in v]
  variances = [x * x for x in variances]
  variance = reduce(lambda x, y: float(x) + float(y), variances) / (len(v) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
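
# Note: the (len(v) - 1) divisor gives the unbiased sample standard deviation,
# e.g. CalculateStandardDeviation([2.0, 4.0, 6.0]) == 2.0.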

def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False


def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False

def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  return os.name == 'nt'


def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)

def RunProcessAndRetrieveOutput(command):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  proc = subprocess.Popen(command,
                          shell=shell,
                          stdout=subprocess.PIPE)

  (output, _) = proc.communicate()

  return (output, proc.returncode)


def RunGit(command):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command)

def CheckRunGit(command):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the git command.
  """
  (output, return_code) = RunGit(command)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
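
# Example: CheckRunGit(['rev-parse', '--abbrev-ref', 'HEAD']) returns the name
# of the current branch (with a trailing newline, which callers strip) and
# asserts if git exits with a non-zero status.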

def BuildWithMake(threads, targets):
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code

def BuildWithNinja(threads, targets):
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code

def BuildWithVisualStudio(targets):
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code

class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def Build(self, depot, opts):
    raise NotImplementedError()

class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def Build(self, depot, opts):
    """Builds chrome and performance_ui_tests using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chrome', 'performance_ui_tests']

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      if IsWindows():
        targets = [t + '.exe' for t in targets]
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'

    return build_success

class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def InstallAPK(self, opts):
    """Installs apk to device.

    Args:
      opts: The options parsed from the command line.

    Returns:
      True if successful.
    """
    path_to_tool = os.path.join('build', 'android', 'adb_install_apk.py')
    cmd = [path_to_tool, '--apk', 'ChromiumTestShell.apk', '--apk_package',
           'org.chromium.chrome.testshell', '--release']
    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_testshell', 'forwarder2', 'md5sum']

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    else:
      assert False, 'No build system defined.'

    if build_success:
      build_success = self.InstallAPK(opts)

    return build_success

class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    # Keys will most likely be set to 0640 after wiping the chroot.
    os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
    os.chmod(CROS_TEST_KEY_PATH, 0600)
    cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
           '--remote=%s' % opts.cros_remote_ip,
           '--board=%s' % opts.cros_board, '--test', '--verbose']

    return_code = RunProcess(cmd)
    return not return_code

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    path_to_chrome = os.path.join(os.getcwd(), '..')
    cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    path_to_chrome = os.path.join(os.getcwd(), '..')
    cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)

    return False

class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    timestamp.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)

class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control."""
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()

    self.opts = opts

  def GetRevisionList(self, revision_range_end, revision_range_start):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd)

    revision_hash_list = log_output.split()
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: Specifies whether we should sync using gclient ('gclient'),
          repo ('repo'), or just use source control directly (None).

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern]

        (log_output, return_code) = RunGit(cmd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        log_output = log_output.strip()

        if log_output:
          git_revision = log_output
          break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        return int(revision_to_check)

      os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
          'chromiumos-overlay'))
      pattern = CROS_VERSION_PATTERN % revision_to_check
      cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

      log_output = CheckRunGit(cmd)

      git_revision = log_output
      git_revision = int(log_output.strip())

      return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed, or else gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'.
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.

    Returns:
      A dict with the keys 'author', 'email', 'subject', and 'date'.
    """
    commit_info = {}

    formats = ['%cN', '%cE', '%s', '%cD']
    targets = ['author', 'email', 'subject', 'date']

    for i in xrange(len(formats)):
      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
      output = CheckRunGit(cmd)
      commit_info[targets[i]] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name])[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]

class BisectPerformanceMetrics(object):
  """BisectPerformanceMetrics performs a bisection against a range of
  revisions to narrow down where performance regressions may have
  occurred."""

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []

    if opts.target_platform == 'cros':
      self.builder = CrosBuilder()
    elif opts.target_platform == 'android':
      self.builder = AndroidBuilder()
    else:
      self.builder = DesktopBuilder()

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = self.src_cwd + DEPOT_DEPS_NAME[d]['src'][3:]

  def PerformCleanup(self):
    """Performs cleanup when script is finished."""
    os.chdir(self.src_cwd)
    for c in self.cleanup_commands:
      if c[0] == 'mv':
        shutil.move(c[1], c[2])
      else:
        assert False, 'Invalid cleanup command.'

  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision)

    return revision_work_list

  def Get3rdPartyRevisionsFromCurrentRevision(self, depot):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium':
      locals = {'Var': lambda _: locals["vars"][_],
                'From': lambda *args: None}
      execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)

      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")

      for d in DEPOT_NAMES:
        if DEPOT_DEPS_NAME[d]['recurse'] and\
            DEPOT_DEPS_NAME[d]['from'] == depot:
          if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
            re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])

            if re_results:
              results[d] = re_results.group('revision')
            else:
              return None
          else:
            return None
    elif depot == 'cros':
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      if len(output) > len(CROS_CHROMEOS_PATTERN):
        output = output[len(CROS_CHROMEOS_PATTERN):]

        output = output.split('_')[0]

        contents = output.split('.')

        version = contents[2]

        if contents[3] != '0':
          warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
              (version, contents[3], version)
          if not warningText in self.warnings:
            self.warnings.append(warningText)

        self.ChangeToDepotWorkingDirectory('chromium')
        return_code = CheckRunGit(['log', '-1', '--format=%H',
            '--author=chrome-release@google.com', '--grep=to %s' % version])

        results['chromium'] = output.strip()

    return results

  def BuildCurrentRevision(self, depot):
    """Builds chrome and performance_ui_tests on the current revision.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True

    os.chdir(self.src_cwd)

    build_success = self.builder.Build(depot, self.opts)

    return build_success

  def RunGClientHooks(self):
    """Runs gclient with runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    if self.opts.debug_ignore_build:
      return True

    return not bisect_utils.RunGClient(['runhooks'])

  def TryParseHistogramValuesFromOutput(self, metric, text):
    """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

    Args:
      metric: The metric as a list of [<trace>, <value>] strings.
      text: The text to parse the metric values from.

    Returns:
      A list of floating point numbers found.
    """
    metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

    text_lines = text.split('\n')
    values_list = []

    for current_line in text_lines:
      if metric_formatted in current_line:
        current_line = current_line[len(metric_formatted):]

        histogram_values = eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N-elements with the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])

    return values_list
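
  # Example of a histogram line this parser accepts (values are illustrative):
  #   HISTOGRAM shutdown: simple-user-quit= {"buckets":
  #       [{"low": 1, "high": 3, "count": 2}]}
  # Each bucket contributes its midpoint 'count' times, so the line above
  # yields [2.0, 2.0].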

  def TryParseResultValuesFromOutput(self, metric, text):
    """Attempts to parse a metric in the format RESULT <graph>: <trace>.

    Args:
      metric: The metric as a list of [<trace>, <value>] strings.
      text: The text to parse the metric values from.

    Returns:
      A list of floating point numbers found.
    """
    # Format is: RESULT <graph>: <trace>= <value> <units>
    metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

    text_lines = text.split('\n')
    values_list = []

    for current_line in text_lines:
      # Parse the output from the performance test for the metric we're
      # interested in.
      metric_re = metric_formatted +\
                  "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
      metric_re = re.compile(metric_re)
      regex_results = metric_re.search(current_line)

      if not regex_results is None:
        values_list += [regex_results.group('values')]
      else:
        metric_re = metric_formatted +\
                    "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
        metric_re = re.compile(metric_re)
        regex_results = metric_re.search(current_line)

        if not regex_results is None:
          metric_values = regex_results.group('values')

          values_list += metric_values.split(',')

    values_list = [float(v) for v in values_list if IsStringFloat(v)]

    # If the metric is times/t, we need to sum the timings in order to get
    # similar regression results as the try-bots.
    if metric == ['times', 't']:
      if values_list:
        values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

    return values_list
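
  # Example output lines the two regexes above match (values are illustrative):
  #   RESULT shutdown: simple-user-quit= 2345.0 ms
  #   RESULT shutdown: simple-user-quit= [2345.0,2300.5,2410.2] ms
  # The first form yields [2345.0], the second [2345.0, 2300.5, 2410.2].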

  def ParseMetricValuesFromOutput(self, metric, text):
    """Parses output from performance_ui_tests and retrieves the results for
    a given metric.

    Args:
      metric: The metric as a list of [<trace>, <value>] strings.
      text: The text to parse the metric values from.

    Returns:
      A list of floating point numbers found.
    """
    metric_values = self.TryParseResultValuesFromOutput(metric, text)

    if not metric_values:
      metric_values = self.TryParseHistogramValuesFromOutput(metric, text)

    return metric_values

  def RunPerformanceTestAndParseResults(self, command_to_run, metric):
    """Runs a performance test on the current revision by executing the
    'command_to_run' and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.

    Returns:
      On success, it will return a tuple of the average value of the metric,
      and a success code of 0.
    """
    if self.opts.debug_ignore_perf_test:
      return ({'mean': 0.0, 'std_dev': 0.0}, 0)

    if IsWindows():
      command_to_run = command_to_run.replace('/', r'\\')

    args = shlex.split(command_to_run)

    # If running a telemetry test for cros, insert the remote ip, and
    # identity parameters.
    if self.opts.target_platform == 'cros':
      if 'tools/perf/run_' in args[0]:
        args.append('--remote=%s' % self.opts.cros_remote_ip)
        args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    os.chdir(self.src_cwd)

    start_time = time.time()

    metric_values = []
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      try:
        (output, return_code) = RunProcessAndRetrieveOutput(args)
      except OSError, e:
        if e.errno == errno.ENOENT:
          err_text = ("Something went wrong running the performance test. "
                      "Please review the command line:\n\n")
          if 'src/' in ' '.join(args):
            err_text += ("Check that you haven't accidentally specified a path "
                         "with src/ in the command.\n\n")
          err_text += ' '.join(args)

          return (err_text, -1)
        raise

      if self.opts.output_buildbot_annotations:
        print output

      metric_values += self.ParseMetricValuesFromOutput(metric, output)

      elapsed_minutes = (time.time() - start_time) / 60.0

      if elapsed_minutes >= self.opts.repeat_test_max_time or not metric_values:
        break

    # Need to get the average value if there were multiple values.
    if metric_values:
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_dev = CalculateStandardDeviation(metric_values)

      values = {
        'mean': truncated_mean,
        'std_dev': standard_dev,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_dev)

      return (values, 0)

    return ('Invalid metric specified, or no values returned from '
            'performance test.', -1)

  def FindAllRevisionsToSync(self, revision, depot):
    """Finds all dependent revisions and depots that need to be synced for a
    given revision. This is only useful in the git workflow, as an svn depot
    may be split into multiple mirrors.

    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
    skia/include. To sync skia/src properly, one has to find the proper
    revisions in skia/gyp and skia/include.

    Args:
      revision: The revision to sync to.
      depot: The depot in use at the moment (probably skia).

    Returns:
      A list of [depot, revision] pairs that need to be synced.
    """
    revisions_to_sync = [[depot, revision]]

    is_base = (depot == 'chromium') or (depot == 'cros')

    # Some SVN depots were split into multiple git depots, so we need to
    # figure out for each mirror which git revision to grab. There's no
    # guarantee that the SVN revision will exist for each of the dependent
    # depots, so we have to grep the git logs and grab the next earlier one.
    if not is_base and\
       DEPOT_DEPS_NAME[depot]['depends'] and\
       self.source_control.IsGit():
      svn_rev = self.source_control.SVNFindRev(revision)

      for d in DEPOT_DEPS_NAME[depot]['depends']:
        self.ChangeToDepotWorkingDirectory(d)

        dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

        if dependant_rev:
          revisions_to_sync.append([d, dependant_rev])

      num_resolved = len(revisions_to_sync)
      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

      self.ChangeToDepotWorkingDirectory(depot)

      if not ((num_resolved - 1) == num_needed):
        return None

    return revisions_to_sync

  def PerformPreBuildCleanup(self):
    """Performs necessary cleanup between runs."""
    print 'Cleaning up between runs.'

    # Having these pyc files around between runs can confuse the
    # perf tests and cause them to crash.
    for (path, dir, files) in os.walk(self.src_cwd):
      for cur_file in files:
        if cur_file.endswith('.pyc'):
          path_to_file = os.path.join(path, cur_file)
          os.remove(path_to_file)

  def PerformWebkitDirectoryCleanup(self, revision):
    """If the script is switching between Blink and WebKit during bisect,
    it's faster to just delete the directory rather than leave it up to git.

    Returns:
      True if successful.
    """
    if not self.source_control.CheckoutFileAtRevision(
        bisect_utils.FILE_DEPS_GIT, revision):
      return False

    os.chdir(self.src_cwd)

    is_blink = bisect_utils.IsDepsFileBlink()

    if not self.source_control.RevertFileToHead(
        bisect_utils.FILE_DEPS_GIT):
      return False

    if self.was_blink != is_blink:
      self.was_blink = is_blink
      return bisect_utils.RemoveThirdPartyWebkitDirectory()

    return True

  def PerformCrosChrootCleanup(self):
    """Deletes the chroot.

    Returns:
      True if successful.
    """
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [CROS_SDK_PATH, '--delete']
    return_code = RunProcess(cmd)

    return not return_code

  def CreateCrosChroot(self):
    """Creates a new chroot.

    Returns:
      True if successful.
    """
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [CROS_SDK_PATH, '--create']
    return_code = RunProcess(cmd)

    return not return_code

  def PerformPreSyncCleanup(self, revision, depot):
    """Performs any necessary cleanup before syncing.

    Returns:
      True if successful.
    """
    if depot == 'chromium':
      return self.PerformWebkitDirectoryCleanup(revision)
    elif depot == 'cros':
      return self.PerformCrosChrootCleanup()

    return True

  def RunPostSync(self, depot):
    """Performs any work after syncing.

    Returns:
      True if successful.
    """
    if self.opts.target_platform == 'android':
      os.chdir(os.path.join(self.src_cwd, '..'))
      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts):
        return False

    if depot == 'cros':
      return self.CreateCrosChroot()
    else:
      return self.RunGClientHooks()

  def ShouldSkipRevision(self, depot, revision):
    """Some commits can be safely skipped (such as a DEPS roll); since the tool
    is git based, those changes would have no effect.

    Args:
      depot: The depot being bisected.
      revision: Current revision we're synced to.

    Returns:
      True if we should skip building/testing this revision.
    """
    if depot == 'chromium':
      if self.source_control.IsGit():
        cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
        output = CheckRunGit(cmd)

        files = output.splitlines()

        if len(files) == 1 and files[0] == 'DEPS':
          return True

    return False

  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                              skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    sync_client = None
    if depot == 'chromium':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        self.PerformPreBuildCleanup()

        if not self.source_control.SyncToRevision(r[1], sync_client):
          success = False
          break

    if success:
      success = self.RunPostSync(depot)

      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
                  BUILD_RESULT_SKIPPED)

        if self.BuildCurrentRevision(depot):
          results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                           metric)

          if results[1] == 0 and sync_client:
            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
                depot)

            if external_revisions:
              return (results[0], results[1], external_revisions)

            return ('Failed to parse DEPS file for external revisions.',
                    BUILD_RESULT_FAIL)

          return results

        return ('Failed to build revision: [%s]' % str(revision),
                BUILD_RESULT_FAIL)

      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)

    return ('Failed to sync revision: [%s]' % str(revision),
            BUILD_RESULT_FAIL)

  def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
    """Given known good and bad values, decide if the current_value passed
    or failed.

    Args:
      current_value: The value of the metric being checked.
      known_bad_value: The reference value for a "failed" run.
      known_good_value: The reference value for a "passed" run.

    Returns:
      True if the current_value is closer to the known_good_value than the
      known_bad_value.
    """
    dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
    dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])

    return dist_to_good_value < dist_to_bad_value
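
  # Example: with known_good_value {'mean': 10.0} and known_bad_value
  # {'mean': 20.0}, a run with a mean of 13.0 is treated as passing (distance
  # 3 to good vs. 7 to bad), while a mean of 16.0 is treated as failing.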

  def ChangeToDepotWorkingDirectory(self, depot_name):
    """Given a depot, changes to the appropriate working directory.

    Args:
      depot_name: The name of the depot (see DEPOT_NAMES).
    """
    if depot_name == 'chromium':
      os.chdir(self.src_cwd)
    elif depot_name == 'cros':
      os.chdir(self.cros_cwd)
    elif depot_name in DEPOT_NAMES:
      os.chdir(self.depot_cwd[depot_name])
    else:
      assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
                    ' was added without proper support?' % depot_name

  def PrepareToBisectOnDepot(self,
                             current_depot,
                             end_revision,
                             start_revision,
                             previous_depot,
                             previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.

    Args:
      current_depot: The depot we want to bisect.
      end_revision: End of the revision range.
      start_revision: Start of the revision range.
      previous_depot: The depot we were previously bisecting.
      previous_revision: The last revision we synced to on |previous_depot|.

    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    old_cwd = os.getcwd()
    os.chdir(self.depot_cwd[current_depot])

    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    if DEPOT_DEPS_NAME[current_depot].has_key('build_with'):
      if (DEPOT_DEPS_NAME[current_depot].has_key('custom_deps') and
          previous_depot == 'chromium'):
        config_path = os.path.join(self.src_cwd, '..')
        if bisect_utils.RunGClientAndCreateConfig(self.opts,
            DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
          return None
        if bisect_utils.RunGClient(
            ['sync', '--revision', previous_revision], cwd=self.src_cwd):
          return None

      new_depot = DEPOT_DEPS_NAME[current_depot]['build_with']

      svn_start_revision = self.source_control.SVNFindRev(start_revision)
      svn_end_revision = self.source_control.SVNFindRev(end_revision)
      os.chdir(self.depot_cwd[new_depot])

      start_revision = self.source_control.ResolveToRevision(
          svn_start_revision, new_depot, -1000)
      end_revision = self.source_control.ResolveToRevision(
          svn_end_revision, new_depot, -1000)

      old_name = DEPOT_DEPS_NAME[current_depot]['src'][4:]
      new_name = DEPOT_DEPS_NAME[new_depot]['src'][4:]

      os.chdir(self.src_cwd)

      shutil.move(old_name, old_name + '.bak')
      shutil.move(new_name, old_name)
      os.chdir(self.depot_cwd[current_depot])

      self.cleanup_commands.append(['mv', old_name, new_name])
      self.cleanup_commands.append(['mv', old_name + '.bak', old_name])

      os.chdir(self.depot_cwd[current_depot])

    depot_revision_list = self.GetRevisionList(current_depot,
                                               end_revision,
                                               start_revision)

    os.chdir(old_cwd)

    return depot_revision_list

  def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
    """Gathers reference values by running the performance tests on the
    known good and bad revisions.

    Args:
      good_rev: The last known good revision where the performance regression
        has not occurred yet.
      bad_rev: A revision where the performance regression has already occurred.
      cmd: The command to execute the performance test.
      metric: The metric being tested for regression.

    Returns:
      A tuple with the results of building and running each revision.
    """
    bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
                                                   target_depot,
                                                   cmd,
                                                   metric)

    good_run_results = None

    if not bad_run_results[1]:
      good_run_results = self.SyncBuildAndRunRevision(good_rev,
                                                      target_depot,
                                                      cmd,
                                                      metric)

    return (bad_run_results, good_run_results)

  def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
    """Adds new revisions to the revision_data dict and initializes them.

    Args:
      revisions: List of revisions to add.
      depot: Depot that's currently in use (src, webkit, etc...)
      sort: Sorting key for displaying revisions.
      revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
    """
    num_depot_revisions = len(revisions)

    for k, v in revision_data.iteritems():
      if v['sort'] > sort:
        v['sort'] += num_depot_revisions

    for i in xrange(num_depot_revisions):
      r = revisions[i]

      revision_data[r] = {'revision' : r,
                          'depot' : depot,
                          'value' : None,
                          'passed' : '?',
                          'external' : None,
                          'sort' : i + sort + 1}

  def PrintRevisionsToBisectMessage(self, revision_list, depot):
    if self.opts.output_buildbot_annotations:
      step_name = 'Bisection Range: [%s - %s]' % (
          revision_list[len(revision_list)-1], revision_list[0])
      bisect_utils.OutputAnnotationStepStart(step_name)

    print 'Revisions to bisect on [%s]:' % depot
    for revision_id in revision_list:
      print '  -> %s' % (revision_id, )

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
      bad_revision: First known bad revision.
      good_revision: Last known good revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change.
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = CheckRunGit(cmd)
          output = output.strip()

          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')

    return (bad_revision, good_revision)

  def CheckIfRevisionsInProperOrder(self,
                                    target_depot,
                                    good_revision,
                                    bad_revision):
    """Checks that |good_revision| is an earlier revision than |bad_revision|.

    Args:
      target_depot: The depot being bisected.
      good_revision: Number/tag of the known good revision.
      bad_revision: Number/tag of the known bad revision.

    Returns:
      True if the revisions are in the proper order (good earlier than bad).
    """
    if self.source_control.IsGit() and target_depot != 'cros':
      cmd = ['log', '--format=%ct', '-1', good_revision]
      output = CheckRunGit(cmd)
      good_commit_time = int(output)

      cmd = ['log', '--format=%ct', '-1', bad_revision]
      output = CheckRunGit(cmd)
      bad_commit_time = int(output)

      return good_commit_time <= bad_commit_time

    # Cros/svn use integers.
    return int(good_revision) <= int(bad_revision)

  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
    """Given known good and bad revisions, run a binary search on all
    intermediate revisions to determine the CL where the performance regression
    occurred.

    Args:
      command_to_run: Specify the command to execute the performance test.
      good_revision: Number/tag of the known good revision.
      bad_revision: Number/tag of the known bad revision.
      metric: The performance metric to monitor.

    Returns:
      A dict with 2 members, 'revision_data' and 'error'. On success,
      'revision_data' will contain a dict mapping revision ids to
      data about that revision. Each piece of revision data consists of a
      dict with the following keys:

      'passed': Represents whether the performance test was successful at
          that revision. Possible values include: 1 (passed), 0 (failed),
          '?' (skipped), 'F' (build failed).
      'depot': The depot that this revision is from (ie. WebKit)
      'external': If the revision is a 'src' revision, 'external' contains
          the revisions of each of the external libraries.
      'sort': A sort value for sorting the dict in order of commits.

      If an error occurred, the 'error' field will contain the message and
      'revision_data' will be empty.
    """
    results = {'revision_data' : {},
               'error' : None}

    # Choose depot to bisect first
    target_depot = 'chromium'
    if self.opts.target_platform == 'cros':
      target_depot = 'cros'

    self.ChangeToDepotWorkingDirectory(target_depot)

    # If they passed SVN CL's, etc... we can try match them to git SHA1's.
    bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
                                                         target_depot, 100)
    good_revision = self.source_control.ResolveToRevision(good_revision_in,
                                                          target_depot, -100)

    if bad_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
      return results

    if good_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
      return results

    # Check that they didn't accidentally swap good and bad revisions.
    if not self.CheckIfRevisionsInProperOrder(
        target_depot, good_revision, bad_revision):
      results['error'] = 'bad_revision < good_revision, did you swap these '\
          'by mistake?'
      return results

    (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
        bad_revision, good_revision)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

    print 'Gathering revision range for bisection.'

    # Retrieve a list of revisions to do bisection on.
    src_revision_list = self.GetRevisionList(target_depot,
                                             bad_revision,
                                             good_revision)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    if src_revision_list:
      # revision_data will store information about a revision such as the
      # depot it came from, the webkit/V8 revision at that time,
      # performance timing, build state, etc...
      revision_data = results['revision_data']

      # revision_list is the list we're binary searching through at the moment.
      revision_list = []

      sort_key_ids = 0

      for current_revision_id in src_revision_list:
        sort_key_ids += 1

        revision_data[current_revision_id] = {'value' : None,
                                              'passed' : '?',
                                              'depot' : target_depot,
                                              'external' : None,
                                              'sort' : sort_key_ids}
        revision_list.append(current_revision_id)

      min_revision = 0
      max_revision = len(revision_list) - 1

      self.PrintRevisionsToBisectMessage(revision_list, target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

      print 'Gathering reference values for bisection.'

      # Perform the performance tests on the good and bad revisions, to get
      # reference values.
      (bad_results, good_results) = self.GatherReferenceValues(good_revision,
                                                               bad_revision,
                                                               command_to_run,
                                                               metric,
                                                               target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepClosed()

      if bad_results[1]:
        results['error'] = bad_results[0]
        return results

      if good_results[1]:
        results['error'] = good_results[0]
        return results

      # We need these reference values to determine if later runs should be
      # classified as pass or fail.
      known_bad_value = bad_results[0]
      known_good_value = good_results[0]

      # Can just mark the good and bad revisions explicitly here since we
      # already know the results.
      bad_revision_data = revision_data[revision_list[0]]
      bad_revision_data['external'] = bad_results[2]
      bad_revision_data['passed'] = 0
      bad_revision_data['value'] = known_bad_value

      good_revision_data = revision_data[revision_list[max_revision]]
      good_revision_data['external'] = good_results[2]
      good_revision_data['passed'] = 1
      good_revision_data['value'] = known_good_value

      next_revision_depot = target_depot

      while True:
        if not revision_list:
          break

        min_revision_data = revision_data[revision_list[min_revision]]
        max_revision_data = revision_data[revision_list[max_revision]]

        if max_revision - min_revision <= 1:
          if min_revision_data['passed'] == '?':
            next_revision_index = min_revision
          elif max_revision_data['passed'] == '?':
            next_revision_index = max_revision
          elif min_revision_data['depot'] == 'chromium' or\
               min_revision_data['depot'] == 'cros':
            # If there were changes to any of the external libraries we track,
            # should bisect the changes there as well.
            external_depot = None

            for current_depot in DEPOT_NAMES:
              if DEPOT_DEPS_NAME[current_depot]["recurse"] and\
                  DEPOT_DEPS_NAME[current_depot]['from'] ==\
                  min_revision_data['depot']:
                if min_revision_data['external'][current_depot] !=\
                    max_revision_data['external'][current_depot]:
                  external_depot = current_depot
                  break

            # If there was no change in any of the external depots, the search
            # is over.
            if not external_depot:
              break

            previous_revision = revision_list[min_revision]

            earliest_revision = max_revision_data['external'][external_depot]
            latest_revision = min_revision_data['external'][external_depot]

            new_revision_list = self.PrepareToBisectOnDepot(external_depot,
                                                            latest_revision,
                                                            earliest_revision,
                                                            next_revision_depot,
                                                            previous_revision)

            if not new_revision_list:
              results['error'] = 'An error occurred attempting to retrieve'\
                                 ' revision range: [%s..%s]' %\
                                 (earliest_revision, latest_revision)
              return results

            self.AddRevisionsIntoRevisionData(new_revision_list,
                                              external_depot,
                                              min_revision_data['sort'],
                                              revision_data)

            # Reset the bisection and perform it on the newly inserted
            # revisions.
            revision_list = new_revision_list
            min_revision = 0
            max_revision = len(revision_list) - 1
            sort_key_ids += len(revision_list)

            print 'Regression in metric:%s appears to be the result of changes'\
                  ' in [%s].' % (metric, external_depot)

            self.PrintRevisionsToBisectMessage(revision_list, external_depot)

            continue
          else:
            break
        else:
          next_revision_index = int((max_revision - min_revision) / 2) +\
                                min_revision

        next_revision_id = revision_list[next_revision_index]
        next_revision_data = revision_data[next_revision_id]
        next_revision_depot = next_revision_data['depot']

        self.ChangeToDepotWorkingDirectory(next_revision_depot)

        if self.opts.output_buildbot_annotations:
          step_name = 'Working on [%s]' % next_revision_id
          bisect_utils.OutputAnnotationStepStart(step_name)

        print 'Working on revision: [%s]' % next_revision_id

        run_results = self.SyncBuildAndRunRevision(next_revision_id,
                                                   next_revision_depot,
                                                   command_to_run,
                                                   metric, skippable=True)

        # If the build is successful, check whether or not the metric
        # regressed at this revision.
        if not run_results[1]:
          if len(run_results) > 2:
            next_revision_data['external'] = run_results[2]

          passed_regression = self.CheckIfRunPassed(run_results[0],
                                                    known_good_value,
                                                    known_bad_value)

          next_revision_data['passed'] = passed_regression
          next_revision_data['value'] = run_results[0]

          if passed_regression:
            max_revision = next_revision_index
          else:
            min_revision = next_revision_index
        else:
          if run_results[1] == BUILD_RESULT_SKIPPED:
            next_revision_data['passed'] = 'Skipped'
          elif run_results[1] == BUILD_RESULT_FAIL:
            next_revision_data['passed'] = 'Failed'

          print run_results[0]

          # If the build is broken, remove it and redo search.
          revision_list.pop(next_revision_index)

          max_revision -= 1

        if self.opts.output_buildbot_annotations:
          bisect_utils.OutputAnnotationStepClosed()
    else:
      # Weren't able to sync and retrieve the revision range.
      results['error'] = 'An error occurred attempting to retrieve revision '\
                         'range: [%s..%s]' % (good_revision, bad_revision)

    return results

  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Results')

    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      if type(build_status) is bool:
        build_status = int(build_status)

      print '  %8s  %40s  %s' % (current_data['depot'],
                                 current_id, build_status)

    print 'Tested commits:'
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        print '  %8s  %40s  %12f %12f' % (
            current_data['depot'], current_id,
            current_data['value']['mean'], current_data['value']['std_dev'])

    # Find range where it possibly broke.
    first_working_revision = None
    last_broken_revision = None
    last_broken_revision_index = -1

    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
      else:
        last_broken_revision = k
        last_broken_revision_index = i

    if last_broken_revision != None and first_working_revision != None:
      print 'Results: Regression may have occurred in range:'
      print '  -> First Bad Revision: [%40s] [%s]' %\
            (last_broken_revision,
             revision_data[last_broken_revision]['depot'])
      print '  -> Last Good Revision: [%40s] [%s]' %\
            (first_working_revision,
             revision_data[first_working_revision]['depot'])

      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])

      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        (output, return_code) = RunProcessAndRetrieveOutput(cmd)

        assert not return_code, 'An error occurred while running'\
                                ' "%s"' % ' '.join(cmd)

        changes = []
        last_depot = None
        for l in output.split('\n'):
          if l:
            # Output will be in form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])

        for c in changes:
          info = self.source_control.QueryRevisionInfo(c[1])

          print 'Commit  : %s' % c[1]
          print 'Author  : %s' % info['author']
          print 'Email   : %s' % info['email']
          print 'Date    : %s' % info['date']
          print 'Subject : %s' % info['subject']
      else:
        multiple_commits = 0
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break

          self.ChangeToDepotWorkingDirectory(v['depot'])

          info = self.source_control.QueryRevisionInfo(k)

          print 'Commit  : %s' % k
          print 'Author  : %s' % info['author']
          print 'Email   : %s' % info['email']
          print 'Date    : %s' % info['date']
          print 'Subject : %s' % info['subject']

          multiple_commits += 1
        if multiple_commits > 1:
          self.warnings.append('Due to build errors, regression range could'
              ' not be narrowed down to a single commit.')

      # Give a warning if the values were very close together.
      good_std_dev = revision_data[first_working_revision]['value']['std_dev']
      good_mean = revision_data[first_working_revision]['value']['mean']
      bad_mean = revision_data[last_broken_revision]['value']['mean']

      # A standard deviation of 0 could indicate either insufficient runs
      # or a test that consistently returns the same value.
      if good_std_dev > 0:
        deviations = math.fabs(bad_mean - good_mean) / good_std_dev

        if deviations < 1.5:
          self.warnings.append('Regression was less than 1.5 standard '
              'deviations from "good" value. Results may not be accurate.')
      elif self.opts.repeat_test_count == 1:
        self.warnings.append('Tests were only set to run once. This '
            'may be insufficient to get meaningful results.')

      # Check for any other possible regression ranges.
      prev_revision_data = revision_data_sorted[0][1]
      prev_revision_id = revision_data_sorted[0][0]
      possible_regressions = []
      for current_id, current_data in revision_data_sorted:
        if current_data['value']:
          prev_mean = prev_revision_data['value']['mean']
          cur_mean = current_data['value']['mean']

          if good_std_dev:
            deviations = math.fabs(prev_mean - cur_mean) / good_std_dev
          else:
            deviations = None

          if good_mean:
            percent_change = (prev_mean - cur_mean) / good_mean

            # If the "good" values are supposed to be higher than the "bad"
            # values (ie. scores), flip the sign of the percent change so that
            # a positive value always represents a regression.
            if bad_mean < good_mean:
              percent_change *= -1.0
          else:
            percent_change = None

          if deviations >= 1.5 or percent_change > 0.01:
            if current_id != first_working_revision:
              possible_regressions.append(
                  [current_id, prev_revision_id, percent_change, deviations])
          prev_revision_data = current_data
          prev_revision_id = current_id

      if possible_regressions:
        print 'Other regressions may have occurred:'

        for p in possible_regressions:
          current_id = p[0]
          previous_id = p[1]
          percent_change = p[2]
          deviations = p[3]

          current_data = revision_data[current_id]
          previous_data = revision_data[previous_id]

          if deviations is None:
            deviations = 'N/A'
          else:
            deviations = '%.2f' % deviations

          if percent_change is None:
            percent_change = 0

          print '  %8s  %s [%.2f%%, %s x std.dev]' % (
              previous_data['depot'], previous_id, 100 * percent_change,
              deviations)
          print '  %8s  %s' % (
              current_data['depot'], current_id)

    if self.warnings:
      print 'The following warnings were generated:'

      for w in self.warnings:
        print '  %s' % w

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])

  if output.strip() == 'true':
    return GitSourceControl(opts)

  return None
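
# Note: `git rev-parse --is-inside-work-tree` prints 'true' (with a trailing
# newline) when invoked inside a git work tree, which is why the output is
# stripped before the comparison in DetermineAndCreateSourceControl() above.
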

def SetNinjaBuildSystemDefault():
  """Makes ninja the default build system to be used by
  the bisection script."""
  gyp_var = os.getenv('GYP_GENERATORS')

  if not gyp_var or not 'ninja' in gyp_var:
    if gyp_var:
      os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
    else:
      os.environ['GYP_GENERATORS'] = 'ninja'

    if os.name == 'nt':
      os.environ['GYP_DEFINES'] = 'component=shared_library '\
          'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
          'chromium_win_pch=0'
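
# For example (hypothetical environment): if GYP_GENERATORS was 'msvs',
# SetNinjaBuildSystemDefault() rewrites it to 'msvs,ninja'; if it was unset it
# becomes 'ninja'. On Windows, GYP_DEFINES is also set to flags
# (component=shared_library, incremental_chrome_dll=1, chromium_win_pch=0)
# that favour a faster component build while bisecting.
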

def SetMakeBuildSystemDefault():
  """Makes make the default build system to be used by
  the bisection script."""
  os.environ['GYP_GENERATORS'] = 'make'

def CheckPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  supported = ['posix', 'nt']
  if not os.name in supported:
    print "Sorry, this platform isn't supported yet."
    print
    return False

  if os.name == 'nt':
    if not opts.build_preference:
      opts.build_preference = 'msvs'

    if opts.build_preference == 'msvs':
      if not os.getenv('VS100COMNTOOLS'):
        print 'Error: Path to visual studio could not be determined.'
        print
        return False
    elif opts.build_preference == 'ninja':
      SetNinjaBuildSystemDefault()
    else:
      assert False, 'Error: %s build not supported' % opts.build_preference
  else:
    if not opts.build_preference:
      if 'ninja' in os.getenv('GYP_GENERATORS', ''):
        opts.build_preference = 'ninja'
      else:
        opts.build_preference = 'make'

    if opts.build_preference == 'ninja':
      SetNinjaBuildSystemDefault()
    elif opts.build_preference == 'make':
      SetMakeBuildSystemDefault()
    else:
      assert False, 'Error: %s build not supported' % opts.build_preference

  bisect_utils.RunGClient(['runhooks'])

  return True
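
# Note: once CheckPlatformSupported() returns True, opts.build_preference has
# been resolved (msvs or ninja on Windows, ninja or make elsewhere) and
# `gclient runhooks` has been run so the selected GYP generator takes effect
# for subsequent builds.
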

def RmTreeAndMkDir(path_to_dir):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location.

  Args:
    path_to_dir: Path to the directory tree.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError, e:
    if e.errno != errno.ENOENT:
      return False

  try:
    os.makedirs(path_to_dir)
  except OSError, e:
    if e.errno != errno.EEXIST:
      return False

  return True
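
# Note: RmTreeAndMkDir() tolerates ENOENT from the removal (the tree was
# already gone) and EEXIST from the re-creation; any other OSError makes it
# return False so callers such as RemoveBuildFiles() can bail out.
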

def RemoveBuildFiles():
  """Removes build files from previous runs."""
  if RmTreeAndMkDir(os.path.join('out', 'Release')):
    if RmTreeAndMkDir(os.path.join('build', 'Release')):
      return True
  return False


def main():
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on revision history to find a minimal '
           'range of revisions where a performance metric regressed.\n')

  parser = optparse.OptionParser(usage=usage)

  parser.add_option('-c', '--command',
                    type='str',
                    help='A command to execute your performance test at' +
                    ' each point in the bisection.')
  parser.add_option('-b', '--bad_revision',
                    type='str',
                    help='A bad revision to start bisection. ' +
                    'Must be later than good revision. May be either a git' +
                    ' or svn revision.')
  parser.add_option('-g', '--good_revision',
                    type='str',
                    help='A revision to start bisection where performance' +
                    ' test is known to pass. Must be earlier than the ' +
                    'bad revision. May be either a git or svn revision.')
  parser.add_option('-m', '--metric',
                    type='str',
                    help='The desired metric to bisect on. For example ' +
                    '"vm_rss_final_b/vm_rss_f_b"')
  parser.add_option('-w', '--working_directory',
                    type='str',
                    help='Path to the working directory where the script will '
                    'do an initial checkout of the chromium depot. The '
                    'files will be placed in a subdirectory "bisect" under '
                    'working_directory and that will be used to perform the '
                    'bisection. This parameter is optional, if it is not '
                    'supplied, the script will work from the current depot.')
  parser.add_option('-r', '--repeat_test_count',
                    type='int',
                    default=20,
                    help='The number of times to repeat the performance test. '
                    'Values will be clamped to range [1, 100]. '
                    'Default value is 20.')
  parser.add_option('--repeat_test_max_time',
                    type='int',
                    default=20,
                    help='The maximum time (in minutes) to take running the '
                    'performance tests. The script will run the performance '
                    'tests according to --repeat_test_count, so long as it '
                    'doesn\'t exceed --repeat_test_max_time. Values will be '
                    'clamped to range [1, 60]. '
                    'Default value is 20.')
  parser.add_option('-t', '--truncate_percent',
                    type='int',
                    default=25,
                    help='The highest/lowest % are discarded to form a '
                    'truncated mean. Values will be clamped to range [0, 25]. '
                    'Default value is 25 (highest/lowest 25% will be '
                    'discarded).')
  parser.add_option('--build_preference',
                    type='choice',
                    choices=['msvs', 'ninja', 'make'],
                    help='The preferred build system to use. On linux/mac '
                    'the options are make/ninja. On Windows, the options '
                    'are msvs/ninja.')
  parser.add_option('--target_platform',
                    type='choice',
                    choices=['chromium', 'cros', 'android'],
                    default='chromium',
                    help='The target platform. Choices are "chromium" (current '
                    'platform), "cros", or "android". If you specify something '
                    'other than "chromium", you must be properly set up to '
                    'build that platform.')
  parser.add_option('--cros_board',
                    type='str',
                    help='The cros board type to build.')
  parser.add_option('--cros_remote_ip',
                    type='str',
                    help='The remote machine to image to.')
  parser.add_option('--use_goma',
                    action="store_true",
                    help='Add a bunch of extra threads for goma.')
  parser.add_option('--output_buildbot_annotations',
                    action="store_true",
                    help='Add extra annotation output for buildbot.')
  parser.add_option('--debug_ignore_build',
                    action="store_true",
                    help='DEBUG: Don\'t perform builds.')
  parser.add_option('--debug_ignore_sync',
                    action="store_true",
                    help='DEBUG: Don\'t perform syncs.')
  parser.add_option('--debug_ignore_perf_test',
                    action="store_true",
                    help='DEBUG: Don\'t perform performance tests.')
  (opts, args) = parser.parse_args()

  if not opts.command:
    print 'Error: missing required parameter: --command'
    print
    parser.print_help()
    return 1

  if not opts.good_revision:
    print 'Error: missing required parameter: --good_revision'
    print
    parser.print_help()
    return 1

  if not opts.bad_revision:
    print 'Error: missing required parameter: --bad_revision'
    print
    parser.print_help()
    return 1

  if not opts.metric:
    print 'Error: missing required parameter: --metric'
    print
    parser.print_help()
    return 1

  if opts.target_platform == 'cros':
    # Run sudo up front to make sure credentials are cached for later.
    print 'Sudo is required to build cros:'
    print
    RunProcess(['sudo', 'true'])

    if not opts.cros_board:
      print 'Error: missing required parameter: --cros_board'
      print
      parser.print_help()
      return 1

    if not opts.cros_remote_ip:
      print 'Error: missing required parameter: --cros_remote_ip'
      print
      parser.print_help()
      return 1

    if not opts.working_directory:
      print 'Error: missing required parameter: --working_directory'
      print
      parser.print_help()
      return 1

  opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
  opts.repeat_test_max_time = min(max(opts.repeat_test_max_time, 1), 60)
  opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
  opts.truncate_percent = opts.truncate_percent / 100.0
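
  # Worked example (hypothetical flags): --repeat_test_count=500 is clamped to
  # 100, and --truncate_percent=25 becomes 0.25, meaning the highest and
  # lowest 25% of the measured values are dropped when the truncated mean is
  # computed (see CalculateTruncatedMean).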

  metric_values = opts.metric.split('/')
  if len(metric_values) != 2:
    print "Invalid metric specified: [%s]" % (opts.metric,)
    print
    return 1

  if opts.working_directory:
    if bisect_utils.CreateBisectDirectoryAndSetupDepot(opts):
      return 1

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      print 'Error: Failed to set platform environment.'
      print
      return 1

    os.chdir(os.path.join(os.getcwd(), 'src'))

  if not RemoveBuildFiles():
    print "Something went wrong removing the build files."
    print
    return 1

  if not CheckPlatformSupported(opts):
    return 1

  # Check what source control method they're using. Only support git workflow
  # at the moment.
  source_control = DetermineAndCreateSourceControl(opts)

  if not source_control:
    print "Sorry, only the git workflow is supported at the moment."
    print
    return 1

  # gClient sync seems to fail if you're not in master branch.
  if not source_control.IsInProperBranch() and not opts.debug_ignore_sync:
    print "You must switch to master branch to run bisection."
    print
    return 1

  bisect_test = BisectPerformanceMetrics(source_control, opts)
  try:
    bisect_results = bisect_test.Run(opts.command,
                                     opts.bad_revision,
                                     opts.good_revision,
                                     metric_values)
    if not(bisect_results['error']):
      bisect_test.FormatAndPrintResults(bisect_results)
  finally:
    bisect_test.PerformCleanup()

  if not(bisect_results['error']):
    return 0
  else:
    print 'Error: ' + bisect_results['error']
    return 1


if __name__ == '__main__':
  sys.exit(main())