# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.

An example usage (using svn cl's):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an svn revision,
the script will attempt to find the git SHA1 where svn changes up to that
revision were merged in.

An example usage (using git hashes):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""

import datetime
import errno
import hashlib
import os
import re
import shutil
import sys
import time
import zipfile

sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))

from auto_bisect import bisect_utils
from auto_bisect import math_utils
from auto_bisect import post_perf_builder_job as bisect_builder
from auto_bisect import source_control as source_control_module
from auto_bisect import ttest
from telemetry.util import cloud_storage

# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
#   src: Path to the working directory.
#   recurse: True if this repository will get bisected.
#   depends: A list of other repositories that are actually part of the same
#       repository in svn. If the repository has any dependent repositories
#       (e.g. skia/src needs skia/include and skia/gyp to be updated), then
#       they are specified here.
#   svn: URL of SVN repository. Needed for git workflow to resolve hashes to
#       svn revisions.
#   from: Parent depot that must be bisected before this is bisected.
#   deps_var: Key name in vars variable in DEPS file that has revision
#       information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'src': 'src',
    'from': ['cros', 'android-chrome'],
    'viewvc':
        'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'viewvc':
        'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'from': ['chromium'],
    'deps_var': 'angle_revision'
  },
  'v8': {
    'src': 'src/v8',
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r='
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r='
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',

# Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha, under the src folder.
# This file contains the SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# tryserver. When a build request is posted with a patch, the bisect builders
# on the tryserver read the SHA value from this file once the build is
# produced and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'

# The perf dashboard specifically looks for the string
# "Estimated Confidence: 95%" to decide whether or not to cc the author(s).
# If you change this, please update the perf dashboard as well.
RESULTS_BANNER = """
===== BISECT JOB RESULTS =====

Test Command: %(command)s
Test Metric: %(metrics)s
Relative Change: %(change)s
Estimated Confidence: %(confidence)d%%"""

# The perf dashboard specifically looks for the string
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s%(email_info)s%(commit_info)s
Date : %(cl_date)s"""

REPRO_STEPS_LOCAL = """
==== INSTRUCTIONS TO REPRODUCE ====

REPRO_STEPS_TRYJOB = """
To reproduce on Performance trybot:
1. Create new git branch or check out existing branch.
2. Edit tools/run-perf-test.cfg (instructions in file) or \
third_party/WebKit/Tools/run-perf-test.cfg.
a) Take care to strip any src/ directories from the head of \
b) On desktop, only --browser=release is supported, on android \
--browser=android-chromium-testshell.
c) Test command to use: %(command)s
3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
committed locally to run-perf-test.cfg.
Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
$ git cl upload --bypass-hooks
4. Send your try job to the tryserver. \
[Please make sure to use appropriate bot to reproduce]
$ git cl try -m tryserver.chromium.perf -b <bot>

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""

RESULTS_THANKYOU = """
===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
Visit http://www.chromium.org/developers/core-principles for Chrome's policy
Contact chrome-perf-dashboard-team with any questions or suggestions about
. | ---------'-------'-----------.
. . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
. \______________.-------._______________)

def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
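
# Illustrative sketch, not part of the original script: _AddAdditionalDepotInfo
# merges extra depot entries into DEPOT_DEPS_NAME at runtime. The depot name
# and field values below are hypothetical and only show the expected shape.
#
#   _AddAdditionalDepotInfo({
#       'example_depot': {
#           'src': 'src/third_party/example',
#           'from': ['chromium'],
#           'deps_var': 'example_revision',
#       },
#   })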

def ConfidenceScore(good_results_lists, bad_results_lists):
  """Calculates a confidence score.

  This score is a percentage which represents our degree of confidence in the
  proposition that the good results and bad results are distinct groups, and
  their differences aren't due to chance alone.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  if not good_results_lists or not bad_results_lists:
    return 0.0

  # Flatten the lists of results lists.
  sample1 = sum(good_results_lists, [])
  sample2 = sum(bad_results_lists, [])
  if not sample1 or not sample2:
    return 0.0

  # The p-value is approximately the probability of obtaining the given set
  # of good and bad values just by chance.
  _, _, p_value = ttest.WelchsTTest(sample1, sample2)
  return 100.0 * (1.0 - p_value)
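
# Illustrative sketch, not part of the original script: ConfidenceScore is
# driven by Welch's t-test, so clearly separated samples score near 100 while
# heavily overlapping samples score near 0. The numbers are hypothetical.
#
#   ConfidenceScore([[10.1, 10.2], [10.0, 10.3]],
#                   [[12.0, 12.1], [11.9, 12.2]])   # close to 100.0
#   ConfidenceScore([[10.1, 12.0]], [[10.2, 11.9]])  # close to 0.0
#   ConfidenceScore([], [[1.0]])                     # 0.0 (no good samples)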

def GetSHA1HexDigest(contents):
  """Returns SHA1 hex digest of the given string."""
  return hashlib.sha1(contents).hexdigest()

def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if bisect_utils.IsWindowsHost():
      # Build archive for x64 is still stored with 'win32' suffix
      # (chromium_utils.PlatformName()).
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if bisect_utils.IsLinuxHost():
      # Android builds too are archived with full-build-linux* prefix.
      return 'linux'
    if bisect_utils.IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return '%s.zip' % base_name
  if patch_sha:
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
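
# Illustrative sketch, not part of the original script: on a Linux host the
# helper above produces names like the following (revision taken from the
# usage example in the module docstring).
#
#   GetZipFileName('b732f23b4f81c382db0b23b9035f3dadc7d925bb')
#   # => 'full-build-linux_b732f23b4f81c382db0b23b9035f3dadc7d925bb.zip'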

def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Computes the url to download the build from."""
  def GetGSRootFolderName(target_platform):
    """Gets the Google Cloud Storage root folder name."""
    if bisect_utils.IsWindowsHost():
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if bisect_utils.IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if bisect_utils.IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (builder_folder, base_filename)
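
# Illustrative sketch, not part of the original script: the remote path is the
# builder folder joined with the archive name, e.g. on a Linux host
#
#   GetRemoteBuildPath('b732f23b4f81c382db0b23b9035f3dadc7d925bb')
#   # => 'Linux Builder/full-build-linux_b732f23b4f81c382db0b23b9035f3dadc7d925bb.zip'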

def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if it exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return target_file
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      os.remove(target_file)
  return None

# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise

# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """Extracts the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier than trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((bisect_utils.IsMacHost()
       and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or bisect_utils.IsLinuxHost()):
    unzip_cmd = ['unzip', '-o']
  elif (bisect_utils.IsWindowsHost()
        and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = bisect_utils.RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if bisect_utils.IsMacHost():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)

def SetBuildSystemDefault(build_system, use_goma, goma_dir):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS', default='')

    if not gyp_var or not 'ninja' in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

    if bisect_utils.IsWindowsHost():
      os.environ['GYP_DEFINES'] = ('component=shared_library '
                                   'incremental_chrome_dll=1 '
                                   'disable_nacl=1 fastbuild=1 '
                                   'chromium_win_pch=0')
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)

  if use_goma:
    os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''),
                                           'use_goma=1')
    if goma_dir:
      os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir

def BuildWithMake(threads, targets, build_type='Release'):
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  cmd += targets
  return_code = bisect_utils.RunProcess(cmd)
  return not return_code

def BuildWithNinja(threads, targets, build_type='Release'):
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  cmd += targets
  return_code = bisect_utils.RunProcess(cmd)
  return not return_code

def BuildWithVisualStudio(targets, build_type='Release'):
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = bisect_utils.RunProcess(cmd)
  return not return_code

def WriteStringToFile(text, file_name):
  try:
    with open(file_name, 'wb') as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)


def ReadStringFromFile(file_name):
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)

def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths."""
  if diff_text:
    diff_lines = diff_text.split('\n')
    for i in range(len(diff_lines)):
      if (diff_lines[i].startswith('--- ') or
          diff_lines[i].startswith('+++ ')):
        diff_lines[i] = diff_lines[i].replace('\\', '/')
    return '\n'.join(diff_lines)
  return None
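
# Illustrative sketch, not part of the original script: only '---'/'+++' header
# lines are rewritten, so a Windows-style diff header such as
#
#   --- src\third_party\WebKit\DEPS
#
# comes back as
#
#   --- src/third_party/WebKit/DEPS
#
# while backslashes elsewhere in the patch body are left untouched.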

class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.
    """
    if bisect_utils.IsWindowsHost():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference, opts.use_goma,
                              opts.goma_dir)
    else:
      if not opts.build_preference:
        if 'ninja' in os.getenv('GYP_GENERATORS', default=''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns a builder for the given target platform."""
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or bisect_utils.IsLinuxHost():
      return os.path.join(src_dir, 'out')
    if bisect_utils.IsMacHost():
      return os.path.join(src_dir, 'xcodebuild')
    if bisect_utils.IsWindowsHost():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)

class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']
    threads = None

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'msvs':
      assert bisect_utils.IsWindowsHost(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets, opts.target_build_type)
    else:
      assert False, 'No build system defined.'
    return build_success

class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = None

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(
          threads, self._GetTargets(), opts.target_build_type)
    else:
      assert False, 'No build system defined.'
    return build_success

class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build on android's chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']

class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']
      return_code = bisect_utils.RunProcess(cmd)
      return not return_code
    except OSError:
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.
    """
    cmd = [CROS_SDK_PATH]
    path_to_chrome = os.path.join(os.getcwd(), '..')
    cmd += ['--chrome_root=%s' % path_to_chrome]
    cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = bisect_utils.RunProcess(cmd)
    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.
    """
    cmd = [CROS_SDK_PATH]
    path_to_chrome = os.path.join(os.getcwd(), '..')
    cmd += ['--chrome_root=%s' % path_to_chrome]
    cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']
    return_code = bisect_utils.RunProcess(cmd)
    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False

def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
  """Parses the vars section of the DEPS file with regex.

  Args:
    deps_file_contents: The DEPS file contents as a string.

  Returns:
    A dict in the format {depot:revision} if successful, otherwise None.
  """
  # We'll parse the "vars" section of the DEPS file.
  rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
  re_results = rxp.search(deps_file_contents)

  if not re_results:
    return None

  # We should be left with a series of entries in the vars component of
  # the DEPS file with the following format:
  # 'depot_name': 'revision',
  vars_body = re_results.group('vars_body')
  rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
                   re.MULTILINE)
  re_results = rxp.findall(vars_body)

  return dict(re_results)
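
# Illustrative sketch, not part of the original script: given DEPS contents
# whose vars section looks like
#
#   vars = {
#     'webkit_revision': '12345',
#     'angle_revision': 'fa63e947cb3eccf463648d21a05d5002c9b8adfa',
#   }
#
# the helper above returns
# {'webkit_revision': '12345',
#  'angle_revision': 'fa63e947cb3eccf463648d21a05d5002c9b8adfa'}.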

def _WaitUntilBuildIsReady(
    fetch_build, bot_name, builder_host, builder_port, build_request_id,
    max_timeout):
  """Waits until build is produced by bisect builder on tryserver.

  Args:
    fetch_build: Function to check and download build from cloud storage.
    bot_name: Builder bot name on tryserver.
    builder_host: Tryserver hostname.
    builder_port: Tryserver port.
    build_request_id: A unique ID of the build request posted to tryserver.
    max_timeout: Maximum time to wait for the build.

  Returns:
    Downloaded archive file path if it exists, otherwise None.
  """
  # Build number on the tryserver.
  build_num = None
  # Interval to check build on cloud storage.
  # Interval to check build status on tryserver.
  status_check_interval = 600
  last_status_check = time.time()
  start_time = time.time()
  while True:
    # Checks for build on gs://chrome-perf and download if exists.
    res = fetch_build()
    if res:
      return (res, 'Build successfully found')
    elapsed_status_check = time.time() - last_status_check
    # To avoid overloading tryserver with status check requests, we check
    # build status every 10 minutes.
    if elapsed_status_check > status_check_interval:
      last_status_check = time.time()
      # Get the build number on tryserver for the current build.
      build_num = bisect_builder.GetBuildNumFromBuilder(
          build_request_id, bot_name, builder_host, builder_port)
      # Check the status of build using the build number.
      # Note: Build is treated as PENDING if build number is not found
      # on the tryserver.
      build_status, status_link = bisect_builder.GetBuildStatus(
          build_num, bot_name, builder_host, builder_port)
      if build_status == bisect_builder.FAILED:
        return (None, 'Failed to produce build, log: %s' % status_link)
    elapsed_time = time.time() - start_time
    if elapsed_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)

    print 'Time elapsed: %ss without build.' % elapsed_time
    time.sleep(poll_interval)
    # For some reason, mac bisect bots were not flushing stdout periodically.
    # As a result buildbot command is timed-out. Flush stdout on all platforms
    # while waiting for build.
    sys.stdout.flush()

def _UpdateV8Branch(deps_content):
  """Updates V8 branch in DEPS file to process v8_bleeding_edge.

  Checks for "v8_branch" in the DEPS file; if it exists, updates its value
  to the v8_bleeding_edge branch. Note: "v8_branch" was added to the DEPS
  variables in DEPS revision 254916, therefore check for "src/v8":
  <v8 source path> in DEPS in order to support prior DEPS revisions.

  Args:
    deps_content: DEPS file contents to be modified.

  Returns:
    Modified DEPS file contents as a string.
  """
  new_branch = r'branches/bleeding_edge'
  v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
  if re.search(v8_branch_pattern, deps_content):
    deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
  else:
    # Replaces the branch assigned to "src/v8" key in DEPS file.
    # Format of "src/v8" in DEPS:
    #   (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
    # So, "/trunk@" is replaced with "/branches/bleeding_edge@".
    v8_src_pattern = re.compile(
        r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
    if re.search(v8_src_pattern, deps_content):
      deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
  return deps_content

def _UpdateDEPSForAngle(revision, depot, deps_file):
  """Updates DEPS file with new revision for Angle repository.

  This is a hack for the Angle depot case because the "vars" dictionary in
  the DEPS file contains an "angle_revision" key that holds a git hash
  instead of an SVN revision.

  And sometimes the "angle_revision" key is not specified in the "vars"
  variable; in such cases check the "deps" dictionary variable that matches
  angle.git@[a-fA-F0-9]{40}$ and replace the git hash.
  """
  deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
  try:
    deps_contents = ReadStringFromFile(deps_file)
    # Check whether the depot and revision pattern is in the DEPS file vars
    # variable, e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
    angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
                                   deps_var, re.MULTILINE)
    match = re.search(angle_rev_pattern, deps_contents)
    if match:
      # Update the revision information for the given depot.
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    else:
      # Check whether the depot and revision pattern is in the DEPS file deps
      # variable, e.g.
      # "src/third_party/angle": Var("chromium_git") +
      #   "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
      angle_rev_pattern = re.compile(
          r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
      match = re.search(angle_rev_pattern, deps_contents)
      if not match:
        print 'Could not find angle revision information in DEPS file.'
        return False
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    # Write changes to DEPS file.
    WriteStringToFile(new_data, deps_file)
    return True
  except Exception as e:
    print 'Something went wrong while updating DEPS file, %s' % e
  return False

def _TryParseHistogramValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found, [] if none were found.
  """
  metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    if metric_formatted in current_line:
      current_line = current_line[len(metric_formatted):]

      try:
        histogram_values = eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N-elements with the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])
      except Exception:
        pass

  return values_list
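
# Illustrative sketch, not part of the original script: for
# metric = ['times', 'page_load_time'], a (hypothetical) log line such as
#
#   HISTOGRAM times: page_load_time= {"buckets": [{"low": 1, "high": 3, "count": 2}]}
#
# contributes two samples of 2.0 (the bucket average) to the returned list.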

def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (not single_result_match is None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (not multi_results_match is None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (not mean_stddev_match is None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list
                 if bisect_utils.IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [
      ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]

  if metric in metrics_to_sum:
    if values_list:
      values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list
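
# Illustrative sketch, not part of the original script: for
# metric = ['times', 'page_load_time'], the three patterns above match
# (hypothetical) log lines of these shapes:
#
#   RESULT times: page_load_time= 2.3 ms          -> [2.3]
#   RESULT times: page_load_time= [2.3,2.5,2.4]   -> [2.3, 2.5, 2.4]
#   RESULT times: page_load_time= {2.4, 0.1}      -> [2.4]  (mean only)
#
# Because ['times', 'page_load_time'] is listed in metrics_to_sum, the parsed
# values are then collapsed into their sum (e.g. [7.2] for the second line).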

def _ParseMetricValuesFromOutput(metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  metric_values = _TryParseResultValuesFromOutput(metric, text)

  if not metric_values:
    metric_values = _TryParseHistogramValuesFromOutput(metric, text)

  return metric_values

def _GenerateProfileIfNecessary(command_args):
  """Checks the command line of the performance test for dependencies on
  profile generation, and runs tools/perf/generate_profile as necessary.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
  if '--profile-dir' in ' '.join(command_args):
    # If we were using python 2.7+, we could just use the argparse
    # module's parse_known_args to grab --profile-dir. Since some of the
    # bots still run 2.6, have to grab the arguments manually.
    arg_dict = {}
    args_to_parse = ['--profile-dir', '--browser']

    for arg_to_parse in args_to_parse:
      for i, current_arg in enumerate(command_args):
        if arg_to_parse in current_arg:
          current_arg_split = current_arg.split('=')

          # Check 2 cases, --arg=<val> and --arg <val>
          if len(current_arg_split) == 2:
            arg_dict[arg_to_parse] = current_arg_split[1]
          elif i + 1 < len(command_args):
            arg_dict[arg_to_parse] = command_args[i + 1]

    path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

    if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
      profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
      return not bisect_utils.RunProcess(['python', path_to_generate,
          '--profile-type-to-generate', profile_type,
          '--browser', arg_dict['--browser'], '--output-dir', profile_path])
    return False
  return True

def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  for _, v in revision_data.iteritems():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  for i in xrange(num_depot_revisions):
    r = revisions[i]
    revision_data[r] = {
        'depot': depot,
        'value': None,
        'perf_time': 0,
        'build_time': 0,
        'sort': i + sort + 1,
    }

def _PrintThankYou():
  print RESULTS_THANKYOU

def _PrintTableRow(column_widths, row_data):
  """Prints out a row in a formatted table that has columns aligned.

  Args:
    column_widths: A list of column width numbers.
    row_data: A list of items for each column in this row.
  """
  assert len(column_widths) == len(row_data)
  text = ''
  for i in xrange(len(column_widths)):
    current_row_data = row_data[i].center(column_widths[i], ' ')
    text += ('%%%ds' % column_widths[i]) % current_row_data
  print text

def _PrintStepTime(revision_data_sorted):
  """Prints information about how long various steps took.

  Args:
    revision_data_sorted: The sorted list of revision data dictionaries."""
  step_perf_time_avg = 0.0
  step_build_time_avg = 0.0
  step_count = 0
  for _, current_data in revision_data_sorted:
    if current_data['value']:
      step_perf_time_avg += current_data['perf_time']
      step_build_time_avg += current_data['build_time']
      step_count += 1
  if step_count:
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
  print 'Average build time : %s' % datetime.timedelta(
      seconds=int(step_build_time_avg))
  print 'Average test time : %s' % datetime.timedelta(
      seconds=int(step_perf_time_avg))

def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
  """Compiles a list of other possible regressions from the revision data.

  Args:
    revision_data_sorted: Sorted list of (revision, revision data dict) pairs.
    bad_greater_than_good: Whether the result value at the "bad" revision is
        numerically greater than the result value at the "good" revision.

  Returns:
    A list of [current_rev, previous_rev, confidence] for other places where
    there may have been a regression.
  """
  other_regressions = []
  previous_values = []
  previous_id = None
  for current_id, current_data in revision_data_sorted:
    current_values = current_data['value']
    if current_values:
      current_values = current_values['values']
      if previous_values:
        confidence = ConfidenceScore(previous_values, [current_values])
        mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
        mean_of_current_runs = math_utils.Mean(current_values)

        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in the same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)

        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
  return other_regressions

class BisectPerformanceMetrics(object):
  """This class contains functionality to perform a bisection of a range of
  revisions to narrow down where performance regressions may have occurred.

  The main entry-point is the Run method.
  """

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])

  def PerformCleanup(self):
    """Performs cleanup when script is finished."""
    os.chdir(self.src_cwd)
    for c in self.cleanup_commands:
      if c[0] == 'mv':
        shutil.move(c[1], c[2])
      else:
        assert False, 'Invalid cleanup command.'

  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

      assert not return_code, ('An error occurred while running '
                               '"%s"' % ' '.join(cmd))

      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
, revision
):
1280 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1282 if bisect_utils
.IsStringInt(svn_revision
):
1283 # V8 is tricky to bisect, in that there are only a few instances when
1284 # we can dive into bleeding_edge and get back a meaningful result.
1285 # Try to detect a V8 "business as usual" case, which is when:
1286 # 1. trunk revision N has description "Version X.Y.Z"
1287 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1288 # trunk. Now working on X.Y.(Z+1)."
1290 # As of 01/24/2014, V8 trunk descriptions are formatted:
1291 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1292 # So we can just try parsing that out first and fall back to the old way.
1293 v8_dir
= self
._GetDepotDirectory
('v8')
1294 v8_bleeding_edge_dir
= self
._GetDepotDirectory
('v8_bleeding_edge')
1296 revision_info
= self
.source_control
.QueryRevisionInfo(revision
,
1299 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
1301 regex_results
= version_re
.search(revision_info
['subject'])
1306 # Look for "based on bleeding_edge" and parse out revision
1307 if 'based on bleeding_edge' in revision_info
['subject']:
1309 bleeding_edge_revision
= revision_info
['subject'].split(
1310 'bleeding_edge revision r')[1]
1311 bleeding_edge_revision
= int(bleeding_edge_revision
.split(')')[0])
1312 git_revision
= self
.source_control
.ResolveToRevision(
1313 bleeding_edge_revision
, 'v8_bleeding_edge', DEPOT_DEPS_NAME
, 1,
1314 cwd
=v8_bleeding_edge_dir
)
1316 except (IndexError, ValueError):
1319 if not git_revision
:
1320 # Wasn't successful, try the old way of looking for "Prepare push to"
1321 git_revision
= self
.source_control
.ResolveToRevision(
1322 int(svn_revision
) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME
, -1,
1323 cwd
=v8_bleeding_edge_dir
)
1326 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
,
1327 cwd
=v8_bleeding_edge_dir
)
1329 if 'Prepare push to trunk' in revision_info
['subject']:

  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
    cwd = self._GetDepotDirectory('v8')
    cmd = ['log', '--format=%ct', '-1', revision]
    output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
    commit_time = int(output)
    commits = []

    if search_forward:
      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output
      commits = reversed(commits)
    else:
      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output

    bleeding_edge_revision = None

    for c in commits:
      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
      if bleeding_edge_revision:
        break

    return bleeding_edge_revision

  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which may
    be needed if the bisect recurses into those depots later.

    Args:
      depot: Name of depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      deps_data = {
          'Var': lambda _: deps_data["vars"][_],
          'From': lambda *args: None,
      }
      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
      deps_data = deps_data['deps']

      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                                                      depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Couldn\'t parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
      parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results

  def _Get3rdPartyRevisions(self, depot):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      results = self._ParseRevisionsFromDEPSFile(depot)
    elif depot == 'cros':
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

      assert not return_code, ('An error occurred while running '
                               '"%s"' % ' '.join(cmd))

      if len(output) > len(CROS_CHROMEOS_PATTERN):
        output = output[len(CROS_CHROMEOS_PATTERN):]

      output = output.split('_')[0]

      contents = output.split('.')

      version = contents[2]

      if contents[3] != '0':
        warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' %
                       (version, contents[3], version))
        if not warningText in self.warnings:
          self.warnings.append(warningText)

      self.ChangeToDepotWorkingDirectory('chromium')
      cmd = ['log', '-1', '--format=%H',
             '--author=chrome-release@google.com',
             '--grep=to %s' % version, 'origin/master']
      return_code = bisect_utils.CheckRunGit(cmd)
      results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results

  def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
    """Backs up or restores build output directory based on restore argument.

    Args:
      restore: Indicates whether to restore or backup. Default is False (backup).
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to backup or restored location as a string, otherwise None if it
      fails.
    """
    build_dir = os.path.abspath(
        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
    source_dir = os.path.join(build_dir, build_type)
    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
    if restore:
      source_dir, destination_dir = destination_dir, source_dir
    if os.path.exists(source_dir):
      RmTreeAndMkDir(destination_dir, skip_makedir=True)
      shutil.move(source_dir, destination_dir)
      return destination_dir
    return None

  def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
                                 patch_sha, out_dir):
    """Checks and downloads build archive for a given revision.

    Checks for a build archive with the Git hash or SVN revision. If either of
    the files exists, then downloads the archive file.

    Args:
      revision: A Git hash revision.
      gs_bucket: Cloud storage bucket name.
      target_arch: 32 or 64 bit build target.
      patch_sha: SHA of the DEPS patch (used while bisecting 3rd party
          repositories).
      out_dir: Build output directory where downloaded file is stored.

    Returns:
      Downloaded archive file path if it exists, otherwise None.
    """
    # Source archive file path on cloud storage using Git revision.
    source_file = GetRemoteBuildPath(
        revision, self.opts.target_platform, target_arch, patch_sha)
    downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    if not downloaded_archive:
      # Get SVN revision for the given SHA.
      svn_revision = self.source_control.SVNFindRev(revision)
      if svn_revision:
        # Source archive file path on cloud storage using SVN revision.
        source_file = GetRemoteBuildPath(
            svn_revision, self.opts.target_platform, target_arch, patch_sha)
        return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    return downloaded_archive

  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if download succeeds, otherwise False.
    """
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add patch_sha evaluated above to it.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get build output directory.
    abs_build_dir = os.path.abspath(
        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path, downloads build archive for given revision.
    downloaded_file = fetch_build_func()

    # When the build archive doesn't exist, post a build request to the
    # tryserver and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip build archive directory.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      self.BackupOrRestoreOutputdirectory(restore=False)
      # Build output directory based on target (e.g. out/Release, out/Debug).
      target_build_output_dir = os.path.join(abs_build_dir, build_type)
      ExtractZip(downloaded_file, abs_build_dir)
      if not os.path.exists(output_dir):
        # Due to recipe changes, the builds extract folder contains
        # out/Release instead of full-build-<platform>/Release.
        if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
          output_dir = os.path.join(abs_build_dir, 'out', build_type)
        else:
          raise IOError('Missing extracted folder %s ' % output_dir)

      print 'Moving build from %s to %s' % (
          output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
      return True
    except Exception as e:
      print 'Something went wrong while extracting archive file: %s' % e
      self.BackupOrRestoreOutputdirectory(restore=True)
      # Cleanup any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete downloaded archive.
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    return False

  def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
    """POSTs the build request job to the tryserver instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.
    """
    # Get SVN revision for the given SHA.
    svn_revision = self.source_control.SVNFindRev(revision)
    if not svn_revision:
      raise RuntimeError(
          'Failed to determine SVN revision for %s' % revision)

    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and build time in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if bisect_utils.IsWindowsHost():
        if bisect_utils.Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if bisect_utils.IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if bisect_utils.IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to tryserver builders.
    # This ID is added to "Reason" property in build's json.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (svn_revision, patch, time.time()))

    # Creates a try job description.
    job_args = {
        'revision': 'src@%s' % svn_revision,
        'name': build_request_id,
    }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if bisect_builder.PostTryJob(builder_host, builder_port, job_args):
      target_file, error_msg = _WaitUntilBuildIsReady(
          fetch_build, bot_name, builder_host, builder_port, build_request_id,
          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, svn_revision)
      return target_file
    print 'Failed to post build request for revision: [%s]' % svn_revision
    return None
, depot
):
1675 """Checks if build is downloadable based on target platform and depot."""
1676 if (self
.opts
.target_platform
in ['chromium', 'android'] and
1677 self
.opts
.gs_bucket
):
1678 return (depot
== 'chromium' or
1679 'chromium' in DEPOT_DEPS_NAME
[depot
]['from'] or
1680 'v8' in DEPOT_DEPS_NAME
[depot
]['from'])

  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method searches DEPS for a particular pattern in which depot revision
    is specified (e.g. "webkit_revision": "123456"). If a match is found, it
    resolves the given git hash to an SVN revision and replaces it in the DEPS
    file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack for Angle repository. In the DEPS file, the "vars" dictionary
    # variable contains an "angle_revision" key that holds a git hash instead
    # of an SVN revision. And sometimes the "angle_revision" key is not
    # specified in the "vars" variable. In such cases check the "deps"
    # dictionary variable that matches angle.git@[a-fA-F0-9]{40}$ and replace
    # the git hash.
    if depot == 'angle':
      return _UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      # Check whether the depot and revision pattern is in the DEPS file vars,
      # e.g. for webkit the format is "webkit_revision": "12345".
      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
                                 re.MULTILINE)
      match = re.search(deps_revision, deps_contents)
      if match:
        svn_revision = self.source_control.SVNFindRev(
            revision, self._GetDepotDirectory(depot))
        if not svn_revision:
          print 'Could not determine SVN revision for %s' % revision
          return False
        # Update the revision information for the given depot.
        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)

        # For v8_bleeding_edge revisions change V8 branch in order
        # to fetch bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = _UpdateV8Branch(new_data)
        # Write changes to DEPS file.
        WriteStringToFile(new_data, deps_file)
        return True
    except Exception as e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    return False

  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    cmd = ['rev-parse', 'HEAD']
    chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(
          bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          diff_command = ['diff',
                          '--src-prefix=src/',
                          '--dst-prefix=src/',
                          '--no-ext-diff',
                          bisect_utils.FILE_DEPS,
                         ]
          diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError(
              'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
      else:
        raise RuntimeError(
            'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
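
  # Illustrative sketch (not part of the original tool): CreateDEPSPatch()
  # returns the current chromium git hash plus a git diff of the modified
  # DEPS file, which can then be applied as a patch when fetching a build.
  # Assuming a configured instance named `bisect_instance` and a hypothetical
  # v8 hash `v8_git_hash`:
  #
  #   chromium_sha, deps_patch = bisect_instance.CreateDEPSPatch(
  #       'v8', v8_git_hash)
  #   # deps_patch is unified-diff text with src/-prefixed paths.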

  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for the dependency repository.
        revision, deps_patch = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to the DEPS file.
          self.source_control.CheckoutFileAtRevision(
              bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
        return True
      return False

    # This code is executed when the bisect bot builds binaries locally.
    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success

  def RunGClientHooks(self):
    """Runs gclient with the runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    if self.opts.debug_ignore_build:
      return True
    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)

  def _IsBisectModeUsingMetric(self):
    return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]

  def _IsBisectModeReturnCode(self):
    return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]

  def _IsBisectModeStandardDeviation(self):
    return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]

  def GetCompatibleCommand(self, command_to_run, revision, depot):
    # Prior to crrev.com/274857 *only* android-chromium-testshell works.
    # Then until crrev.com/276628 *both* (android-chromium-testshell and
    # android-chrome-shell) work. After rev 276628 *only*
    # android-chrome-shell works. The bisect-perf-regression.py script should
    # handle these cases and set the appropriate browser type based on the
    # revision.
    if self.opts.target_platform in ['android']:
      # When it's a third_party depot, get the chromium revision.
      if depot != 'chromium':
        revision = bisect_utils.CheckRunGit(
            ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
      svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd)
      if not svn_revision:
        return command_to_run
      cmd_re = re.compile(r'--browser=(?P<browser_type>\S+)')
      matches = cmd_re.search(command_to_run)
      if bisect_utils.IsStringInt(svn_revision) and matches:
        cmd_browser = matches.group('browser_type')
        if svn_revision <= 274857 and cmd_browser == 'android-chrome-shell':
          return command_to_run.replace(cmd_browser,
                                        'android-chromium-testshell')
        elif (svn_revision >= 276628 and
              cmd_browser == 'android-chromium-testshell'):
          return command_to_run.replace(cmd_browser,
                                        'android-chrome-shell')
    return command_to_run
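
  # Illustrative sketch (not part of the original tool): for an Android bisect
  # synced to a revision at or below r274857, a command such as
  #
  #   tools/perf/run_benchmark --browser=android-chrome-shell smoothness
  #
  # (hypothetical benchmark name) is rewritten by GetCompatibleCommand() to
  # use --browser=android-chromium-testshell, since only that browser type
  # existed before crrev.com/274857; the reverse substitution applies at or
  # after r276628.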

  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance
          test. This is the result chart name and trace name, separated by
          slash.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
          The arguments reset_on_first_run, upload_on_last_run and
          results_label are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    if self.opts.debug_ignore_perf_test:
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For the Windows platform set posix=False, to parse Windows paths
    # correctly. On Windows, path separators '\' or '\\' are replaced by ''
    # when posix=True, refer to http://bugs.python.org/issue1724822.
    # By default posix=True.
    args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())

    if not _GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
            current_args, cwd=self.src_cwd)
      except OSError, e:
        if e.errno == errno.ENOENT:
          err_text = ('Something went wrong running the performance test. '
                      'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                         'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'
          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if self._IsBisectModeUsingMetric():
        metric_values += _ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of metrics
      # that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero
    # vs non-zero.
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
          'mean': overall_return_code,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    else:
      # Need to get the average value if there were multiple values.
      truncated_mean = math_utils.TruncatedMean(
          metric_values, self.opts.truncate_percent)
      standard_err = math_utils.StandardError(metric_values)
      standard_dev = math_utils.StandardDeviation(metric_values)

      if self._IsBisectModeStandardDeviation():
        metric_values = [standard_dev]

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
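
  # Illustrative sketch (not part of the original tool): assuming a configured
  # instance named `bisect_instance` and a hypothetical Telemetry command and
  # metric, a successful run returns a values dict, the success code and the
  # combined test output:
  #
  #   values, code, output = bisect_instance.RunPerformanceTestAndParseResults(
  #       'tools/perf/run_benchmark -v --browser=release sunspider',
  #       'Total/Total')
  #   if code == 0:
  #     print values['mean'], values['std_err']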

  def FindAllRevisionsToSync(self, revision, depot):
    """Finds all dependent revisions and depots that need to be synced for a
    given revision. This is only useful in the git workflow, as an svn depot
    may be split into multiple mirrors.

    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
    skia/include. To sync skia/src properly, one has to find the proper
    revisions in skia/gyp and skia/include.

    Args:
      revision: The revision to sync to.
      depot: The depot in use at the moment (probably skia).

    Returns:
      A list of [depot, revision] pairs that need to be synced.
    """
    revisions_to_sync = [[depot, revision]]

    is_base = ((depot == 'chromium') or (depot == 'cros') or
               (depot == 'android-chrome'))

    # Some SVN depots were split into multiple git depots, so we need to
    # figure out for each mirror which git revision to grab. There's no
    # guarantee that the SVN revision will exist for each of the dependent
    # depots, so we have to grep the git logs and grab the next earlier one.
    if (not is_base
        and DEPOT_DEPS_NAME[depot]['depends']
        and self.source_control.IsGit()):
      svn_rev = self.source_control.SVNFindRev(revision)

      for d in DEPOT_DEPS_NAME[depot]['depends']:
        self.ChangeToDepotWorkingDirectory(d)

        dependant_rev = self.source_control.ResolveToRevision(
            svn_rev, d, DEPOT_DEPS_NAME, -1000)

        if dependant_rev:
          revisions_to_sync.append([d, dependant_rev])

      num_resolved = len(revisions_to_sync)
      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

      self.ChangeToDepotWorkingDirectory(depot)

      if not ((num_resolved - 1) == num_needed):
        return None

    return revisions_to_sync
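
  # Illustrative sketch (not part of the original tool): for a skia/src
  # revision, FindAllRevisionsToSync() also resolves matching revisions in the
  # dependent mirrors listed in DEPOT_DEPS_NAME['skia/src']['depends'],
  # returning pairs shaped roughly like:
  #
  #   [['skia/src', '<sha1>'], ['skia/include', '<sha1>'],
  #    ['skia/gyp', '<sha1>']]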

  def PerformPreBuildCleanup(self):
    """Performs necessary cleanup between runs."""
    print 'Cleaning up between runs.'
    print

    # Having these pyc files around between runs can confuse the
    # perf tests and cause them to crash.
    for (path, _, files) in os.walk(self.src_cwd):
      for cur_file in files:
        if cur_file.endswith('.pyc'):
          path_to_file = os.path.join(path, cur_file)
          os.remove(path_to_file)

  def PerformWebkitDirectoryCleanup(self, revision):
    """If the script is switching between Blink and WebKit during bisect,
    it's faster to just delete the directory rather than leave it up to git
    to sync.

    Returns:
      True if successful.
    """
    if not self.source_control.CheckoutFileAtRevision(
        bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
      return False

    cwd = os.getcwd()
    os.chdir(self.src_cwd)

    is_blink = bisect_utils.IsDepsFileBlink()

    os.chdir(cwd)

    if not self.source_control.RevertFileToHead(
        bisect_utils.FILE_DEPS_GIT):
      return False

    if self.was_blink != is_blink:
      self.was_blink = is_blink
      # Removes third_party/Webkit directory.
      return bisect_utils.RemoveThirdPartyDirectory('Webkit')
    return True

  def PerformCrosChrootCleanup(self):
    """Deletes the chroot.

    Returns:
      True if successful.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [CROS_SDK_PATH, '--delete']
    return_code = bisect_utils.RunProcess(cmd)
    os.chdir(cwd)
    return not return_code

  def CreateCrosChroot(self):
    """Creates a new chroot.

    Returns:
      True if successful.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [CROS_SDK_PATH, '--create']
    return_code = bisect_utils.RunProcess(cmd)
    os.chdir(cwd)
    return not return_code

  def PerformPreSyncCleanup(self, revision, depot):
    """Performs any necessary cleanup before syncing.

    Returns:
      True if successful.
    """
    if depot == 'chromium' or depot == 'android-chrome':
      # Removes third_party/libjingle. At some point, libjingle was causing
      # issues syncing when using the git workflow (crbug.com/266324).
      os.chdir(self.src_cwd)
      if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
        return False
      # Removes third_party/skia. At some point, skia was causing
      # issues syncing when using the git workflow (crbug.com/377951).
      if not bisect_utils.RemoveThirdPartyDirectory('skia'):
        return False
      if depot == 'chromium':
        # The fast webkit cleanup doesn't work for android_chrome.
        # The switch from Webkit to Blink that this deals with now happened
        # quite a long time ago, so this is unlikely to be a problem.
        return self.PerformWebkitDirectoryCleanup(revision)
    elif depot == 'cros':
      return self.PerformCrosChrootCleanup()
    return True

  def RunPostSync(self, depot):
    """Performs any work after syncing.

    Returns:
      True if successful.
    """
    if self.opts.target_platform == 'android':
      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
          path_to_src=self.src_cwd):
        return False

    if depot == 'cros':
      return self.CreateCrosChroot()
    else:
      return self.RunGClientHooks()

  def ShouldSkipRevision(self, depot, revision):
    """Some commits can be safely skipped (such as a DEPS roll); since the
    tool is git-based, those changes would have no effect.

    Args:
      depot: The depot being bisected.
      revision: Current revision we're synced to.

    Returns:
      True if we should skip building/testing this revision.
    """
    if depot == 'chromium':
      if self.source_control.IsGit():
        cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
        output = bisect_utils.CheckRunGit(cmd)

        files = output.splitlines()

        if len(files) == 1 and files[0] == 'DEPS':
          return True

    return False

  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                              skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
                                        current_revision)
        if not self.source_control.SyncToRevision(current_revision,
                                                  sync_client):
          success = False
          break

    if success:
      success = self.RunPostSync(depot)
      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
                  BUILD_RESULT_SKIPPED)

        start_build_time = time.time()
        if self.BuildCurrentRevision(depot, revision):
          after_build_time = time.time()
          # Hack to support things that got changed.
          command_to_run = self.GetCompatibleCommand(
              command_to_run, revision, depot)
          results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                           metric)
          # Restore build output directory once the tests are done, to avoid
          # any discrepancies.
          if self.IsDownloadable(depot) and revision:
            self.BackupOrRestoreOutputdirectory(restore=True)

          external_revisions = self._Get3rdPartyRevisions(depot)

          if external_revisions is not None:
            return (results[0], results[1], external_revisions,
                    time.time() - after_build_time, after_build_time -
                    start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                    BUILD_RESULT_FAIL)
        else:
          return ('Failed to build revision: [%s]' % str(revision),
                  BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % str(revision),
              BUILD_RESULT_FAIL)

  def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
    """Given known good and bad values, decide if the current_value passed
    or failed.

    Args:
      current_value: The value of the metric being checked.
      known_bad_value: The reference value for a "failed" run.
      known_good_value: The reference value for a "passed" run.

    Returns:
      True if the current_value is closer to the known_good_value than the
      known_bad_value.
    """
    if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
      dist_to_good_value = abs(current_value['std_dev'] -
                               known_good_value['std_dev'])
      dist_to_bad_value = abs(current_value['std_dev'] -
                              known_bad_value['std_dev'])
    else:
      dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
      dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])

    return dist_to_good_value < dist_to_bad_value
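
  # Illustrative worked example (not part of the original tool): with
  # known_good_value['mean'] == 100.0, known_bad_value['mean'] == 130.0 and a
  # current run mean of 110.0, the distances are 10.0 (to good) and 20.0
  # (to bad), so _CheckIfRunPassed() reports the revision as passed.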

  def _GetDepotDirectory(self, depot_name):
    if depot_name == 'chromium':
      return self.src_cwd
    elif depot_name == 'cros':
      return self.cros_cwd
    elif depot_name in DEPOT_NAMES:
      return self.depot_cwd[depot_name]
    else:
      assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
                     'was added without proper support?' % depot_name)

  def ChangeToDepotWorkingDirectory(self, depot_name):
    """Given a depot, changes to the appropriate working directory.

    Args:
      depot_name: The name of the depot (see DEPOT_NAMES).
    """
    os.chdir(self._GetDepotDirectory(depot_name))

  def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
    r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
                                                 search_forward=True)
    r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
                                                 search_forward=False)
    min_revision_data['external']['v8_bleeding_edge'] = r1
    max_revision_data['external']['v8_bleeding_edge'] = r2

    if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
            min_revision_data['revision'])
        or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
            max_revision_data['revision'])):
      self.warnings.append(
          'Trunk revisions in V8 did not map directly to bleeding_edge. '
          'Attempted to expand the range to find V8 rolls which did map '
          'directly to bleeding_edge revisions, but results might not be '
          'valid.')

  def _FindNextDepotToBisect(
      self, current_depot, min_revision_data, max_revision_data):
    """Decides which depot the script should dive into next (if any).

    Args:
      current_depot: Current depot being bisected.
      min_revision_data: Data about the earliest revision in the bisect range.
      max_revision_data: Data about the latest revision in the bisect range.

    Returns:
      Name of the depot to bisect next, or None.
    """
    external_depot = None
    for next_depot in DEPOT_NAMES:
      if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
        if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
          continue

      if not (DEPOT_DEPS_NAME[next_depot]['recurse']
              and min_revision_data['depot']
              in DEPOT_DEPS_NAME[next_depot]['from']):
        continue

      if current_depot == 'v8':
        # We grab the bleeding_edge info here rather than earlier because we
        # finally have the revision range. From that we can search forwards
        # and backwards to try to match trunk revisions to bleeding_edge.
        self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

      if (min_revision_data['external'].get(next_depot) ==
          max_revision_data['external'].get(next_depot)):
        continue

      if (min_revision_data['external'].get(next_depot) and
          max_revision_data['external'].get(next_depot)):
        external_depot = next_depot
        break

    return external_depot
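
  # Illustrative sketch (not part of the original tool): if the narrowed
  # chromium range shows different 'v8' entries in
  # min_revision_data['external'] and max_revision_data['external'] (and both
  # are set), the V8 roll changed inside the suspect range, so
  # _FindNextDepotToBisect() returns 'v8' and the bisect descends into that
  # depot next.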

  def PrepareToBisectOnDepot(
      self, current_depot, end_revision, start_revision, previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.

    Args:
      current_depot: The depot we want to bisect.
      end_revision: End of the revision range.
      start_revision: Start of the revision range.
      previous_revision: The last revision we synced to on |previous_depot|.

    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    self.ChangeToDepotWorkingDirectory(current_depot)

    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
      config_path = os.path.join(self.src_cwd, '..')
      if bisect_utils.RunGClientAndCreateConfig(self.opts,
          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
        return []
      if bisect_utils.RunGClient(
          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
        return []

    if current_depot == 'v8_bleeding_edge':
      self.ChangeToDepotWorkingDirectory('chromium')

      shutil.move('v8', 'v8.bak')
      shutil.move('v8_bleeding_edge', 'v8')

      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

      self.ChangeToDepotWorkingDirectory(current_depot)

    depot_revision_list = self.GetRevisionList(current_depot,
                                               end_revision,
                                               start_revision)

    self.ChangeToDepotWorkingDirectory('chromium')

    return depot_revision_list

  def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
    """Gathers reference values by running the performance tests on the
    known good and bad revisions.

    Args:
      good_rev: The last known good revision where the performance regression
          has not occurred yet.
      bad_rev: A revision where the performance regression has already
          occurred.
      cmd: The command to execute the performance test.
      metric: The metric being tested for regression.

    Returns:
      A tuple with the results of building and running each revision.
    """
    bad_run_results = self.SyncBuildAndRunRevision(
        bad_rev, target_depot, cmd, metric)

    good_run_results = None

    if not bad_run_results[1]:
      good_run_results = self.SyncBuildAndRunRevision(
          good_rev, target_depot, cmd, metric)

    return (bad_run_results, good_run_results)

  def PrintRevisionsToBisectMessage(self, revision_list, depot):
    if self.opts.output_buildbot_annotations:
      step_name = 'Bisection Range: [%s - %s]' % (
          revision_list[len(revision_list)-1], revision_list[0])
      bisect_utils.OutputAnnotationStepStart(step_name)

    print
    print 'Revisions to bisect on [%s]:' % depot
    for revision_id in revision_list:
      print ' -> %s' % (revision_id, )
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to the DEPS file occurred, and that the
    revision range also includes the change to .DEPS.git. If it doesn't,
    attempts to expand the revision range to include it.

    Args:
      bad_revision: First known bad revision.
      good_revision: Last known good revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change.
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = bisect_utils.CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = bisect_utils.CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)

  def CheckIfRevisionsInProperOrder(
      self, target_depot, good_revision, bad_revision):
    """Checks that |good_revision| is an earlier revision than |bad_revision|.

    Args:
      good_revision: Number/tag of the known good revision.
      bad_revision: Number/tag of the known bad revision.

    Returns:
      True if the revisions are in the proper order (good earlier than bad).
    """
    if self.source_control.IsGit() and target_depot != 'cros':
      cmd = ['log', '--format=%ct', '-1', good_revision]
      cwd = self._GetDepotDirectory(target_depot)

      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      good_commit_time = int(output)

      cmd = ['log', '--format=%ct', '-1', bad_revision]
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      bad_commit_time = int(output)

      return good_commit_time <= bad_commit_time
    else:
      # Cros/svn use integers.
      return int(good_revision) <= int(bad_revision)
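
  # Illustrative sketch (not part of the original tool): for git depots the
  # ordering check compares commit timestamps, e.g. (hypothetical values)
  #
  #   git log --format=%ct -1 <good_revision>   ->  1402000000
  #   git log --format=%ct -1 <bad_revision>    ->  1402100000
  #
  # and 1402000000 <= 1402100000, so the pair is accepted as good-before-bad.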

  def CanPerformBisect(self, revision_to_check):
    """Checks whether a given revision is bisectable.

    Note: At present it checks whether a given revision is bisectable on
    Android bots (refer to crbug.com/385324).

    Args:
      revision_to_check: Known good revision.

    Returns:
      A dictionary indicating the result. If the revision is not bisectable,
      this will contain the field "error", otherwise None.
    """
    if self.opts.target_platform == 'android':
      revision_to_check = self.source_control.SVNFindRev(revision_to_check)
      if (bisect_utils.IsStringInt(revision_to_check)
          and revision_to_check < 265549):
        return {'error': (
            'Bisect cannot continue for the given revision range.\n'
            'It is impossible to bisect Android regressions '
            'prior to r265549, which allows the bisect bot to '
            'rely on Telemetry to do apk installation of the most recently '
            'built local ChromeShell (refer to crbug.com/385324).\n'
            'Please try bisecting revisions greater than or equal to '
            'r265549.')}
    return None

  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
    """Given known good and bad revisions, run a binary search on all
    intermediate revisions to determine the CL where the performance
    regression occurred.

    Args:
      command_to_run: Specify the command to execute the performance test.
      good_revision: Number/tag of the known good revision.
      bad_revision: Number/tag of the known bad revision.
      metric: The performance metric to monitor.

    Returns:
      A dict with 2 members, 'revision_data' and 'error'. On success,
      'revision_data' will contain a dict mapping revision ids to
      data about that revision. Each piece of revision data consists of a
      dict with the following keys:

      'passed': Represents whether the performance test was successful at
          that revision. Possible values include: 1 (passed), 0 (failed),
          '?' (skipped), 'F' (build failed).
      'depot': The depot that this revision is from (ie. WebKit)
      'external': If the revision is a 'src' revision, 'external' contains
          the revisions of each of the external libraries.
      'sort': A sort value for sorting the dict in order of commits.

      If an error occurred, the 'error' field will contain the message and
      'revision_data' will be empty.
    """
    results = {
        'revision_data' : {},
        'error' : None,
    }

    # Choose depot to bisect first.
    target_depot = 'chromium'
    if self.opts.target_platform == 'cros':
      target_depot = 'cros'
    elif self.opts.target_platform == 'android-chrome':
      target_depot = 'android-chrome'

    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(target_depot)

    # If they passed SVN CL's, etc... we can try match them to git SHA1's.
    bad_revision = self.source_control.ResolveToRevision(
        bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
    good_revision = self.source_control.ResolveToRevision(
        good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)

    os.chdir(cwd)

    if bad_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
      return results

    if good_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
      return results

    # Check that they didn't accidentally swap good and bad revisions.
    if not self.CheckIfRevisionsInProperOrder(
        target_depot, good_revision, bad_revision):
      results['error'] = ('bad_revision < good_revision, did you swap these '
                          'by mistake?')
      return results

    bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
        bad_revision, good_revision)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

    cannot_bisect = self.CanPerformBisect(good_revision)
    if cannot_bisect:
      results['error'] = cannot_bisect.get('error')
      return results

    print 'Gathering revision range for bisection.'
    # Retrieve a list of revisions to do bisection on.
    src_revision_list = self.GetRevisionList(
        target_depot, bad_revision, good_revision)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    if src_revision_list:
      # revision_data will store information about a revision such as the
      # depot it came from, the webkit/V8 revision at that time,
      # performance timing, build state, etc...
      revision_data = results['revision_data']

      # revision_list is the list we're binary searching through at the
      # moment.
      revision_list = []

      sort_key_ids = 0

      for current_revision_id in src_revision_list:
        sort_key_ids += 1

        revision_data[current_revision_id] = {
            'value' : None,
            'passed' : '?',
            'depot' : target_depot,
            'external' : None,
            'perf_time' : 0,
            'build_time' : 0,
            'sort' : sort_key_ids,
        }
        revision_list.append(current_revision_id)

      min_revision = 0
      max_revision = len(revision_list) - 1

      self.PrintRevisionsToBisectMessage(revision_list, target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

      print 'Gathering reference values for bisection.'

      # Perform the performance tests on the good and bad revisions, to get
      # reference values.
      bad_results, good_results = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepClosed()

      if bad_results[1]:
        results['error'] = ('An error occurred while building and running '
            'the \'bad\' reference value. The bisect cannot continue without '
            'a working \'bad\' revision to start from.\n\nError: %s' %
            bad_results[0])
        return results

      if good_results[1]:
        results['error'] = ('An error occurred while building and running '
            'the \'good\' reference value. The bisect cannot continue without '
            'a working \'good\' revision to start from.\n\nError: %s' %
            good_results[0])
        return results

      # We need these reference values to determine if later runs should be
      # classified as pass or fail.
      known_bad_value = bad_results[0]
      known_good_value = good_results[0]

      # Can just mark the good and bad revisions explicitly here since we
      # already know the results.
      bad_revision_data = revision_data[revision_list[0]]
      bad_revision_data['external'] = bad_results[2]
      bad_revision_data['perf_time'] = bad_results[3]
      bad_revision_data['build_time'] = bad_results[4]
      bad_revision_data['passed'] = False
      bad_revision_data['value'] = known_bad_value

      good_revision_data = revision_data[revision_list[max_revision]]
      good_revision_data['external'] = good_results[2]
      good_revision_data['perf_time'] = good_results[3]
      good_revision_data['build_time'] = good_results[4]
      good_revision_data['passed'] = True
      good_revision_data['value'] = known_good_value

      next_revision_depot = target_depot

      while True:
        if not revision_list:
          break

        min_revision_data = revision_data[revision_list[min_revision]]
        max_revision_data = revision_data[revision_list[max_revision]]

        if max_revision - min_revision <= 1:
          current_depot = min_revision_data['depot']
          if min_revision_data['passed'] == '?':
            next_revision_index = min_revision
          elif max_revision_data['passed'] == '?':
            next_revision_index = max_revision
          elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
            previous_revision = revision_list[min_revision]
            # If there were changes to any of the external libraries we track,
            # should bisect the changes there as well.
            external_depot = self._FindNextDepotToBisect(
                current_depot, min_revision_data, max_revision_data)

            # If there was no change in any of the external depots, the search
            # is over.
            if not external_depot:
              if current_depot == 'v8':
                self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                    'continue any further. The script can only bisect into '
                    'V8\'s bleeding_edge repository if both the current and '
                    'previous revisions in trunk map directly to revisions '
                    'in bleeding_edge.')
              break

            earliest_revision = max_revision_data['external'][external_depot]
            latest_revision = min_revision_data['external'][external_depot]

            new_revision_list = self.PrepareToBisectOnDepot(
                external_depot, latest_revision, earliest_revision,
                previous_revision)

            if not new_revision_list:
              results['error'] = ('An error occurred attempting to retrieve '
                                  'revision range: [%s..%s]' %
                                  (earliest_revision, latest_revision))
              return results

            _AddRevisionsIntoRevisionData(
                new_revision_list, external_depot, min_revision_data['sort'],
                revision_data)

            # Reset the bisection and perform it on the newly inserted
            # revisions.
            revision_list = new_revision_list
            min_revision = 0
            max_revision = len(revision_list) - 1
            sort_key_ids += len(revision_list)

            print ('Regression in metric %s appears to be the result of '
                   'changes in [%s].' % (metric, external_depot))

            self.PrintRevisionsToBisectMessage(revision_list, external_depot)

            continue
          else:
            break
        else:
          next_revision_index = (int((max_revision - min_revision) / 2) +
                                 min_revision)

        next_revision_id = revision_list[next_revision_index]
        next_revision_data = revision_data[next_revision_id]
        next_revision_depot = next_revision_data['depot']

        self.ChangeToDepotWorkingDirectory(next_revision_depot)

        if self.opts.output_buildbot_annotations:
          step_name = 'Working on [%s]' % next_revision_id
          bisect_utils.OutputAnnotationStepStart(step_name)

        print 'Working on revision: [%s]' % next_revision_id

        run_results = self.SyncBuildAndRunRevision(next_revision_id,
                                                   next_revision_depot,
                                                   command_to_run,
                                                   metric, skippable=True)

        # If the build is successful, check whether or not the metric
        # had regressed.
        if not run_results[1]:
          if len(run_results) > 2:
            next_revision_data['external'] = run_results[2]
            next_revision_data['perf_time'] = run_results[3]
            next_revision_data['build_time'] = run_results[4]

          passed_regression = self._CheckIfRunPassed(run_results[0],
                                                     known_good_value,
                                                     known_bad_value)

          next_revision_data['passed'] = passed_regression
          next_revision_data['value'] = run_results[0]

          if passed_regression:
            max_revision = next_revision_index
          else:
            min_revision = next_revision_index
        else:
          if run_results[1] == BUILD_RESULT_SKIPPED:
            next_revision_data['passed'] = 'Skipped'
          elif run_results[1] == BUILD_RESULT_FAIL:
            next_revision_data['passed'] = 'Build Failed'

          print run_results[0]

          # If the build is broken, remove it and redo the search.
          revision_list.pop(next_revision_index)

          max_revision -= 1

        if self.opts.output_buildbot_annotations:
          self._PrintPartialResults(results)
          bisect_utils.OutputAnnotationStepClosed()
    else:
      # Weren't able to sync and retrieve the revision range.
      results['error'] = ('An error occurred attempting to retrieve revision '
                          'range: [%s..%s]' % (good_revision, bad_revision))

    return results
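
  # Illustrative sketch (not part of the original tool): a typical driver,
  # assuming parsed options in `opts` and an instance of the bisect metrics
  # class above named `bisect_test` (both hypothetical names):
  #
  #   results = bisect_test.Run(opts.command, opts.bad_revision,
  #                             opts.good_revision, opts.metric)
  #   if results['error']:
  #     print 'Error: %s' % results['error']
  #   else:
  #     bisect_test.FormatAndPrintResults(results)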

  def _PrintPartialResults(self, results_dict):
    revision_data = results_dict['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  100, final_step=False)

  def _ConfidenceLevelStatus(self, results_dict):
    if not results_dict['confidence']:
      return None
    confidence_status = 'Successful with %(level)s confidence%(warning)s.'
    if results_dict['confidence'] >= 95:
      level = 'high'
    else:
      level = 'low'
    warning = ' and warnings'
    if not self.warnings:
      warning = ''
    return confidence_status % {'level': level, 'warning': warning}

  def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
    info = self.source_control.QueryRevisionInfo(cl,
        self._GetDepotDirectory(depot))
    if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
      try:
        # Format is "git-svn-id: svn://....@123456 <other data>"
        svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
        svn_revision = svn_line[0].split('@')
        svn_revision = svn_revision[1].split(' ')[0]
        return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
      except IndexError:
        return ''
    return ''

  def _PrintRevisionInfo(self, cl, info, depot=None):
    email_info = ''
    if not info['email'].startswith(info['author']):
      email_info = '\nEmail   : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      commit_info = '\nLink    : %s' % commit_link
    else:
      commit_info = ('\nFailed to parse svn revision from body:\n%s' %
                     info['body'])
    print RESULTS_REVISION_INFO % {
        'subject': info['subject'],
        'author': info['author'],
        'email_info': email_info,
        'commit_info': commit_info,
        'cl': cl,
        'cl_date': info['date']
    }

  def _PrintTestedCommitsHeader(self):
    if self.opts.bisect_mode == BISECT_MODE_MEAN:
      _PrintTableRow(
          [20, 70, 14, 12, 13],
          ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
    elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
      _PrintTableRow(
          [20, 70, 14, 12, 13],
          ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
    elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
      _PrintTableRow(
          [20, 70, 14, 13],
          ['Depot', 'Commit SHA', 'Return Code', 'State'])
    else:
      assert False, 'Invalid bisect_mode specified.'

  def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
    if self.opts.bisect_mode == BISECT_MODE_MEAN:
      std_error = '+-%.02f' % current_data['value']['std_err']
      mean = '%.02f' % current_data['value']['mean']
      _PrintTableRow(
          [20, 70, 12, 14, 13],
          [current_data['depot'], cl_link, mean, std_error, state_str])
    elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
      std_error = '+-%.02f' % current_data['value']['std_err']
      mean = '%.02f' % current_data['value']['mean']
      _PrintTableRow(
          [20, 70, 12, 14, 13],
          [current_data['depot'], cl_link, std_error, mean, state_str])
    elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
      mean = '%d' % current_data['value']['mean']
      _PrintTableRow(
          [20, 70, 14, 13],
          [current_data['depot'], cl_link, mean, state_str])

  def _PrintTestedCommitsTable(
      self, revision_data_sorted, first_working_revision, last_broken_revision,
      confidence, final_step=True):
    print
    if final_step:
      print '===== TESTED COMMITS ====='
    else:
      print '===== PARTIAL RESULTS ====='
    self._PrintTestedCommitsHeader()
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
                                                      current_data['depot'])
        if not cl_link:
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)

  def _PrintReproSteps(self):
    """Prints out a section of the results explaining how to run the test.

    This message includes the command used to run the test.
    """
    command = '$ ' + self.opts.command
    if bisect_utils.IsTelemetryCommand(self.opts.command):
      command += ('\nAlso consider passing --profiler=list to see available '
                  'profilers.')
    print REPRO_STEPS_LOCAL % {'command': command}
    print REPRO_STEPS_TRYJOB % {'command': command}

  def _PrintOtherRegressions(self, other_regressions, revision_data):
    """Prints a section of the results about other potential regressions."""
    print
    print 'Other regressions may have occurred:'
    print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
        'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
    for regression in other_regressions:
      current_id, previous_id, confidence = regression
      current_data = revision_data[current_id]
      previous_data = revision_data[previous_id]

      current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
          current_data['depot'])
      previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
          previous_data['depot'])

      # If we can't map it to a viewable URL, at least show the original hash.
      if not current_link:
        current_link = current_id
      if not previous_link:
        previous_link = previous_id

      print ' %8s %70s %s' % (
          current_data['depot'], current_link,
          ('%d%%' % confidence).center(10, ' '))
      print ' %8s %70s' % (
          previous_data['depot'], previous_link)
    print

  def _GetResultsDict(self, revision_data, revision_data_sorted):
    # Find the range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1

    culprit_revisions = []
    other_regressions = []
    regression_size = 0.0
    regression_std_err = 0.0
    confidence = 0.0

    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i

      if not v['passed']:
        last_broken_revision = k
        last_broken_revision_index = i

    if last_broken_revision != None and first_working_revision != None:
      broken_means = []
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])

      working_means = []
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])

      # Flatten the lists to calculate the mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])

      # Calculate the approximate size of the regression.
      mean_of_bad_runs = math_utils.Mean(broken_mean)
      mean_of_good_runs = math_utils.Mean(working_mean)

      regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
                                                        mean_of_bad_runs)
      if math.isnan(regression_size):
        regression_size = 'zero-to-nonzero'

      regression_std_err = math.fabs(math_utils.PooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

      # Give a "confidence" in the bisect. At the moment we use how distinct
      # the values are before and after the last broken revision, and how
      # noisy the overall graph is.
      confidence = ConfidenceScore(working_means, broken_means)

      culprit_revisions = []

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])

      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

        changes = []
        assert not return_code, ('An error occurred while running '
                                 '"%s"' % ' '.join(cmd))
        last_depot = None
        for l in output.split('\n'):
          if l:
            # Output will be in the form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])
        for c in changes:
          os.chdir(c[0])
          info = self.source_control.QueryRevisionInfo(c[1])
          culprit_revisions.append((c[1], info, None))
      else:
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break
          self.ChangeToDepotWorkingDirectory(v['depot'])
          info = self.source_control.QueryRevisionInfo(k)
          culprit_revisions.append((k, info, v['depot']))
      os.chdir(cwd)

      # Check for any other possible regression ranges.
      other_regressions = _FindOtherRegressions(
          revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)

    return {
        'first_working_revision': first_working_revision,
        'last_broken_revision': last_broken_revision,
        'culprit_revisions': culprit_revisions,
        'other_regressions': other_regressions,
        'regression_size': regression_size,
        'regression_std_err': regression_std_err,
        'confidence': confidence,
    }
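
  # Illustrative worked example (not part of the original tool): with
  # mean_of_good_runs == 100.0 and mean_of_bad_runs == 120.0, the relative
  # change is 0.2, so regression_size is reported as 20.0 (i.e. 20%);
  # regression_std_err likewise scales the pooled standard error by the
  # smaller of the two means and expresses it as a percentage.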

  def _CheckForWarnings(self, results_dict):
    if len(results_dict['culprit_revisions']) > 1:
      self.warnings.append('Due to build errors, regression range could '
                           'not be narrowed down to a single commit.')
    if self.opts.repeat_test_count == 1:
      self.warnings.append('Tests were only set to run once. This may '
                           'be insufficient to get meaningful results.')
    if results_dict['confidence'] < 100:
      if results_dict['confidence']:
        self.warnings.append(
            'Confidence is less than 100%. There could be other candidates '
            'for this regression. Try bisecting again with increased '
            'repeat_count or on a sub-metric that shows the regression more '
            'clearly.')
      else:
        self.warnings.append(
            'Confidence is 0%. Try bisecting again on another platform, with '
            'increased repeat_count or on a sub-metric that shows the '
            'regression more clearly.')

  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    self._CheckForWarnings(results_dict)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
                               current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    self._PrintBanner(results_dict)
    self._PrintWarnings()

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
                                    revision_data)
    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  results_dict['confidence'])
    _PrintStepTime(revision_data_sorted)
    self._PrintReproSteps()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

  def _PrintBanner(self, results_dict):
    if self._IsBisectModeReturnCode():
      metrics = 'N/A'
      change = 'Yes'
    else:
      metrics = '/'.join(self.opts.metric)
      change = '%.02f%% (+/-%.02f%%)' % (
          results_dict['regression_size'], results_dict['regression_std_err'])

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      status = self._ConfidenceLevelStatus(results_dict)
    else:
      status = 'Failure, could not reproduce.'
      change = 'Bisect could not reproduce a change.'

    print RESULTS_BANNER % {
        'status': status,
        'command': self.opts.command,
        'metrics': metrics,
        'change': change,
        'confidence': results_dict['confidence'],
    }

  def _PrintWarnings(self):
    """Prints a list of warning strings if there are any."""
    if not self.warnings:
      return

    print 'WARNINGS:'
    for w in set(self.warnings):
      print '  !!! %s' % w


def _IsPlatformSupported():
  """Checks that this platform and build system are supported.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  supported = ['posix', 'nt']
  return os.name in supported


def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating an empty directory, default is
        False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError, e:
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True


def RemoveBuildFiles(build_type):
  """Removes build files from previous runs."""
  if RmTreeAndMkDir(os.path.join('out', build_type)):
    if RmTreeAndMkDir(os.path.join('build', build_type)):
      return True
  return False


class BisectOptions(object):
  """Options to be used when running bisection."""

  def __init__(self):
    super(BisectOptions, self).__init__()

    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.goma_dir = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.target_build_type = 'Release'
    self.builder_host = None
    self.builder_port = None
    self.bisect_mode = BISECT_MODE_MEAN
3358 def _CreateCommandLineParser():
3359 """Creates a parser with bisect options.
3362 An instance of optparse.OptionParser.
3364 usage
= ('%prog [options] [-- chromium-options]\n'
3365 'Perform binary search on revision history to find a minimal '
3366 'range of revisions where a peformance metric regressed.\n')
3368 parser
= optparse
.OptionParser(usage
=usage
)
3370 group
= optparse
.OptionGroup(parser
, 'Bisect options')
3371 group
.add_option('-c', '--command',
3373 help='A command to execute your performance test at' +
3374 ' each point in the bisection.')
3375 group
.add_option('-b', '--bad_revision',
3377 help='A bad revision to start bisection. ' +
3378 'Must be later than good revision. May be either a git' +
3379 ' or svn revision.')
3380 group
.add_option('-g', '--good_revision',
3382 help='A revision to start bisection where performance' +
3383 ' test is known to pass. Must be earlier than the ' +
3384 'bad revision. May be either a git or svn revision.')
3385 group
.add_option('-m', '--metric',
3387 help='The desired metric to bisect on. For example ' +
3388 '"vm_rss_final_b/vm_rss_f_b"')
3389 group
.add_option('-r', '--repeat_test_count',
3392 help='The number of times to repeat the performance '
3393 'test. Values will be clamped to range [1, 100]. '
3394 'Default value is 20.')
3395 group
.add_option('--max_time_minutes',
3398 help='The maximum time (in minutes) to take running the '
3399 'performance tests. The script will run the performance '
3400 'tests according to --repeat_test_count, so long as it '
3401 'doesn\'t exceed --max_time_minutes. Values will be '
3402 'clamped to range [1, 60].'
3403 'Default value is 20.')
3404 group
.add_option('-t', '--truncate_percent',
3407 help='The highest/lowest % are discarded to form a '
3408 'truncated mean. Values will be clamped to range [0, '
3409 '25]. Default value is 25 (highest/lowest 25% will be '
3411 group
.add_option('--bisect_mode',
3413 choices
=[BISECT_MODE_MEAN
, BISECT_MODE_STD_DEV
,
3414 BISECT_MODE_RETURN_CODE
],
3415 default
=BISECT_MODE_MEAN
,
3416 help='The bisect mode. Choices are to bisect on the '
3417 'difference in mean, std_dev, or return_code.')
3418 parser
.add_option_group(group
)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory, and that will be used to perform '
                     'the bisection. This parameter is optional; if it is '
                     'not supplied, the script will work from the current '
                     'depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On Linux/Mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action='store_true',
                     help='Run the script without the custom_deps entries.')
    group.add_option('--extra_src',
                     help='Path to a script which can be used to modify the '
                     'bisect script\'s behavior.')
    group.add_option('--cros_board',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action='store_true',
                     help='Add a bunch of extra threads for goma, and enable '
                     'goma.')
    group.add_option('--goma_dir',
                     help='Path to goma tools (or system default if not '
                     'specified).')
    group.add_option('--output_buildbot_annotations',
                     action='store_true',
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     help=('Name of the Google Storage bucket to upload or '
                           'download builds, e.g. chrome-perf. Requires '
                           '--builder_host and --builder_port.'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     help=('The target build architecture. Choices are "ia32" '
                           '(default), "x64" or "arm".'))
    group.add_option('--target_build_type',
                     type='choice',
                     choices=['Release', 'Debug'],
                     default='Release',
                     help='The target build type. Choices are "Release" '
                     '(default) or "Debug".')
    group.add_option('--builder_host',
                     dest='builder_host',
                     help=('Host address of the server that produces builds '
                           'when a try job request is posted.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     help=('HTTP port of the server that produces builds '
                           'when a try job request is posted.'))
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action='store_true',
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action='store_true',
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action='store_true',
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)

    return parser
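
  # A rough sketch of a minimal invocation accepted by ParseCommandLine below;
  # the test command and revisions are placeholders, and --metric may only be
  # omitted when --bisect_mode is return_code:
  #
  #   ./tools/bisect-perf-regression.py \
  #       --command "out/Release/base_unittests" \
  #       --good_revision <good-rev> \
  #       --bad_revision <bad-rev> \
  #       --bisect_mode return_code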

  def ParseCommandLine(self):
    """Parses the command line for bisect options."""
    parser = self._CreateCommandLineParser()
    opts, _ = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        if not cloud_storage.List(opts.gs_bucket):
          raise RuntimeError('Invalid Google Storage bucket: gs://%s' %
                             opts.gs_bucket)
        if not opts.builder_host:
          raise RuntimeError('Must specify try server hostname when '
                             'gs_bucket is used: --builder_host')
        if not opts.builder_port:
          raise RuntimeError('Must specify try server port number when '
                             'gs_bucket is used: --builder_port')

      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print 'Sudo is required to build cros:'
        bisect_utils.RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if (len(metric_values) != 2 and
          opts.bisect_mode != BISECT_MODE_RETURN_CODE):
        raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)

      opts.metric = metric_values
      # Clamp numeric options to their documented ranges and convert the
      # truncation percentage into a fraction.
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy the parsed options onto this BisectOptions instance.
      for k, v in opts.__dict__.iteritems():
        assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
        setattr(self, k, v)
    except RuntimeError, e:
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions from a dict of parsed values.

    Args:
      values: A dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    opts = BisectOptions()
    for k, v in values.iteritems():
      assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)

    opts.metric = metric_values
    # Apply the same clamping and conversion as ParseCommandLine.
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
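
  # A hypothetical sketch of how FromDict might be driven from a config dict;
  # the keys mirror the option names defined above, and the values shown are
  # made up for illustration:
  #
  #   opts = BisectOptions.FromDict({
  #       'command': 'out/Release/performance_browser_tests',
  #       'good_revision': '<good-rev>',
  #       'bad_revision': '<bad-rev>',
  #       'metric': 'page_cycler/times',
  #   })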


def main():

  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError('Invalid or missing --extra_src.')
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

      if not RemoveBuildFiles(opts.target_build_type):
        raise RuntimeError('Something went wrong removing the build files.')

    if not _IsPlatformSupported():
      raise RuntimeError('Sorry, this platform isn\'t supported yet.')

    # Check which source control method is being used, and create a
    # SourceControl object if possible.
    source_control = source_control_module.DetermineAndCreateSourceControl(
        opts)

    if not source_control:
      raise RuntimeError(
          'Sorry, only the git workflow is supported at the moment.')

    # gclient sync seems to fail if you're not on the master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError('You must switch to master branch to run bisection.')

    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1


if __name__ == '__main__':
  sys.exit(main())