# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.
Example usage using SVN revisions:

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an SVN revision,
the script will attempt to find the git SHA1 where SVN changes up to that
revision were merged in.

Example usage using git hashes:

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""

import copy
import datetime
import errno
import hashlib
import os
import re
import shlex
import shutil
import sys
import time
import zipfile

sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))

from auto_bisect import bisect_utils
from auto_bisect import builder
from auto_bisect import math_utils
from auto_bisect import request_build
from auto_bisect import source_control as source_control_module
from auto_bisect import ttest
from telemetry.util import cloud_storage

# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
#   src: Path to the working directory.
#   recurse: True if this repository will get bisected.
#   depends: A list of other repositories that are actually part of the same
#       repository in svn. If the repository has any dependent repositories
#       (e.g. skia/src needs skia/include and skia/gyp to be updated), then
#       they are specified here.
#   svn: URL of SVN repository. Needed for git workflow to resolve hashes to
#       SVN revisions.
#   from: Parent depot that must be bisected before this is bisected.
#   deps_var: Key name in the vars variable in the DEPS file that has the
#       revision information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'from': ['cros', 'android-chrome'],
    'viewvc':
        'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'viewvc':
        'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'from': ['chromium'],
    'deps_var': 'angle_revision'
  },
  'v8': {
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r='
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r='
  }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'

# Possible return values from BisectPerformanceMetrics.RunTest.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to the try server.
# TODO: Change these values based on the actual time taken by buildbots on
# the try server.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# The confidence percentage at which confidence can be considered "high".

# Patch template to add a new file, DEPS.sha, under the src folder.
# This file contains the SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# try server. Once a build requested with a patch is produced, the bisect
# builders on the try server read the SHA value from this file and append it
# to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'

# The perf dashboard looks for a string like "Estimated Confidence: 95%"
# to decide whether or not to cc the author(s). If you change this, please
# update the perf dashboard as well.
===== BISECT JOB RESULTS =====
Test Command: %(command)s
Test Metric: %(metrics)s
Relative Change: %(change)s
Estimated Confidence: %(confidence).02f%%"""

# The perf dashboard specifically looks for the string
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s%(email_info)s%(commit_info)s
Date : %(cl_date)s"""

REPRO_STEPS_LOCAL = """
==== INSTRUCTIONS TO REPRODUCE ====
REPRO_STEPS_TRYJOB = """
To reproduce on a performance try bot:
1. Create new git branch or check out existing branch.
2. Edit tools/run-perf-test.cfg (instructions in file) or \
third_party/WebKit/Tools/run-perf-test.cfg.
  a) Take care to strip any src/ directories from the head of \
relative path names.
  b) On desktop, only --browser=release is supported; on Android use \
--browser=android-chromium-testshell.
  c) Test command to use: %(command)s
3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
committed locally to run-perf-test.cfg.
Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
  $ git cl upload --bypass-hooks
4. Send your try job to the try server. \
[Please make sure to use appropriate bot to reproduce]
  $ git cl try -m tryserver.chromium.perf -b <bot>

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""

REPRO_STEPS_TRYJOB_TELEMETRY = """
To reproduce on a performance try bot:
(Where <bot-name> comes from tools/perf/run_benchmark --browser=list)

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots
"""

RESULTS_THANKYOU = """
===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
Visit http://www.chromium.org/developers/core-principles for Chrome's policy
Contact chrome-perf-dashboard-team with any questions or suggestions about
.    | ---------'-------'-----------.
.    . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
.     \______________.-------._______________)


def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
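
# Illustrative use of _AddAdditionalDepotInfo() above (not from the original
# source; the depot name and fields are hypothetical):
#
#   _AddAdditionalDepotInfo({
#       'my_depot': {
#           'src': 'src/third_party/my_depot',
#           'from': ['chromium'],
#           'deps_var': 'my_depot_revision',
#       },
#   })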


def ConfidenceScore(good_results_lists, bad_results_lists):
  """Calculates a confidence score.

  This score is a percentage which represents our degree of confidence in the
  proposition that the good results and bad results are distinct groups, and
  their differences aren't due to chance alone.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # If there's only one item in either list, this means only one revision was
  # classified good or bad; this isn't good enough evidence to make a decision.
  # If an empty list was passed, that also implies zero confidence.
  if len(good_results_lists) <= 1 or len(bad_results_lists) <= 1:
    return 0.0

  # Flatten the lists of results lists.
  sample1 = sum(good_results_lists, [])
  sample2 = sum(bad_results_lists, [])

  # If there were only empty lists in either of the lists (this is unexpected
  # and normally shouldn't happen), then we also want to return 0.
  if not sample1 or not sample2:
    return 0.0

  # The p-value is approximately the probability of obtaining the given set
  # of good and bad values just by chance.
  _, _, p_value = ttest.WelchsTTest(sample1, sample2)
  return 100.0 * (1.0 - p_value)
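
# Illustrative example of ConfidenceScore() above (all numbers are made up):
# two or more "good" and "bad" revisions with well-separated values give a
# score near 100, while a single revision on either side gives 0.
#
#   ConfidenceScore([[19.9, 20.1], [20.0, 20.2]],
#                   [[25.0, 25.2], [24.9, 25.1]])   # close to 100.0
#   ConfidenceScore([[20.0, 20.1]], [[25.0, 25.1]]) # 0.0, only one run per side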


def GetSHA1HexDigest(contents):
  """Returns SHA1 hex digest of the given string."""
  return hashlib.sha1(contents).hexdigest()


def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Returns a string to be used in paths for the platform."""
    if bisect_utils.IsWindowsHost():
      # Build archive for x64 is still stored with the "win32" suffix.
      # See chromium_utils.PlatformName().
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if bisect_utils.IsLinuxHost():
      # Android builds are also archived with the "full-build-linux" prefix.
      return 'linux'
    if bisect_utils.IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return '%s.zip' % base_name
  if patch_sha:
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
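
# Illustrative examples of the archive names produced by GetZipFileName()
# above, assuming a Linux host (revision and SHA values are made up):
#   GetZipFileName()                              -> 'full-build-linux.zip'
#   GetZipFileName('283500')                      -> 'full-build-linux_283500.zip'
#   GetZipFileName('283500', patch_sha='1a2b3c')  -> 'full-build-linux_283500_1a2b3c.zip'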


def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Returns the URL to download the build from."""
  def GetGSRootFolderName(target_platform):
    """Returns the Google Cloud Storage root folder name."""
    if bisect_utils.IsWindowsHost():
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if bisect_utils.IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if bisect_utils.IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(
      build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (builder_folder, base_filename)
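
# Illustrative example for GetRemoteBuildPath() above, assuming a Linux host
# (the revision is made up): the remote path is '<builder folder>/<zip name>',
# e.g. GetRemoteBuildPath('283500') -> 'Linux Builder/full-build-linux_283500.zip'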


def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return target_file
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      os.remove(target_file)
  return None


# This is copied from build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      return False
  return True


# This was copied from build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """Extracts the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command because it handles links and
  # file permission bits (executable), which is much easier than trying to do
  # that with ZipInfo options.
  #
  # The Mac version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fall back to the python zip module
  # on Mac if the file size is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((bisect_utils.IsMacHost()
       and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or bisect_utils.IsLinuxHost()):
    unzip_cmd = ['unzip', '-o']
  elif (bisect_utils.IsWindowsHost()
        and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = bisect_utils.RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if bisect_utils.IsMacHost():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)


def WriteStringToFile(text, file_name):
  """Writes text to a file, raising a RuntimeError on failure."""
  try:
    with open(file_name, 'wb') as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)


def ReadStringFromFile(file_name):
  """Reads a file and returns its contents as a string, raising a RuntimeError
  on failure."""
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)


def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to Unix-style paths."""
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i in range(len(diff_lines)):
    line = diff_lines[i]
    if line.startswith('--- ') or line.startswith('+++ '):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
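
# Illustrative example for ChangeBackslashToSlashInPatch() above: only the
# '--- ' and '+++ ' header lines of the diff are rewritten, e.g.
#   '--- src\\third_party\\foo.cc'  becomes  '--- src/third_party/foo.cc'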


def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
  """Parses the vars section of the DEPS file using regular expressions.

  Args:
    deps_file_contents: The DEPS file contents as a string.

  Returns:
    A dictionary in the format {depot: revision} if successful, otherwise None.
  """
  # We'll parse the "vars" section of the DEPS file.
  rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
  re_results = rxp.search(deps_file_contents)
  if not re_results:
    return None

  # We should be left with a series of entries in the vars component of
  # the DEPS file with the following format:
  # 'depot_name': 'revision',
  vars_body = re_results.group('vars_body')
  rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
                   re.MULTILINE)
  re_results = rxp.findall(vars_body)

  return dict(re_results)
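
# Illustrative example of the input _ParseRevisionsFromDEPSFileManually()
# above expects (not a real DEPS file; hashes are made up):
#
#   vars = {
#     'webkit_revision': 'a1b2c3d4',
#     'v8_revision': 'e5f6a7b8',
#   }
#
# would yield {'webkit_revision': 'a1b2c3d4', 'v8_revision': 'e5f6a7b8'}.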


def _WaitUntilBuildIsReady(
    fetch_build, bot_name, builder_host, builder_port, build_request_id,
    max_timeout):
  """Waits until build is produced by bisect builder on try server.

  Args:
    fetch_build: Function to check and download build from cloud storage.
    bot_name: Builder bot name on try server.
    builder_host: Try server host name.
    builder_port: Try server port.
    build_request_id: A unique ID of the build request posted to try server.
    max_timeout: Maximum time to wait for the build.

  Returns:
    Downloaded archive file path if exists, otherwise None.
  """
  # Build number on the try server.
  build_num = None
  # Interval to check build on cloud storage.
  poll_interval = 60
  # Interval to check build status on try server in seconds.
  status_check_interval = 600
  last_status_check = time.time()
  start_time = time.time()
  while True:
    # Checks for build on gs://chrome-perf and download if exists.
    res = fetch_build()
    if res:
      return (res, 'Build successfully found')
    elapsed_status_check = time.time() - last_status_check
    # To avoid overloading the try server with status check requests, we check
    # the build status only every 10 minutes.
    if elapsed_status_check > status_check_interval:
      last_status_check = time.time()
      if not build_num:
        # Get the build number on try server for the current build.
        build_num = request_build.GetBuildNumFromBuilder(
            build_request_id, bot_name, builder_host, builder_port)
      # Check the status of build using the build number.
      # Note: Build is treated as PENDING if build number is not found
      # on the try server.
      build_status, status_link = request_build.GetBuildStatus(
          build_num, bot_name, builder_host, builder_port)
      if build_status == request_build.FAILED:
        return (None, 'Failed to produce build, log: %s' % status_link)
    elapsed_time = time.time() - start_time
    if elapsed_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)

    print 'Time elapsed: %ss without build.' % elapsed_time
    time.sleep(poll_interval)
    # For some reason, mac bisect bots were not flushing stdout periodically.
    # As a result the buildbot command times out. Flush stdout on all platforms
    # while waiting for build.
    sys.stdout.flush()


def _UpdateV8Branch(deps_content):
  """Updates V8 branch in DEPS file to process v8_bleeding_edge.

  Checks for "v8_branch" in the DEPS file and, if present, updates its value
  to the v8_bleeding_edge branch. Note: "v8_branch" was added to the DEPS
  vars starting with DEPS revision 254916; therefore we also check for
  "src/v8": <v8 source path> in DEPS in order to support prior DEPS revisions.

  Args:
    deps_content: DEPS file contents to be modified.

  Returns:
    Modified DEPS file contents as a string.
  """
  new_branch = r'branches/bleeding_edge'
  v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
  if re.search(v8_branch_pattern, deps_content):
    deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
  else:
    # Replaces the branch assigned to the "src/v8" key in the DEPS file.
    # Format of "src/v8" in DEPS:
    #   "src/v8":
    #       (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
    # So "/trunk@" is replaced with "/branches/bleeding_edge@".
    v8_src_pattern = re.compile(
        r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
    if re.search(v8_src_pattern, deps_content):
      deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
  return deps_content
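
# Illustrative example for _UpdateV8Branch() above: a DEPS line such as
#   "v8_branch": "trunk",
# becomes
#   "v8_branch": "branches/bleeding_edge",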


def _UpdateDEPSForAngle(revision, depot, deps_file):
  """Updates DEPS file with new revision for Angle repository.

  This is a hack for the Angle depot case: in the DEPS file, the "vars"
  dictionary contains an "angle_revision" key that holds a git hash instead of
  an SVN revision.

  And sometimes the "angle_revision" key is not specified in the "vars"
  variable; in such cases check the "deps" dictionary variable that matches
  angle.git@[a-fA-F0-9]{40}$ and replace the git hash.
  """
  deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
  try:
    deps_contents = ReadStringFromFile(deps_file)
    # Check whether the depot and revision pattern is in the DEPS file vars
    # variable, e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
    angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
                                   deps_var, re.MULTILINE)
    match = re.search(angle_rev_pattern, deps_contents)
    if match:
      # Update the revision information for the given depot.
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    else:
      # Check whether the depot and revision pattern is in the DEPS file deps
      # variable, e.g.
      # "src/third_party/angle": Var("chromium_git") +
      #     "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
      angle_rev_pattern = re.compile(
          r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
      match = re.search(angle_rev_pattern, deps_contents)
      if not match:
        print 'Could not find angle revision information in DEPS file.'
        return False
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    # Write changes to DEPS file.
    WriteStringToFile(new_data, deps_file)
    return True
  except Exception as e:
    print 'Something went wrong while updating DEPS file, %s' % e
  return False


def _TryParseHistogramValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found, [] if none were found.
  """
  metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    if metric_formatted in current_line:
      current_line = current_line[len(metric_formatted):]

      try:
        histogram_values = eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N elements, using the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])
      except Exception:
        pass

  return values_list
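
# Illustrative example of a histogram line _TryParseHistogramValuesFromOutput()
# above accepts, for a hypothetical metric = ['total', 'load_time'] (made-up data):
#   HISTOGRAM total: load_time= {"buckets": [{"low": 1, "high": 3, "count": 2}]}
# yields [2.0, 2.0]: each bucket contributes its midpoint, repeated "count" times.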


def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (single_result_match is not None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (multi_results_match is not None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (mean_stddev_match is not None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list
                 if bisect_utils.IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [
      ['times', 't'],
      ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]

  if metric in metrics_to_sum:
    if values_list:
      values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list
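
# Illustrative examples of log lines _TryParseResultValuesFromOutput() above
# accepts, for a hypothetical metric = ['warm', 'startup_time'] (numbers made up):
#   RESULT warm: startup_time= 173.25 ms           -> [173.25]
#   RESULT warm: startup_time= [170.1,172.3] ms    -> [170.1, 172.3]
#   RESULT warm: startup_time= {171.2, 1.5} ms     -> [171.2]  (mean of mean/stddev pair)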


def _ParseMetricValuesFromOutput(metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  metric_values = _TryParseResultValuesFromOutput(metric, text)

  if not metric_values:
    metric_values = _TryParseHistogramValuesFromOutput(metric, text)

  return metric_values


def _GenerateProfileIfNecessary(command_args):
  """Checks the command line of the performance test for dependencies on
  profile generation, and runs tools/perf/generate_profile as necessary.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
  if '--profile-dir' in ' '.join(command_args):
    # If we were using python 2.7+, we could just use the argparse
    # module's parse_known_args to grab --profile-dir. Since some of the
    # bots still run 2.6, we have to grab the arguments manually.
    arg_dict = {}
    args_to_parse = ['--profile-dir', '--browser']

    for arg_to_parse in args_to_parse:
      for i, current_arg in enumerate(command_args):
        if arg_to_parse in current_arg:
          current_arg_split = current_arg.split('=')

          # Check 2 cases, --arg=<val> and --arg <val>.
          if len(current_arg_split) == 2:
            arg_dict[arg_to_parse] = current_arg_split[1]
          elif i + 1 < len(command_args):
            arg_dict[arg_to_parse] = command_args[i + 1]

    path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

    if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
      profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
      return not bisect_utils.RunProcess(['python', path_to_generate,
          '--profile-type-to-generate', profile_type,
          '--browser', arg_dict['--browser'], '--output-dir', profile_path])
    return False
  return True


def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dictionary and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dictionary to add the new revisions into.
        Existing revisions will have their sort keys adjusted.
  """
  num_depot_revisions = len(revisions)

  for _, v in revision_data.iteritems():
    if v['depot'] == depot:
      v['sort'] += num_depot_revisions

  for i in xrange(num_depot_revisions):
    r = revisions[i]
    revision_data[r] = {
        'revision': r,
        'depot': depot,
        'value': None,
        'perf_time': 0,
        'build_time': 0,
        'passed': '?',
        'sort': i + sort + 1,
    }


def _PrintThankYou():
  print RESULTS_THANKYOU


def _PrintTableRow(column_widths, row_data):
  """Prints out a row in a formatted table that has columns aligned.

  Args:
    column_widths: A list of column width numbers.
    row_data: A list of items for each column in this row.
  """
  assert len(column_widths) == len(row_data)
  text = ''
  for i in xrange(len(column_widths)):
    current_row_data = row_data[i].center(column_widths[i], ' ')
    text += ('%%%ds' % column_widths[i]) % current_row_data
  print text


def _PrintStepTime(revision_data_sorted):
  """Prints information about how long various steps took.

  Args:
    revision_data_sorted: The sorted list of revision data dictionaries."""
  step_perf_time_avg = 0.0
  step_build_time_avg = 0.0
  step_count = 0
  for _, current_data in revision_data_sorted:
    if current_data['value']:
      step_perf_time_avg += current_data['perf_time']
      step_build_time_avg += current_data['build_time']
      step_count += 1
  if step_count:
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
  print 'Average build time : %s' % datetime.timedelta(
      seconds=int(step_build_time_avg))
  print 'Average test time : %s' % datetime.timedelta(
      seconds=int(step_perf_time_avg))


def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
  """Compiles a list of other possible regressions from the revision data.

  Args:
    revision_data_sorted: Sorted list of (revision, revision data) pairs.
    bad_greater_than_good: Whether the result value at the "bad" revision is
        numerically greater than the result value at the "good" revision.

  Returns:
    A list of [current_rev, previous_rev, confidence] for other places where
    there may have been a regression.
  """
  other_regressions = []
  previous_values = []
  previous_id = None
  for current_id, current_data in revision_data_sorted:
    current_values = current_data['value']
    if current_values:
      current_values = current_values['values']
      if previous_values:
        confidence = ConfidenceScore(previous_values, [current_values])
        mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
        mean_of_current_runs = math_utils.Mean(current_values)

        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in the same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)

        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
  return other_regressions


class BisectPerformanceMetrics(object):
  """This class contains functionality to perform a bisection of a range of
  revisions to narrow down where performance regressions may have occurred.

  The main entry-point is the Run method.
  """

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = builder.Builder.FromOpts(opts)

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])

  def PerformCleanup(self):
    """Performs cleanup when script is finished."""
    os.chdir(self.src_cwd)
    for c in self.cleanup_commands:
      if c[0] == 'mv':
        shutil.move(c[1], c[2])
      else:
        assert False, 'Invalid cleanup command.'

  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    revision_work_list = []
    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

      assert not return_code, ('An error occurred while running '
                               '"%s"' % ' '.join(cmd))

      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list

  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    commit_position = self.source_control.GetCommitPosition(revision)

    if bisect_utils.IsStringInt(commit_position):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])
      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out the revision.
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to".
          git_revision = self.source_control.ResolveToRevision(
              int(commit_position) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None

  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
    cwd = self._GetDepotDirectory('v8')
    cmd = ['log', '--format=%ct', '-1', revision]
    output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
    commit_time = int(output)
    commits = []

    if search_forward:
      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output
      commits = reversed(commits)
    else:
      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output

    bleeding_edge_revision = None

    for c in commits:
      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
      if bleeding_edge_revision:
        break

    return bleeding_edge_revision
, depot
):
1062 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1063 be needed if the bisect recurses into those depots later.
1066 depot: Name of depot being bisected.
1069 A dict in the format {depot:revision} if successful, otherwise None.
1073 'Var': lambda _
: deps_data
["vars"][_
],
1074 'From': lambda *args
: None,
1077 deps_file
= bisect_utils
.FILE_DEPS_GIT
1078 if not os
.path
.exists(deps_file
):
1079 deps_file
= bisect_utils
.FILE_DEPS
1080 execfile(deps_file
, {}, deps_data
)
1081 deps_data
= deps_data
['deps']
1083 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1085 for depot_name
, depot_data
in DEPOT_DEPS_NAME
.iteritems():
1086 if (depot_data
.get('platform') and
1087 depot_data
.get('platform') != os
.name
):
1090 if (depot_data
.get('recurse') and depot
in depot_data
.get('from')):
1091 depot_data_src
= depot_data
.get('src') or depot_data
.get('src_old')
1092 src_dir
= deps_data
.get(depot_data_src
)
1094 self
.depot_cwd
[depot_name
] = os
.path
.join(self
.src_cwd
,
1096 re_results
= rxp
.search(src_dir
)
1098 results
[depot_name
] = re_results
.group('revision')
1100 warning_text
= ('Could not parse revision for %s while bisecting '
1101 '%s' % (depot_name
, depot
))
1102 if not warning_text
in self
.warnings
:
1103 self
.warnings
.append(warning_text
)
1105 results
[depot_name
] = None
1108 deps_file_contents
= ReadStringFromFile(deps_file
)
1109 parse_results
= _ParseRevisionsFromDEPSFileManually(deps_file_contents
)
1111 for depot_name
, depot_revision
in parse_results
.iteritems():
1112 depot_revision
= depot_revision
.strip('@')
1113 print depot_name
, depot_revision
1114 for current_name
, current_data
in DEPOT_DEPS_NAME
.iteritems():
1115 if (current_data
.has_key('deps_var') and
1116 current_data
['deps_var'] == depot_name
):
1117 src_name
= current_name
1118 results
[src_name
] = depot_revision
1122 def _Get3rdPartyRevisions(self
, depot
):
1123 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1126 depot: A depot name. Should be in the DEPOT_NAMES list.
1129 A dict in the format {depot: revision} if successful, otherwise None.
1132 self
.ChangeToDepotWorkingDirectory(depot
)
1136 if depot
== 'chromium' or depot
== 'android-chrome':
1137 results
= self
._ParseRevisionsFromDEPSFile
(depot
)
1142 bisect_utils
.CROS_SDK_PATH
,
1144 'portageq-%s' % self
.opts
.cros_board
,
1146 '/build/%s' % self
.opts
.cros_board
,
1148 CROS_CHROMEOS_PATTERN
1150 output
, return_code
= bisect_utils
.RunProcessAndRetrieveOutput(cmd
)
1152 assert not return_code
, ('An error occurred while running '
1153 '"%s"' % ' '.join(cmd
))
1155 if len(output
) > CROS_CHROMEOS_PATTERN
:
1156 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1159 output
= output
.split('_')[0]
1162 contents
= output
.split('.')
1164 version
= contents
[2]
1166 if contents
[3] != '0':
1167 warningText
= ('Chrome version: %s.%s but using %s.0 to bisect.' %
1168 (version
, contents
[3], version
))
1169 if not warningText
in self
.warnings
:
1170 self
.warnings
.append(warningText
)
1173 self
.ChangeToDepotWorkingDirectory('chromium')
1174 cmd
= ['log', '-1', '--format=%H',
1175 '--author=chrome-release@google.com',
1176 '--grep=to %s' % version
, 'origin/master']
1177 return_code
= bisect_utils
.CheckRunGit(cmd
)
1180 results
['chromium'] = output
.strip()
1183 # We can't try to map the trunk revision to bleeding edge yet, because
1184 # we don't know which direction to try to search in. Have to wait until
1185 # the bisect has narrowed the results down to 2 v8 rolls.
1186 results
['v8_bleeding_edge'] = None
1190 def BackupOrRestoreOutputDirectory(self
, restore
=False, build_type
='Release'):
1191 """Backs up or restores build output directory based on restore argument.
1194 restore: Indicates whether to restore or backup. Default is False(Backup)
1195 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1198 Path to backup or restored location as string. otherwise None if it fails.
1200 build_dir
= os
.path
.abspath(
1201 builder
.GetBuildOutputDirectory(self
.opts
, self
.src_cwd
))
1202 source_dir
= os
.path
.join(build_dir
, build_type
)
1203 destination_dir
= os
.path
.join(build_dir
, '%s.bak' % build_type
)
1205 source_dir
, destination_dir
= destination_dir
, source_dir
1206 if os
.path
.exists(source_dir
):
1207 RmTreeAndMkDir(destination_dir
, skip_makedir
=True)
1208 shutil
.move(source_dir
, destination_dir
)
1209 return destination_dir
1212 def GetBuildArchiveForRevision(self
, revision
, gs_bucket
, target_arch
,
1213 patch_sha
, out_dir
):
1214 """Checks and downloads build archive for a given revision.
1216 Checks for build archive with Git hash or SVN revision. If either of the
1217 file exists, then downloads the archive file.
1220 revision: A Git hash revision.
1221 gs_bucket: Cloud storage bucket name
1222 target_arch: 32 or 64 bit build target
1223 patch: A DEPS patch (used while bisecting 3rd party repositories).
1224 out_dir: Build output directory where downloaded file is stored.
1227 Downloaded archive file path if exists, otherwise None.
1229 # Source archive file path on cloud storage using Git revision.
1230 source_file
= GetRemoteBuildPath(
1231 revision
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1232 downloaded_archive
= FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1233 if not downloaded_archive
:
1234 # Get commit position for the given SHA.
1235 commit_position
= self
.source_control
.GetCommitPosition(revision
)
1237 # Source archive file path on cloud storage using SVN revision.
1238 source_file
= GetRemoteBuildPath(
1239 commit_position
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1240 return FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1241 return downloaded_archive

  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if download succeeds, otherwise False.
    """
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add the patch_sha evaluated above to it.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get build output directory.
    abs_build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path; downloads build archive for given revision.
    downloaded_file = fetch_build_func()

    # When the build archive doesn't exist, post a build request to the try
    # server and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip build archive directory.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      self.BackupOrRestoreOutputDirectory(restore=False)
      # Build output directory based on target (e.g. out/Release, out/Debug).
      target_build_output_dir = os.path.join(abs_build_dir, build_type)
      ExtractZip(downloaded_file, abs_build_dir)
      if not os.path.exists(output_dir):
        # Due to recipe changes, the builds extract folder contains
        # out/Release instead of full-build-<platform>/Release.
        if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
          output_dir = os.path.join(abs_build_dir, 'out', build_type)
        else:
          raise IOError('Missing extracted folder %s ' % output_dir)

      print 'Moving build from %s to %s' % (
          output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
      return True
    except Exception as e:
      print 'Something went wrong while extracting archive file: %s' % e
      self.BackupOrRestoreOutputDirectory(restore=True)
      # Clean up any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete the downloaded archive.
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    return False

  def PostBuildRequestAndWait(self, git_revision, fetch_build, patch=None):
    """POSTs the build request job to the try server instance.

    A try job build request is posted to the tryserver.chromium.perf master,
    and this waits for the binaries to be produced and archived on cloud
    storage. Once the build is ready and stored in the cloud, the build
    archive is downloaded into the output folder.

    Args:
      git_revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.
    """
    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and build time in seconds based on platform."""
      # Bot names should match the ones listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if bisect_utils.IsWindowsHost():
        if bisect_utils.Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if bisect_utils.IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if bisect_utils.IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to try server builders.
    # This ID is added to the "Reason" property of the build.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (git_revision, patch, time.time()))

    # Creates a try job description.
    # Always use the Git hash to post the build request, since commit
    # positions are not supported by the builders.
    job_args = {
        'revision': 'src@%s' % git_revision,
        'bot': bot_name,
        'name': build_request_id,
    }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if request_build.PostTryJob(builder_host, builder_port, job_args):
      target_file, error_msg = _WaitUntilBuildIsReady(
          fetch_build, bot_name, builder_host, builder_port, build_request_id,
          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, git_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % git_revision
    return None

  def IsDownloadable(self, depot):
    """Checks if build can be downloaded based on target platform and depot."""
    if (self.opts.target_platform in ['chromium', 'android'] and
        self.opts.gs_bucket):
      return (depot == 'chromium' or
              'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
              'v8' in DEPOT_DEPS_NAME[depot]['from'])
    return False

  def UpdateDepsContents(self, deps_contents, depot, git_revision, deps_key):
    """Returns modified version of DEPS file contents.

    Args:
      deps_contents: DEPS file content.
      depot: Current depot being bisected.
      git_revision: A git hash to be updated in DEPS.
      deps_key: Key in vars section of DEPS file to be searched.

    Returns:
      Updated DEPS content as string if deps key is found, otherwise None.
    """
    # Check whether the depot and revision pattern is in the DEPS file vars,
    # e.g. for webkit the format is "webkit_revision": "12345".
    deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key,
                               re.MULTILINE)
    new_data = None
    if re.search(deps_revision, deps_contents):
      commit_position = self.source_control.GetCommitPosition(
          git_revision, self._GetDepotDirectory(depot))
      if not commit_position:
        print 'Could not determine commit position for %s' % git_revision
        return None
      # Update the revision information for the given depot.
      new_data = re.sub(deps_revision, str(commit_position), deps_contents)
    else:
      # Check whether the depot and revision pattern is in the DEPS file vars,
      # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..".
      deps_revision = re.compile(
          r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key,
          re.MULTILINE)
      if re.search(deps_revision, deps_contents):
        new_data = re.sub(deps_revision, git_revision, deps_contents)
        # For v8_bleeding_edge revisions, change the V8 branch in order
        # to fetch the bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = _UpdateV8Branch(new_data)
    return new_data

  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method searches DEPS for a particular pattern in which the depot
    revision is specified (e.g. "webkit_revision": "123456"). If a match is
    found, it resolves the given git hash to an SVN revision and replaces it
    in the DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for depot: %s' % depot
      return False

    # Hack for the Angle repository. In the DEPS file, the "vars" dictionary
    # variable contains an "angle_revision" key that holds a git hash instead
    # of an SVN revision. And sometimes the "angle_revision" key is not
    # specified in the "vars" variable; in such cases check the "deps"
    # dictionary variable that matches angle.git@[a-fA-F0-9]{40}$ and replace
    # the git hash.
    if depot == 'angle':
      return _UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      updated_deps_content = self.UpdateDepsContents(
          deps_contents, depot, revision, deps_var)
      # Write changes to DEPS file.
      if updated_deps_content:
        WriteStringToFile(updated_deps_content, deps_file)
        return True
    except Exception as e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    return False

  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with the git hash of the chromium revision and the DEPS patch text.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
    # Get the current chromium revision (git hash).
    cmd = ['rev-parse', 'HEAD']
    chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(
          bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          diff_command = [
              'diff',
              '--src-prefix=src/',
              '--dst-prefix=src/',
              bisect_utils.FILE_DEPS,
          ]
          diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError(
              'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
      else:
        raise RuntimeError(
            'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
    return None

  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True

    build_success = False
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for the dependency repository.
        revision, deps_patch = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        if deps_patch:
          # Revert the changes to the DEPS file.
          self.source_control.CheckoutFileAtRevision(
              bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
        build_success = True
    else:
      # This code path is used when the bisect bot builds binaries locally.
      build_success = self.builder.Build(depot, self.opts)
    return build_success

  def RunGClientHooks(self):
    """Runs gclient with runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    if self.opts.debug_ignore_build:
      return True
    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)

  def _IsBisectModeUsingMetric(self):
    return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]

  def _IsBisectModeReturnCode(self):
    return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]

  def _IsBisectModeStandardDeviation(self):
    return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]

  def GetCompatibleCommand(self, command_to_run, revision, depot):
    # Prior to crrev.com/274857 *only* android-chromium-testshell works.
    # Then until crrev.com/276628 *both* (android-chromium-testshell and
    # android-chrome-shell) work. After that rev 276628 *only*
    # android-chrome-shell works. This script should handle these cases and
    # set the appropriate browser type based on the revision.
    if self.opts.target_platform in ['android']:
      # When it's a third_party depot, get the chromium revision.
      if depot != 'chromium':
        revision = bisect_utils.CheckRunGit(
            ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
      commit_position = self.source_control.GetCommitPosition(revision,
                                                              cwd=self.src_cwd)
      if not commit_position:
        return command_to_run
      cmd_re = re.compile('--browser=(?P<browser_type>\S+)')
      matches = cmd_re.search(command_to_run)
      if bisect_utils.IsStringInt(commit_position) and matches:
        cmd_browser = matches.group('browser_type')
        if commit_position <= 274857 and cmd_browser == 'android-chrome-shell':
          return command_to_run.replace(cmd_browser,
                                        'android-chromium-testshell')
        elif (commit_position >= 276628 and
              cmd_browser == 'android-chromium-testshell'):
          return command_to_run.replace(cmd_browser,
                                        'android-chrome-shell')
    return command_to_run
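
  # Illustrative example for GetCompatibleCommand() above (the revision is
  # made up): for a Chromium commit position of 280000 and a command containing
  # '--browser=android-chromium-testshell', the browser flag is rewritten to
  # '--browser=android-chrome-shell', since 280000 >= 276628.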
1598 def RunPerformanceTestAndParseResults(
1599 self, command_to_run, metric, reset_on_first_run=False,
1600 upload_on_last_run=False, results_label=None):
1601 """Runs a performance test on the current revision and parses the results.
1604 command_to_run: The command to be run to execute the performance test.
1605 metric: The metric to parse out from the results of the performance test.
1606 This is the result chart name and trace name, separated by slash.
1607 May be None for perf try jobs.
1608 reset_on_first_run: If True, pass the flag --reset-results on first run.
1609 upload_on_last_run: If True, pass the flag --upload-results on last run.
1610 results_label: A value for the option flag --results-label.
1611 The arguments reset_on_first_run, upload_on_last_run and results_label
1612 are all ignored if the test is not a Telemetry test.
1615 (values dict, 0) if --debug_ignore_perf_test was passed.
1616 (values dict, 0, test output) if the test was run successfully.
1617 (error message, -1) if the test couldn't be run.
1618 (error message, -1, test output) if the test ran but there was an error.
1620 success_code, failure_code = 0, -1
1622 if self.opts.debug_ignore_perf_test:
1629 return (fake_results, success_code)
1631 # For Windows platform set posix=False, to parse windows paths correctly.
1632 # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
1633 # refer to http://bugs.python.org/issue1724822. By default posix=True.
1634 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
1636 if not _GenerateProfileIfNecessary(args):
1637 err_text = 'Failed to generate profile for performance test.'
1638 return (err_text, failure_code)
1640 # If running a Telemetry test for Chrome OS, insert the remote IP and
1641 # identity parameters.
1642 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
1643 if self.opts.target_platform == 'cros' and is_telemetry:
1644 args.append('--remote=%s' % self.opts.cros_remote_ip)
1645 args.append('--identity=%s' % bisect_utils.CROS_TEST_KEY_PATH)
1647 start_time = time.time()
1650 output_of_all_runs = ''
1651 for i in xrange(self.opts.repeat_test_count):
1652 # Can ignore the return code since if the tests fail, it won't return 0.
1653 current_args = copy.copy(args)
1655 if i == 0 and reset_on_first_run:
1656 current_args.append('--reset-results')
1657 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
1658 current_args.append('--upload-results')
1660 current_args.append('--results-label=%s' % results_label)
1662 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
1663 current_args, cwd=self.src_cwd)
1665 if e.errno == errno.ENOENT:
1666 err_text = ('Something went wrong running the performance test. '
1667 'Please review the command line:\n\n')
1668 if 'src/' in ' '.join(args):
1669 err_text += ('Check that you haven\'t accidentally specified a '
1670 'path with src/ in the command.\n\n')
1671 err_text += ' '.join(args)
1674 return (err_text, failure_code)
1677 output_of_all_runs += output
1678 if self.opts.output_buildbot_annotations:
1681 if metric and self._IsBisectModeUsingMetric():
1682 metric_values += _ParseMetricValuesFromOutput(metric, output)
1683 # If we're bisecting on a metric (i.e. changes in the mean or
1684 # standard deviation) and no metric values are produced, bail out.
1685 if not metric_values:
1687 elif self._IsBisectModeReturnCode():
1688 metric_values.append(return_code)
1690 elapsed_minutes = (time.time() - start_time) / 60.0
1691 if elapsed_minutes >= self.opts.max_time_minutes:
1694 if metric and len(metric_values) == 0:
1695 err_text = 'Metric %s was not found in the test output.' % metric
1696 # TODO(qyearsley): Consider also getting and displaying a list of metrics
1697 # that were found in the output here.
1698 return (err_text, failure_code, output_of_all_runs)
1700 # If we're bisecting on return codes, we're really just looking for zero vs. non-zero.
1703 if self._IsBisectModeReturnCode():
1704 # If any of the return codes is non-zero, output 1.
1705 overall_return_code = 0 if (
1706 all(current_value == 0 for current_value in metric_values)) else 1
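# For example, return codes of [0, 0, 1] across the repeated runs yield an
# overall value of 1 (failure), while [0, 0, 0] yields 0 (success).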
1709 'mean': overall_return_code,
1712 'values': metric_values,
1715 print 'Results of performance test: Command returned with %d' % (
1716 overall_return_code)
1719 # Need to get the average value if there were multiple values.
1720 truncated_mean = math_utils.TruncatedMean(
1721 metric_values, self.opts.truncate_percent)
1722 standard_err = math_utils.StandardError(metric_values)
1723 standard_dev = math_utils.StandardDeviation(metric_values)
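# Illustrative example, assuming TruncatedMean drops truncate_percent of the
# sorted samples from each end: with values [1, 2, 3, 100] and a 25%
# truncation, the 1 and the 100 are discarded and the mean of [2, 3] is used.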
1725 if self._IsBisectModeStandardDeviation():
1726 metric_values = [standard_dev]
1729 'mean': truncated_mean,
1730 'std_err': standard_err,
1731 'std_dev': standard_dev,
1732 'values': metric_values,
1735 print 'Results of performance test: %12f %12f' % (
1736 truncated_mean, standard_err)
1738 return (values, success_code, output_of_all_runs)
1740 def _FindAllRevisionsToSync(self, revision, depot):
1741 """Finds all dependent revisions and depots that need to be synced.
1743 For example skia is broken up into 3 git mirrors over skia/src,
1744 skia/gyp, and skia/include. To sync skia/src properly, one has to find
1745 the proper revisions in skia/gyp and skia/include.
1747 This is only useful in the git workflow, as an SVN depot may be split into multiple git mirrors.
1751 revision: The revision to sync to.
1752 depot: The depot in use at the moment (probably skia).
1755 A list of [depot, revision] pairs that need to be synced.
1757 revisions_to_sync = [[depot, revision]]
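# Illustrative result for a skia/src revision (revision values hypothetical):
#   [['skia/src', <rev>], ['skia/include', <rev2>], ['skia/gyp', <rev3>]]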
1759 is_base = ((depot == 'chromium') or (depot == 'cros') or
1760 (depot == 'android-chrome'))
1762 # Some SVN depots were split into multiple git depots, so we need to
1763 # figure out for each mirror which git revision to grab. There's no
1764 # guarantee that the SVN revision will exist for each of the dependent
1765 # depots, so we have to grep the git logs and grab the next earlier one.
1767 and DEPOT_DEPS_NAME[depot]['depends']
1768 and self.source_control.IsGit()):
1769 commit_position = self.source_control.GetCommitPosition(revision)
1771 for d in DEPOT_DEPS_NAME[depot]['depends']:
1772 self.ChangeToDepotWorkingDirectory(d)
1774 dependant_rev = self.source_control.ResolveToRevision(
1775 commit_position, d, DEPOT_DEPS_NAME, -1000)
1778 revisions_to_sync.append([d, dependant_rev])
1780 num_resolved = len(revisions_to_sync)
1781 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1783 self.ChangeToDepotWorkingDirectory(depot)
1785 if num_resolved - 1 != num_needed:
1788 return revisions_to_sync
1790 def PerformPreBuildCleanup(self):
1791 """Performs cleanup between runs."""
1792 print 'Cleaning up between runs.'
1795 # Leaving these .pyc files around between runs may disrupt some perf tests.
1796 for (path, _, files) in os.walk(self.src_cwd):
1797 for cur_file in files:
1798 if cur_file.endswith('.pyc'):
1799 path_to_file = os.path.join(path, cur_file)
1800 os.remove(path_to_file)
1802 def PerformCrosChrootCleanup(self):
1803 """Deletes the chroot.
1809 self.ChangeToDepotWorkingDirectory('cros')
1810 cmd = [bisect_utils.CROS_SDK_PATH, '--delete']
1811 return_code = bisect_utils.RunProcess(cmd)
1813 return not return_code
1815 def CreateCrosChroot(self):
1816 """Creates a new chroot.
1822 self.ChangeToDepotWorkingDirectory('cros')
1823 cmd = [bisect_utils.CROS_SDK_PATH, '--create']
1824 return_code = bisect_utils.RunProcess(cmd)
1826 return not return_code
1828 def _PerformPreSyncCleanup(self, depot):
1829 """Performs any necessary cleanup before syncing.
1837 if depot == 'chromium' or depot == 'android-chrome':
1838 # Removes third_party/libjingle. At some point, libjingle was causing
1839 # issues syncing when using the git workflow (crbug.com/266324).
1840 os.chdir(self.src_cwd)
1841 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
1843 # Removes third_party/skia. At some point, skia was causing
1844 # issues syncing when using the git workflow (crbug.com/377951).
1845 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
1847 elif depot == 'cros':
1848 return self.PerformCrosChrootCleanup()
1851 def _RunPostSync(self, depot):
1852 """Performs any work after syncing.
1860 if self.opts.target_platform == 'android':
1861 if not builder.SetupAndroidBuildEnvironment(self.opts,
1862 path_to_src=self.src_cwd):
1866 return self.CreateCrosChroot()
1868 return self.RunGClientHooks()
1871 def ShouldSkipRevision(self, depot, revision):
1872 """Checks whether a particular revision can be safely skipped.
1874 Some commits can be safely skipped (such as a DEPS roll); since the tool
1875 is git-based, those changes would have no effect.
1878 depot: The depot being bisected.
1879 revision: Current revision we're synced to.
1882 True if we should skip building/testing this revision.
1884 if depot == 'chromium':
1885 if self.source_control.IsGit():
1886 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1887 output = bisect_utils.CheckRunGit(cmd)
1889 files = output.splitlines()
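# A DEPS-roll commit shows up here as a single changed file named 'DEPS';
# any other commit lists additional paths and is therefore not skipped.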
1891 if len(files) == 1 and files[0] == 'DEPS':
1896 def RunTest(self, revision, depot, command, metric, skippable=False):
1897 """Performs a full sync/build/run of the specified revision.
1900 revision: The revision to sync to.
1901 depot: The depot that's being used at the moment (src, webkit, etc.)
1902 command: The command to execute the performance test.
1903 metric: The performance metric being tested.
1906 On success, a tuple containing the results of the performance test.
1907 Otherwise, a tuple with the error message.
1909 # Decide which sync program to use.
1911 if depot == 'chromium' or depot == 'android-chrome':
1912 sync_client = 'gclient'
1913 elif depot == 'cros':
1914 sync_client = 'repo'
1916 # Decide what depots will need to be synced to what revisions.
1917 revisions_to_sync = self._FindAllRevisionsToSync(revision, depot)
1918 if not revisions_to_sync:
1919 return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL)
1921 if not self._PerformPreSyncCleanup(depot):
1922 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
1924 # Do the syncing for all depots.
1925 if not self.opts.debug_ignore_sync:
1926 if not self._SyncAllRevisions(revisions_to_sync, sync_client):
1927 return ('Failed to sync: [%s]' % str(revision), BUILD_RESULT_FAIL)
1929 # Try to do any post-sync steps. This may include "gclient runhooks".
1930 if not self._RunPostSync(depot):
1931 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
1933 # Skip this revision if it can be skipped.
1934 if skippable and self.ShouldSkipRevision(depot, revision):
1935 return ('Skipped revision: [%s]' % str(revision),
1936 BUILD_RESULT_SKIPPED)
1938 # Obtain a build for this revision. This may be done by requesting a build
1939 # from another builder, waiting for it and downloading it.
1940 start_build_time = time.time()
1941 build_success = self.BuildCurrentRevision(depot, revision)
1942 if not build_success:
1943 return ('Failed to build revision: [%s]' % str(revision),
1945 after_build_time = time.time()
1947 # Possibly alter the command.
1948 command = self.GetCompatibleCommand(command, revision, depot)
1950 # Run the command and get the results.
1951 results = self.RunPerformanceTestAndParseResults(command, metric)
1953 # Restore build output directory once the tests are done, to avoid
1954 # any discrepancies.
1955 if self.IsDownloadable(depot) and revision:
1956 self.BackupOrRestoreOutputDirectory(restore=True)
1958 # A value other than 0 indicates that the test couldn't be run, and results
1959 # should also include an error message.
1963 external_revisions = self._Get3rdPartyRevisions(depot)
1965 if external_revisions is not None:
1966 return (results[0], results[1], external_revisions,
1967 time.time() - after_build_time, after_build_time -
1970 return ('Failed to parse DEPS file for external revisions.',
1973 def _SyncAllRevisions(self, revisions_to_sync, sync_client):
1974 """Syncs multiple depots to particular revisions.
1977 revisions_to_sync: A list of (depot, revision) pairs to be synced.
1978 sync_client: Program used to sync, e.g. "gclient", "repo". Can be None.
1981 True if successful, False otherwise.
1983 for depot, revision in revisions_to_sync:
1984 self.ChangeToDepotWorkingDirectory(depot)
1987 self.PerformPreBuildCleanup()
1989 # When using gclient to sync, you need to specify the depot you
1990 # want so that all the dependencies sync properly as well.
1991 # i.e. gclient sync src@<SHA1>
1992 if sync_client == 'gclient':
1993 revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'], revision)
1995 sync_success = self.source_control.SyncToRevision(revision, sync_client)
1996 if not sync_success:
2001 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2002 """Given known good and bad values, decide if the current_value passed
2006 current_value: The value of the metric being checked.
2007 known_bad_value: The reference value for a "failed" run.
2008 known_good_value: The reference value for a "passed" run.
2011 True if the current_value is closer to the known_good_value than the known_bad_value.
2014 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2015 dist_to_good_value = abs(current_value['std_dev'] -
2016 known_good_value['std_dev'])
2017 dist_to_bad_value = abs(current_value['std_dev'] -
2018 known_bad_value['std_dev'])
2020 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2021 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
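# Illustrative example in mean mode: a good mean of 100, a bad mean of 200
# and a current mean of 120 give distances of 20 vs. 80, so the run counts
# as passing (it is closer to the known good value).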
2023 return dist_to_good_value < dist_to_bad_value
2025 def _GetDepotDirectory(self, depot_name):
2026 if depot_name == 'chromium':
2028 elif depot_name == 'cros':
2029 return self.cros_cwd
2030 elif depot_name in DEPOT_NAMES:
2031 return self.depot_cwd[depot_name]
2033 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
2034 'was added without proper support?' % depot_name)
2036 def ChangeToDepotWorkingDirectory(self, depot_name):
2037 """Given a depot, changes to the appropriate working directory.
2040 depot_name: The name of the depot (see DEPOT_NAMES).
2042 os.chdir(self._GetDepotDirectory(depot_name))
2044 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2045 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2046 search_forward=True)
2047 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2048 search_forward=False)
2049 min_revision_data['external']['v8_bleeding_edge'] = r1
2050 max_revision_data['external']['v8_bleeding_edge'] = r2
2052 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2053 min_revision_data['revision'])
2054 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2055 max_revision_data['revision'])):
2056 self.warnings.append(
2057 'Trunk revisions in V8 did not map directly to bleeding_edge. '
2058 'Attempted to expand the range to find V8 rolls which did map '
2059 'directly to bleeding_edge revisions, but results might not be '
2062 def _FindNextDepotToBisect(
2063 self, current_depot, min_revision_data, max_revision_data):
2064 """Decides which depot the script should dive into next (if any).
2067 current_depot: Current depot being bisected.
2068 min_revision_data: Data about the earliest revision in the bisect range.
2069 max_revision_data: Data about the latest revision in the bisect range.
2072 Name of the depot to bisect next, or None.
2074 external_depot = None
2075 for next_depot in DEPOT_NAMES:
2076 if 'platform' in DEPOT_DEPS_NAME[next_depot]:
2077 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2080 if not (DEPOT_DEPS_NAME[next_depot]['recurse']
2081 and min_revision_data['depot']
2082 in DEPOT_DEPS_NAME[next_depot]['from']):
2085 if current_depot == 'v8':
2086 # We grab the bleeding_edge info here rather than earlier because we
2087 # finally have the revision range. From that we can search forwards and
2088 # backwards to try to match trunk revisions to bleeding_edge.
2089 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2091 if (min_revision_data['external'].get(next_depot) ==
2092 max_revision_data['external'].get(next_depot)):
2095 if (min_revision_data['external'].get(next_depot) and
2096 max_revision_data['external'].get(next_depot)):
2097 external_depot = next_depot
2100 return external_depot
2102 def PrepareToBisectOnDepot(
2103 self, current_depot, end_revision, start_revision, previous_revision):
2104 """Changes to the appropriate directory and gathers a list of revisions
2105 to bisect between |start_revision| and |end_revision|.
2108 current_depot: The depot we want to bisect.
2109 end_revision: End of the revision range.
2110 start_revision: Start of the revision range.
2111 previous_revision: The last revision we synced to on |previous_depot|.
2114 A list containing the revisions between |start_revision| and
2115 |end_revision| inclusive.
2117 # Change into working directory of external library to run
2118 # subsequent commands.
2119 self.ChangeToDepotWorkingDirectory(current_depot)
2121 # V8 (and possibly others) is merged in periodically. Bisecting
2122 # this directory directly won't give much good info.
2123 if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
2124 config_path = os.path.join(self.src_cwd, '..')
2125 if bisect_utils.RunGClientAndCreateConfig(self.opts,
2126 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2128 if bisect_utils.RunGClient(
2129 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
2132 if current_depot == 'v8_bleeding_edge':
2133 self.ChangeToDepotWorkingDirectory('chromium')
2135 shutil.move('v8', 'v8.bak')
2136 shutil.move('v8_bleeding_edge', 'v8')
2138 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2139 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
2141 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2142 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
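# At this point src/v8 holds the bleeding_edge checkout and the original
# trunk checkout lives in src/v8.bak; the cleanup commands queued above
# restore the original layout when cleanup runs after the bisect.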
2144 self.ChangeToDepotWorkingDirectory(current_depot)
2146 depot_revision_list = self.GetRevisionList(current_depot,
2150 self.ChangeToDepotWorkingDirectory('chromium')
2152 return depot_revision_list
2154 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2155 """Gathers reference values by running the performance tests on the
2156 known good and bad revisions.
2159 good_rev: The last known good revision where the performance regression
2160 has not occurred yet.
2161 bad_rev: A revision where the performance regression has already occurred.
2162 cmd: The command to execute the performance test.
2163 metric: The metric being tested for regression.
2166 A tuple with the results of building and running each revision.
2168 bad_run_results = self.RunTest(bad_rev, target_depot, cmd, metric)
2170 good_run_results = None
2172 if not bad_run_results[1]:
2173 good_run_results = self.RunTest(good_rev, target_depot, cmd, metric)
2175 return (bad_run_results, good_run_results)
2177 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2178 if self.opts.output_buildbot_annotations:
2179 step_name = 'Bisection Range: [%s - %s]' % (
2180 revision_list[-1], revision_list[0])
2181 bisect_utils.OutputAnnotationStepStart(step_name)
2184 print 'Revisions to bisect on [%s]:' % depot
2185 for revision_id in revision_list:
2186 print ' -> %s' % (revision_id, )
2189 if self.opts.output_buildbot_annotations:
2190 bisect_utils.OutputAnnotationStepClosed()
2192 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision,
2193 good_svn_revision=None):
2194 """Checks to see if changes to DEPS file occurred, and that the revision
2195 range also includes the change to .DEPS.git. If it doesn't, attempts to
2196 expand the revision range to include it.
2199 bad_revision: First known bad git revision.
2200 good_revision: Last known good git revision.
2201 good_svn_revision: Last known good svn revision.
2204 A tuple with the new bad and good revisions.
2206 # Do NOT perform the nudge: as of revision 291563, .DEPS.git was removed
2207 # and the source contains only the DEPS file for dependency changes.
2208 if good_svn_revision >= 291563:
2209 return (bad_revision, good_revision)
2211 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
2212 changes_to_deps = self.source_control.QueryFileRevisionHistory(
2213 FILE_DEPS, good_revision, bad_revision)
2216 # DEPS file was changed, search from the oldest change to DEPS file to
2217 # bad_revision to see if there are matching .DEPS.git changes.
2218 oldest_deps_change = changes_to_deps[-1]
2219 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
2220 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
2222 if len(changes_to_deps) != len(changes_to_gitdeps):
2223 # Grab the timestamp of the last DEPS change
2224 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
2225 output = bisect_utils.CheckRunGit(cmd)
2226 commit_time = int(output)
2228 # Try looking for a commit that touches the .DEPS.git file in the
2229 # next 15 minutes after the DEPS file change.
2230 cmd = ['log', '--format=%H', '-1',
2231 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
2232 'origin/master', '--', bisect_utils.FILE_DEPS_GIT]
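# If such a commit is found, its hash replaces bad_revision below, so the
# bisect range also covers the corresponding .DEPS.git change.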
2233 output = bisect_utils.CheckRunGit(cmd)
2234 output = output.strip()
2236 self.warnings.append('Detected change to DEPS and modified '
2237 'revision range to include change to .DEPS.git')
2238 return (output, good_revision)
2240 self.warnings.append('Detected change to DEPS but couldn\'t find '
2241 'matching change to .DEPS.git')
2242 return (bad_revision, good_revision)
2244 def CheckIfRevisionsInProperOrder(
2245 self, target_depot, good_revision, bad_revision):
2246 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2249 good_revision: Number/tag of the known good revision.
2250 bad_revision: Number/tag of the known bad revision.
2253 True if the revisions are in the proper order (good earlier than bad).
2255 if self.source_control.IsGit() and target_depot != 'cros':
2256 cwd = self._GetDepotDirectory(target_depot)
2258 cmd = ['log', '--format=%ct', '-1', good_revision]
2259 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2260 good_commit_time = int(output)
2262 cmd = ['log', '--format=%ct', '-1', bad_revision]
2263 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2264 bad_commit_time = int(output)
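# %ct is the committer timestamp in Unix epoch seconds, so the comparison
# below simply checks that the good commit is not newer than the bad one.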
2266 return good_commit_time <= bad_commit_time
2268 # CrOS and SVN use integers.
2269 return int(good_revision) <= int(bad_revision)
2271 def CanPerformBisect(self, good_revision, bad_revision):
2272 """Checks whether a given revision is bisectable.
2274 Checks for the following:
2275 1. Non-bisectable revisions for Android bots (refer to crbug.com/385324).
2276 2. Non-bisectable revisions for Windows bots (refer to crbug.com/405274).
2279 good_revision: Known good revision.
2280 bad_revision: Known bad revision.
2283 A dictionary indicating the result. If revision is not bisectable,
2284 this will contain the field "error", otherwise None.
2286 if self.opts.target_platform == 'android':
2287 revision_to_check = self.source_control.GetCommitPosition(good_revision)
2288 if (bisect_utils.IsStringInt(good_revision)
2289 and good_revision < 265549):
2291 'Bisect cannot continue for the given revision range.\n'
2292 'It is impossible to bisect Android regressions '
2293 'prior to r265549, which allows the bisect bot to '
2294 'rely on Telemetry to do apk installation of the most recently '
2295 'built local ChromeShell (refer to crbug.com/385324).\n'
2296 'Please try bisecting revisions greater than or equal to r265549.')}
2298 if bisect_utils.IsWindowsHost():
2299 good_revision = self.source_control.GetCommitPosition(good_revision)
2300 bad_revision = self.source_control.GetCommitPosition(bad_revision)
2301 if (bisect_utils.IsStringInt(good_revision) and
2302 bisect_utils.IsStringInt(bad_revision)):
2303 if (289987 <= good_revision < 290716 or
2304 289987 <= bad_revision < 290716):
2305 return {'error': ('Oops! Revisions between r289987 and r290716 are '
2306 'marked as a dead zone for Windows due to '
2307 'crbug.com/405274. Please try another range.')}
2311 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2312 """Given known good and bad revisions, run a binary search on all
2313 intermediate revisions to determine the CL where the performance regression
2317 command_to_run: The command to execute the performance test.
2318 bad_revision_in: Number/tag of the known bad revision.
2319 good_revision_in: Number/tag of the known good revision.
2320 metric: The performance metric to monitor.
2323 A dict with 2 members, 'revision_data' and 'error'. On success,
2324 'revision_data' will contain a dict mapping revision ids to
2325 data about that revision. Each piece of revision data consists of a
2326 dict with the following keys:
2328 'passed': Represents whether the performance test was successful at
2329 that revision. Possible values include: 1 (passed), 0 (failed),
2330 '?' (skipped), 'F' (build failed).
2331 'depot': The depot that this revision is from (e.g. WebKit).
2332 'external': If the revision is a 'src' revision, 'external' contains
2333 the revisions of each of the external libraries.
2334 'sort': A sort value for sorting the dict in order of commits.
2344 'depot': 'chromium',
2351 If an error occurred, the 'error' field will contain the message and
2352 'revision_data' will be empty.
2355 'revision_data' : {},
2359 # Choose depot to bisect first
2360 target_depot = 'chromium'
2361 if self.opts.target_platform == 'cros':
2362 target_depot = 'cros'
2363 elif self.opts.target_platform == 'android-chrome':
2364 target_depot = 'android-chrome'
2367 self.ChangeToDepotWorkingDirectory(target_depot)
2369 # If they passed SVN revisions, we can try to match them to git SHA1 hashes.
2370 bad_revision = self.source_control.ResolveToRevision(
2371 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
2372 good_revision = self.source_control.ResolveToRevision(
2373 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)
2376 if bad_revision is None:
2377 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % bad_revision_in
2380 if good_revision is None:
2381 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % good_revision_in
2384 # Check that they didn't accidentally swap good and bad revisions.
2385 if not self.CheckIfRevisionsInProperOrder(
2386 target_depot, good_revision, bad_revision):
2387 results['error'] = ('bad_revision < good_revision, did you swap these '
2390 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
2391 bad_revision, good_revision, good_revision_in)
2392 if self.opts.output_buildbot_annotations:
2393 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2395 cannot_bisect = self.CanPerformBisect(good_revision, bad_revision)
2397 results['error'] = cannot_bisect.get('error')
2400 print 'Gathering revision range for bisection.'
2401 # Retrieve a list of revisions to do bisection on.
2402 src_revision_list = self.GetRevisionList(
2403 target_depot, bad_revision, good_revision)
2405 if self.opts.output_buildbot_annotations:
2406 bisect_utils.OutputAnnotationStepClosed()
2408 if src_revision_list:
2409 # revision_data will store information about a revision such as the
2410 # depot it came from, the webkit/V8 revision at that time,
2411 # performance timing, build state, etc...
2412 revision_data = results['revision_data']
2414 # revision_list is the list we're binary searching through at the moment.
2419 for current_revision_id in src_revision_list:
2422 revision_data[current_revision_id] = {
2425 'depot' : target_depot,
2429 'sort' : sort_key_ids,
2431 revision_list.append(current_revision_id)
2434 max_revision = len(revision_list) - 1
2436 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2438 if self.opts.output_buildbot_annotations:
2439 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2441 print 'Gathering reference values for bisection.'
2443 # Perform the performance tests on the good and bad revisions, to get reference values.
2445 bad_results, good_results = self.GatherReferenceValues(good_revision,
2451 if self.opts.output_buildbot_annotations:
2452 bisect_utils.OutputAnnotationStepClosed()
2455 results['error'] = ('An error occurred while building and running '
2456 'the \'bad\' reference value. The bisect cannot continue without '
2457 'a working \'bad\' revision to start from.\n\nError: %s' %
2462 results['error'] = ('An error occurred while building and running '
2463 'the \'good\' reference value. The bisect cannot continue without '
2464 'a working \'good\' revision to start from.\n\nError: %s' %
2469 # We need these reference values to determine if later runs should be
2470 # classified as pass or fail.
2471 known_bad_value = bad_results[0]
2472 known_good_value = good_results[0]
2474 # Can just mark the good and bad revisions explicitly here since we
2475 # already know the results.
2476 bad_revision_data = revision_data[revision_list[0]]
2477 bad_revision_data['external'] = bad_results[2]
2478 bad_revision_data['perf_time'] = bad_results[3]
2479 bad_revision_data['build_time'] = bad_results[4]
2480 bad_revision_data['passed'] = False
2481 bad_revision_data['value'] = known_bad_value
2483 good_revision_data = revision_data[revision_list[max_revision]]
2484 good_revision_data['external'] = good_results[2]
2485 good_revision_data['perf_time'] = good_results[3]
2486 good_revision_data['build_time'] = good_results[4]
2487 good_revision_data['passed'] = True
2488 good_revision_data['value'] = known_good_value
2490 next_revision_depot = target_depot
2493 if not revision_list:
2496 min_revision_data = revision_data[revision_list[min_revision]]
2497 max_revision_data = revision_data[revision_list[max_revision]]
2499 if max_revision - min_revision <= 1:
2500 current_depot = min_revision_data['depot']
2501 if min_revision_data['passed'] == '?':
2502 next_revision_index = min_revision
2503 elif max_revision_data['passed'] == '?':
2504 next_revision_index = max_revision
2505 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2506 previous_revision = revision_list[min_revision]
2507 # If there were changes to any of the external libraries we track,
2508 # we should bisect the changes there as well.
2509 external_depot = self._FindNextDepotToBisect(
2510 current_depot, min_revision_data, max_revision_data)
2511 # If there was no change in any of the external depots, the search is over.
2513 if not external_depot:
2514 if current_depot == 'v8':
2515 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2516 'continue any further. The script can only bisect into '
2517 'V8\'s bleeding_edge repository if both the current and '
2518 'previous revisions in trunk map directly to revisions in '
2522 earliest_revision = max_revision_data['external'][external_depot]
2523 latest_revision = min_revision_data['external'][external_depot]
2525 new_revision_list = self.PrepareToBisectOnDepot(
2526 external_depot, latest_revision, earliest_revision,
2529 if not new_revision_list:
2530 results['error'] = ('An error occurred attempting to retrieve '
2531 'revision range: [%s..%s]' %
2532 (earliest_revision, latest_revision))
2535 _AddRevisionsIntoRevisionData(
2536 new_revision_list, external_depot, min_revision_data['sort'],
2539 # Reset the bisection and perform it on the newly inserted revisions.
2541 revision_list = new_revision_list
2543 max_revision = len(revision_list) - 1
2544 sort_key_ids += len(revision_list)
2546 print ('Regression in metric %s appears to be the result of '
2547 'changes in [%s].' % (metric, external_depot))
2549 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2555 next_revision_index = (int((max_revision - min_revision) / 2) +
2558 next_revision_id = revision_list[next_revision_index]
2559 next_revision_data = revision_data[next_revision_id]
2560 next_revision_depot = next_revision_data['depot']
2562 self.ChangeToDepotWorkingDirectory(next_revision_depot)
2564 if self.opts.output_buildbot_annotations:
2565 step_name = 'Working on [%s]' % next_revision_id
2566 bisect_utils.OutputAnnotationStepStart(step_name)
2568 print 'Working on revision: [%s]' % next_revision_id
2570 run_results = self.RunTest(
2571 next_revision_id, next_revision_depot, command_to_run, metric,
2574 # If the build is successful, check whether or not the metric
2576 if not run_results[1]:
2577 if len(run_results) > 2:
2578 next_revision_data['external'] = run_results[2]
2579 next_revision_data['perf_time'] = run_results[3]
2580 next_revision_data['build_time'] = run_results[4]
2582 passed_regression = self._CheckIfRunPassed(run_results[0],
2586 next_revision_data['passed'] = passed_regression
2587 next_revision_data['value'] = run_results[0]
2589 if passed_regression:
2590 max_revision = next_revision_index
2592 min_revision = next_revision_index
2594 if run_results[1] == BUILD_RESULT_SKIPPED:
2595 next_revision_data['passed'] = 'Skipped'
2596 elif run_results[1] == BUILD_RESULT_FAIL:
2597 next_revision_data['passed'] = 'Build Failed'
2599 print run_results[0]
2601 # If the build is broken, remove it and redo search.
2602 revision_list.pop(next_revision_index)
2606 if self.opts.output_buildbot_annotations:
2607 self._PrintPartialResults(results)
2608 bisect_utils.OutputAnnotationStepClosed()
2610 # Weren't able to sync and retrieve the revision range.
2611 results['error'] = ('An error occurred attempting to retrieve revision '
2612 'range: [%s..%s]' % (good_revision, bad_revision))
2616 def _PrintPartialResults(self, results_dict):
2617 revision_data = results_dict['revision_data']
2618 revision_data_sorted = sorted(revision_data.iteritems(),
2619 key=lambda x: x[1]['sort'])
2620 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2622 self._PrintTestedCommitsTable(revision_data_sorted,
2623 results_dict['first_working_revision'],
2624 results_dict['last_broken_revision'],
2625 100, final_step=False)
2627 def _ConfidenceLevelStatus(self, results_dict):
2628 if not results_dict['confidence']:
2630 confidence_status = 'Successful with %(level)s confidence%(warning)s.'
2631 if results_dict['confidence'] >= HIGH_CONFIDENCE:
2635 warning = ' and warnings'
2636 if not self.warnings:
2638 return confidence_status % {'level': level, 'warning': warning}
2640 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2641 info = self.source_control.QueryRevisionInfo(cl,
2642 self._GetDepotDirectory(depot))
2643 if depot and 'viewvc' in DEPOT_DEPS_NAME[depot]:
2645 # Format is "git
-svn
-id: svn
://....@123456 <other data
>"
2646 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2647 svn_revision = svn_line[0].split('@')
2648 svn_revision = svn_revision[1].split(' ')[0]
2649 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2654 def _PrintRevisionInfo(self, cl, info, depot=None):
2656 if not info['email'].startswith(info['author']):
2657 email_info = '\nEmail : %s' % info['email']
2658 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
2660 commit_info = '\nLink : %s' % commit_link
2662 commit_info = ('\nFailed to parse SVN revision from body:\n%s' %
2664 print RESULTS_REVISION_INFO % {
2665 'subject': info['subject'],
2666 'author': info['author'],
2667 'email_info': email_info,
2668 'commit_info': commit_info,
2670 'cl_date': info['date']
2673 def _PrintTestedCommitsHeader(self):
2674 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2676 [20, 70, 14, 12, 13],
2677 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
2678 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2680 [20, 70, 14, 12, 13],
2681 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
2682 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2685 ['Depot', 'Commit SHA', 'Return Code', 'State'])
2687 assert False, 'Invalid bisect_mode specified.'
2689 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
2690 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2691 std_error = '+-%.02f' % current_data['value']['std_err']
2692 mean = '%.02f' % current_data['value']['mean']
2694 [20, 70, 12, 14, 13],
2695 [current_data['depot'], cl_link, mean, std_error, state_str])
2696 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2697 std_error = '+-%.02f' % current_data['value']['std_err']
2698 mean = '%.02f' % current_data['value']['mean']
2700 [20, 70, 12, 14, 13],
2701 [current_data['depot'], cl_link, std_error, mean, state_str])
2702 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2703 mean = '%d' % current_data['value']['mean']
2706 [current_data['depot'], cl_link, mean, state_str])
2708 def _PrintTestedCommitsTable(
2709 self, revision_data_sorted, first_working_revision, last_broken_revision,
2710 confidence, final_step=True):
2713 print '===== TESTED COMMITS ====='
2715 print '===== PARTIAL RESULTS ====='
2716 self._PrintTestedCommitsHeader()
2718 for current_id, current_data in revision_data_sorted:
2719 if current_data['value']:
2720 if (current_id == last_broken_revision or
2721 current_id == first_working_revision):
2722 # If confidence is too low, don't add this empty line since it's
2723 # used to put focus on a suspected CL.
2724 if confidence and final_step:
2727 if state == 2 and not final_step:
2728 # Just want a separation between "bad" and "good" cl's.
2732 if state == 1 and final_step:
2733 state_str = 'Suspected CL'
2737 # If confidence is too low, don't bother outputting good/bad.
2740 state_str = state_str.center(13, ' ')
2742 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
2743 current_data['depot'])
2745 cl_link = current_id
2746 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
2748 def _PrintReproSteps(self):
2749 """Prints out a section of the results explaining how to run the test.
2751 This message includes the command used to run the test.
2753 command = '$ ' + self.opts.command
2754 if bisect_utils.IsTelemetryCommand(self.opts.command):
2755 command += ('\nAlso consider passing --profiler=list to see available '
2757 print REPRO_STEPS_LOCAL % {'command': command}
2758 if bisect_utils.IsTelemetryCommand(self.opts.command):
2759 telemetry_command = re.sub(r'--browser=[^\s]+',
2760 '--browser=<bot-name>',
2762 print REPRO_STEPS_TRYJOB_TELEMETRY % {'command': telemetry_command}
2764 print REPRO_STEPS_TRYJOB % {'command': command}
2766 def _PrintOtherRegressions(self, other_regressions, revision_data):
2767 """Prints a section of the results about other potential regressions."""
2769 print 'Other regressions may have occurred:'
2770 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
2771 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
2772 for regression in other_regressions:
2773 current_id, previous_id, confidence = regression
2774 current_data = revision_data[current_id]
2775 previous_data = revision_data[previous_id]
2777 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
2778 current_data['depot'])
2779 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
2780 previous_data['depot'])
2782 # If we can't map it to a viewable URL, at least show the original hash.
2783 if not current_link:
2784 current_link = current_id
2785 if not previous_link:
2786 previous_link = previous_id
2788 print ' %8s %70s %s' % (
2789 current_data['depot'], current_link,
2790 ('%d%%' % confidence).center(10, ' '))
2791 print ' %8s %70s' % (
2792 previous_data['depot'], previous_link)
2795 def _GetResultsDict(self, revision_data, revision_data_sorted):
2796 # Find range where it possibly broke.
2797 first_working_revision = None
2798 first_working_revision_index = -1
2799 last_broken_revision = None
2800 last_broken_revision_index = -1
2802 culprit_revisions = []
2803 other_regressions = []
2804 regression_size = 0.0
2805 regression_std_err = 0.0
2808 for i in xrange(len(revision_data_sorted)):
2809 k, v = revision_data_sorted[i]
2810 if v['passed'] == 1:
2811 if not first_working_revision:
2812 first_working_revision = k
2813 first_working_revision_index = i
2816 last_broken_revision = k
2817 last_broken_revision_index = i
2819 if last_broken_revision is not None and first_working_revision is not None:
2821 for i in xrange(0, last_broken_revision_index + 1):
2822 if revision_data_sorted[i][1]['value']:
2823 broken_means.append(revision_data_sorted[i][1]['value']['values'])
2826 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
2827 if revision_data_sorted[i][1]['value']:
2828 working_means.append(revision_data_sorted[i][1]['value']['values'])
2830 # Flatten the lists to calculate mean of all values.
2831 working_mean = sum(working_means, [])
2832 broken_mean = sum(broken_means, [])
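# sum(list_of_lists, []) concatenates the per-revision value lists into one
# flat list, e.g. sum([[1, 2], [3]], []) == [1, 2, 3].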
2834 # Calculate the approximate size of the regression
2835 mean_of_bad_runs = math_utils.Mean(broken_mean)
2836 mean_of_good_runs = math_utils.Mean(working_mean)
2838 regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
2840 if math.isnan(regression_size):
2841 regression_size = 'zero-to-nonzero'
2843 regression_std_err = math.fabs(math_utils.PooledStandardError(
2844 [working_mean, broken_mean]) /
2845 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
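# Illustrative example, assuming RelativeChange returns |bad - good| / good:
# a good mean of 100 and a bad mean of 110 give a regression_size of
# roughly 10 (i.e. a 10% regression).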
2847 # Give a "confidence
" in the bisect. At the moment we use how distinct the
2848 # values are before and after the last broken revision, and how noisy the
2850 confidence = ConfidenceScore(working_means, broken_means)
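# The confidence value is treated as a percentage in [0, 100] elsewhere in
# this script (e.g. the HIGH_CONFIDENCE check and the '%d%%' formatting).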
2852 culprit_revisions = []
2855 self.ChangeToDepotWorkingDirectory(
2856 revision_data[last_broken_revision]['depot'])
2858 if revision_data[last_broken_revision]['depot'] == 'cros':
2859 # Want to get a list of all the commits and what depots they belong
2860 # to so that we can grab info about each.
2861 cmd = ['repo', 'forall', '-c',
2862 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2863 last_broken_revision, first_working_revision + 1)]
2864 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
2867 assert not return_code, ('An error occurred while running '
2868 '"%s"' % ' '.join(cmd))
2871 for l in output.split('\n'):
2873 # Output will be in form:
2875 # /path_to_other_depot
2883 contents = l.split(' ')
2884 if len(contents) > 1:
2885 changes.append([last_depot, contents[0]])
2888 info = self.source_control.QueryRevisionInfo(c[1])
2889 culprit_revisions.append((c[1], info, None))
2891 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
2892 k, v = revision_data_sorted[i]
2893 if k == first_working_revision:
2895 self.ChangeToDepotWorkingDirectory(v['depot'])
2896 info = self.source_control.QueryRevisionInfo(k)
2897 culprit_revisions.append((k, info, v['depot']))
2900 # Check for any other possible regression ranges.
2901 other_regressions = _FindOtherRegressions(
2902 revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)
2905 'first_working_revision': first_working_revision,
2906 'last_broken_revision': last_broken_revision,
2907 'culprit_revisions': culprit_revisions,
2908 'other_regressions': other_regressions,
2909 'regression_size': regression_size,
2910 'regression_std_err': regression_std_err,
2911 'confidence': confidence,
2914 def _CheckForWarnings(self, results_dict):
2915 if len(results_dict['culprit_revisions']) > 1:
2916 self.warnings.append('Due to build errors, regression range could '
2917 'not be narrowed down to a single commit.')
2918 if self.opts.repeat_test_count == 1:
2919 self.warnings.append('Tests were only set to run once. This may '
2920 'be insufficient to get meaningful results.')
2921 if 0 < results_dict['confidence'] < HIGH_CONFIDENCE:
2922 self.warnings.append('Confidence is not high. Try bisecting again '
2923 'with increased repeat_count, larger range, or '
2924 'on another metric.')
2925 if not results_dict['confidence']:
2926 self.warnings.append('Confidence score is 0%. Try bisecting again on '
2927 'another platform or another metric.')
2929 def FormatAndPrintResults(self, bisect_results):
2930 """Prints the results from a bisection run in a readable format.
2933 bisect_results: The results from a bisection test run.
2935 revision_data = bisect_results['revision_data']
2936 revision_data_sorted = sorted(revision_data.iteritems(),
2937 key=lambda x: x[1]['sort'])
2938 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2940 self._CheckForWarnings(results_dict)
2942 if self.opts.output_buildbot_annotations:
2943 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
2946 print 'Full results of bisection:'
2947 for current_id, current_data in revision_data_sorted:
2948 build_status = current_data['passed']
2950 if type(build_status) is bool:
2952 build_status = 'Good'
2954 build_status = 'Bad'
2956 print ' %20s %40s %s' % (current_data['depot'],
2957 current_id, build_status)
2960 if self.opts.output_buildbot_annotations:
2961 bisect_utils.OutputAnnotationStepClosed()
2962 # The perf dashboard scrapes the "results" step in order to comment on
2963 # bugs. If you change this, please update the perf dashboard as well.
2964 bisect_utils.OutputAnnotationStepStart('Results')
2966 self._PrintBanner(results_dict)
2967 self._PrintWarnings()
2969 if results_dict['culprit_revisions'] and results_dict['confidence']:
2970 for culprit in results_dict['culprit_revisions']:
2971 cl, info, depot = culprit
2972 self._PrintRevisionInfo(cl, info, depot)
2973 if results_dict['other_regressions']:
2974 self._PrintOtherRegressions(results_dict['other_regressions'],
2976 self._PrintTestedCommitsTable(revision_data_sorted,
2977 results_dict['first_working_revision'],
2978 results_dict['last_broken_revision'],
2979 results_dict['confidence'])
2980 _PrintStepTime(revision_data_sorted)
2981 self._PrintReproSteps()
2983 if self.opts.output_buildbot_annotations:
2984 bisect_utils.OutputAnnotationStepClosed()
2986 def _PrintBanner(self, results_dict):
2987 if self._IsBisectModeReturnCode():
2991 metrics = '/'.join(self.opts.metric)
2992 change = '%.02f%% (+/-%.02f%%)' % (
2993 results_dict['regression_size'], results_dict['regression_std_err'])
2995 if results_dict['culprit_revisions'] and results_dict['confidence']:
2996 status = self._ConfidenceLevelStatus(results_dict)
2998 status = 'Failure, could not reproduce.'
2999 change = 'Bisect could not reproduce a change.'
3001 print RESULTS_BANNER % {
3003 'command': self.opts.command,
3006 'confidence': results_dict['confidence'],
3009 def _PrintWarnings(self):
3010 """Prints a list of warning strings if there are any."""
3011 if not self.warnings:
3015 for w in set(self.warnings):
3019 def _IsPlatformSupported():
3020 """Checks that this platform and build system are supported.
3023 opts: The options parsed from the command line.
3026 True if the platform and build system are supported.
3028 # Haven't tested the script out on any other platforms yet.
3029 supported = ['posix', 'nt']
3030 return os.name in supported
3033 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3034 """Removes the directory tree specified, and then creates an empty
3035 directory in the same location (if not specified to skip).
3038 path_to_dir: Path to the directory tree.
3039 skip_makedir: Whether to skip creating empty directory, default is False.
3042 True if successful, False if an error occurred.
3045 if os.path.exists(path_to_dir):
3046 shutil.rmtree(path_to_dir)
3048 if e.errno != errno.ENOENT:
3051 if not skip_makedir:
3052 return MaybeMakeDirectory(path_to_dir)
3057 def RemoveBuildFiles(build_type):
3058 """Removes build files from previous runs."""
3059 if RmTreeAndMkDir(os.path.join('out', build_type)):
3060 if RmTreeAndMkDir(os.path.join('build', build_type)):
3065 class BisectOptions(object):
3066 """Options to be used when running bisection."""
3068 super(BisectOptions, self).__init__()
3070 self.target_platform = 'chromium'
3071 self.build_preference = None
3072 self.good_revision = None
3073 self.bad_revision = None
3074 self.use_goma = None
3075 self.goma_dir = None
3076 self.cros_board = None
3077 self.cros_remote_ip = None
3078 self.repeat_test_count = 20
3079 self.truncate_percent = 25
3080 self.max_time_minutes = 20
3083 self.output_buildbot_annotations = None
3084 self.no_custom_deps = False
3085 self.working_directory = None
3086 self.extra_src = None
3087 self.debug_ignore_build = None
3088 self.debug_ignore_sync = None
3089 self.debug_ignore_perf_test = None
3090 self.gs_bucket = None
3091 self.target_arch = 'ia32'
3092 self.target_build_type = 'Release'
3093 self.builder_host = None
3094 self.builder_port = None
3095 self.bisect_mode = BISECT_MODE_MEAN
3098 def _CreateCommandLineParser():
3099 """Creates a parser with bisect options.
3102 An instance of optparse.OptionParser.
3104 usage = ('%prog [options] [-- chromium-options]\n'
3105 'Perform binary search on revision history to find a minimal '
3106 'range of revisions where a performance metric regressed.\n')
3108 parser = optparse.OptionParser(usage=usage)
3110 group = optparse.OptionGroup(parser, 'Bisect options')
3111 group.add_option('-c', '--command',
3113 help='A command to execute your performance test at' +
3114 ' each point in the bisection.')
3115 group.add_option('-b', '--bad_revision',
3117 help='A bad revision to start bisection. ' +
3118 'Must be later than good revision. May be either a git' +
3119 ' or svn revision.')
3120 group.add_option('-g', '--good_revision',
3122 help='A revision to start bisection where performance' +
3123 ' test is known to pass. Must be earlier than the ' +
3124 'bad revision. May be either a git or svn revision.')
3125 group.add_option('-m', '--metric',
3127 help='The desired metric to bisect on. For example ' +
3128 '"vm_rss_final_b
/vm_rss_f_b
"')
3129 group.add_option('-r', '--repeat_test_count',
3132 help='The number of times to repeat the performance '
3133 'test. Values will be clamped to range [1, 100]. '
3134 'Default value is 20.')
3135 group.add_option('--max_time_minutes',
3138 help='The maximum time (in minutes) to take running the '
3139 'performance tests. The script will run the performance '
3140 'tests according to --repeat_test_count, so long as it '
3141 'doesn\'t exceed --max_time_minutes. Values will be '
3142 'clamped to range [1, 60]. '
3143 'Default value is 20.')
3144 group.add_option('-t', '--truncate_percent',
3147 help='The highest/lowest % are discarded to form a '
3148 'truncated mean. Values will be clamped to range [0, '
3149 '25]. Default value is 25 (highest/lowest 25% will be '
3151 group.add_option('--bisect_mode',
3153 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3154 BISECT_MODE_RETURN_CODE],
3155 default=BISECT_MODE_MEAN,
3156 help='The bisect mode. Choices are to bisect on the '
3157 'difference in mean, std_dev, or return_code.')
3158 parser.add_option_group(group)
3160 group = optparse.OptionGroup(parser, 'Build options')
3161 group.add_option('-w', '--working_directory',
3163 help='Path to the working directory where the script '
3164 'will do an initial checkout of the chromium depot. The '
3165 'files will be placed in a subdirectory "bisect" under '
3166 'working_directory and that will be used to perform the '
3167 'bisection. This parameter is optional, if it is not '
3168 'supplied, the script will work from the current depot.')
3169 group.add_option('--build_preference',
3171 choices=['msvs', 'ninja', 'make'],
3172 help='The preferred build system to use. On linux/mac '
3173 'the options are make/ninja. On Windows, the options '
3175 group.add_option('--target_platform',
3177 choices=['chromium', 'cros', 'android', 'android-chrome'],
3179 help='The target platform. Choices are "chromium" '
3180 '(current platform), "cros", "android", or "android-chrome". If you '
3181 'specify something other than "chromium", you must be '
3182 'properly set up to build that platform.')
3183 group.add_option('--no_custom_deps',
3184 dest='no_custom_deps',
3185 action='store_true',
3187 help='Run the script without custom_deps.')
3188 group.add_option('--extra_src',
3190 help='Path to a script which can be used to modify '
3191 'the bisect script\'s behavior.')
3192 group.add_option('--cros_board',
3194 help='The cros board type to build.')
3195 group.add_option('--cros_remote_ip',
3197 help='The remote machine to image to.')
3198 group.add_option('--use_goma',
3199 action='store_true',
3200 help='Add a bunch of extra threads for goma, and enable '
3202 group.add_option('--goma_dir',
3203 help='Path to goma tools (or system default if not '
3205 group.add_option('--output_buildbot_annotations',
3206 action='store_true',
3207 help='Add extra annotation output for buildbot.')
3208 group.add_option('--gs_bucket',
3212 help=('Name of Google Storage bucket to upload or '
3213 'download build. e.g., chrome-perf'))
3214 group.add_option('--target_arch',
3216 choices=['ia32', 'x64', 'arm'],
3219 help=('The target build architecture. Choices are "ia32" '
3220 '(default), "x64" or "arm".'))
3221 group.add_option('--target_build_type',
3223 choices=['Release', 'Debug'],
3225 help='The target build type. Choices are "Release" '
3226 '(default), or "Debug".')
3227 group.add_option('--builder_host',
3228 dest='builder_host',
3230 help=('Host address of server to produce build by posting'
3231 ' try job request.'))
3232 group.add_option('--builder_port',
3233 dest='builder_port',
3235 help=('HTTP port of the server to produce build by posting'
3236 ' try job request.'))
3237 parser.add_option_group(group)
3239 group = optparse.OptionGroup(parser, 'Debug options')
3240 group.add_option('--debug_ignore_build',
3241 action='store_true',
3242 help='DEBUG: Don\'t perform builds.')
3243 group.add_option('--debug_ignore_sync',
3244 action='store_true',
3245 help='DEBUG: Don\'t perform syncs.')
3246 group.add_option('--debug_ignore_perf_test',
3247 action='store_true',
3248 help='DEBUG: Don\'t perform performance tests.')
3249 parser.add_option_group(group)
3252 def ParseCommandLine(self):
3253 """Parses the command line for bisect options."""
3254 parser = self._CreateCommandLineParser()
3255 opts, _ = parser.parse_args()
3258 if not opts.command:
3259 raise RuntimeError('missing required parameter: --command')
3261 if not opts.good_revision:
3262 raise RuntimeError('missing required parameter: --good_revision')
3264 if not opts.bad_revision:
3265 raise RuntimeError('missing required parameter: --bad_revision')
3267 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3268 raise RuntimeError('missing required parameter: --metric')
3271 if not cloud_storage.List(opts.gs_bucket):
3272 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3273 if not opts.builder_host:
3274 raise RuntimeError('Must specify try server host name using '
3275 '--builder_host when gs_bucket is used.')
3276 if not opts.builder_port:
3277 raise RuntimeError('Must specify try server port number using '
3278 '--builder_port when gs_bucket is used.')
3279 if opts.target_platform == 'cros':
3280 # Run sudo up front to make sure credentials are cached for later.
3281 print 'Sudo is required to build cros:'
3283 bisect_utils.RunProcess(['sudo', 'true'])
3285 if not opts.cros_board:
3286 raise RuntimeError('missing required parameter: --cros_board')
3288 if not opts.cros_remote_ip:
3289 raise RuntimeError('missing required parameter: --cros_remote_ip')
3291 if not opts.working_directory:
3292 raise RuntimeError('missing required parameter: --working_directory')
3294 if opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3295 metric_values = opts.metric.split('/')
3296 if len(metric_values) != 2:
3297 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3298 opts.metric = metric_values
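# For example, '--metric=vm_rss_final_b/vm_rss_f_b' is stored as
# ['vm_rss_final_b', 'vm_rss_f_b'] (chart name, trace name).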
3300 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3301 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3302 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3303 opts.truncate_percent = opts.truncate_percent / 100.0
3305 for k, v in opts.__dict__.iteritems():
3306 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
3308 except RuntimeError, e:
3309 output_string = StringIO.StringIO()
3310 parser.print_help(file=output_string)
3311 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3312 output_string.close()
3313 raise RuntimeError(error_message)
3316 def FromDict(values):
3317 """Creates an instance of BisectOptions from a dictionary.
3320 values: a dict containing options to set.
3323 An instance of BisectOptions.
3325 opts = BisectOptions()
3326 for k, v in values.iteritems():
3327 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
3330 if opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3331 metric_values = opts.metric.split('/')
3332 if len(metric_values) != 2:
3333 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3334 opts.metric = metric_values
3336 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3337 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3338 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3339 opts.truncate_percent = opts.truncate_percent / 100.0
3347 opts = BisectOptions()
3348 opts.ParseCommandLine()
3351 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3353 raise RuntimeError('Invalid or missing --extra_src.')
3354 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
3356 if opts.working_directory:
3357 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3358 if opts.no_custom_deps:
3360 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3362 os.chdir(os.path.join(os.getcwd(), 'src'))
3364 if not RemoveBuildFiles(opts.target_build_type):
3365 raise RuntimeError('Something went wrong removing the build files.')
3367 if not _IsPlatformSupported():
3368 raise RuntimeError('Sorry, this platform isn\'t supported yet.')
3370 # Check what source control method is being used, and create a
3371 # SourceControl object if possible.
3372 source_control = source_control_module.DetermineAndCreateSourceControl(opts)
3374 if not source_control:
3376 'Sorry, only the git workflow is supported at the moment.')
3378 # gclient sync seems to fail if you're not on the master branch.
3379 if (not source_control.IsInProperBranch() and
3380 not opts.debug_ignore_sync and
3381 not opts.working_directory):
3382 raise RuntimeError('You must switch to master branch to run bisection.')
3383 bisect_test = BisectPerformanceMetrics(source_control, opts)
3385 bisect_results = bisect_test.Run(opts.command,
3389 if bisect_results['error']:
3390 raise RuntimeError(bisect_results['error'])
3391 bisect_test.FormatAndPrintResults(bisect_results)
3394 bisect_test.PerformCleanup()
3395 except RuntimeError, e:
3396 if opts.output_buildbot_annotations:
3397 # The perf dashboard scrapes the "results" step in order to comment on
3398 # bugs. If you change this, please update the perf dashboard as well.
3399 bisect_utils.OutputAnnotationStepStart('Results')
3400 print 'Error: %s' % e.message
3401 if opts.output_buildbot_annotations:
3402 bisect_utils.OutputAnnotationStepClosed()
3406 if __name__ == '__main__':