2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
range where the regression was introduced.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
52 sys
.path
.append(os
.path
.join(os
.path
.dirname(__file__
), 'telemetry'))
54 from auto_bisect
import bisect_utils
55 from auto_bisect
import builder
56 from auto_bisect
import math_utils
57 from auto_bisect
import post_perf_builder_job
as bisect_builder
58 from auto_bisect
import source_control
as source_control_module
59 from auto_bisect
import ttest
60 from telemetry
.util
import cloud_storage
62 # Below is the map of "depot" names to information about each depot. Each depot
63 # is a repository, and in the process of bisecting, revision ranges in these
64 # repositories may also be bisected.
66 # Each depot information dictionary may contain:
67 # src: Path to the working directory.
68 # recurse: True if this repository will get bisected.
69 # depends: A list of other repositories that are actually part of the same
70 # repository in svn. If the repository has any dependent repositories
71 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then
72 # they are specified here.
73 # svn: URL of SVN repository. Needed for git workflow to resolve hashes to
75 # from: Parent depot that must be bisected before this is bisected.
76 # deps_var: Key name in vars variable in DEPS file that has revision
83 'from': ['cros', 'android-chrome'],
85 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
86 'deps_var': 'chromium_rev'
89 'src': 'src/third_party/WebKit',
94 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
95 'deps_var': 'webkit_revision'
98 'src': 'src/third_party/angle',
99 'src_old': 'src/third_party/angle_dx11',
102 'from': ['chromium'],
104 'deps_var': 'angle_revision'
110 'from': ['chromium'],
111 'custom_deps': bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
112 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
113 'deps_var': 'v8_revision'
115 'v8_bleeding_edge': {
116 'src': 'src/v8_bleeding_edge',
119 'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
121 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
122 'deps_var': 'v8_revision'
125 'src': 'src/third_party/skia/src',
127 'svn': 'http://skia.googlecode.com/svn/trunk/src',
128 'depends': ['skia/include', 'skia/gyp'],
129 'from': ['chromium'],
130 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
131 'deps_var': 'skia_revision'
134 'src': 'src/third_party/skia/include',
136 'svn': 'http://skia.googlecode.com/svn/trunk/include',
138 'from': ['chromium'],
139 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
143 'src': 'src/third_party/skia/gyp',
145 'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
147 'from': ['chromium'],
148 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
153 DEPOT_NAMES
= DEPOT_DEPS_NAME
.keys()
155 CROS_CHROMEOS_PATTERN
= 'chromeos-base/chromeos-chrome'
157 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision.
158 BUILD_RESULT_SUCCEED
= 0
159 BUILD_RESULT_FAIL
= 1
160 BUILD_RESULT_SKIPPED
= 2
162 # Maximum time in seconds to wait after posting build request to tryserver.
163 # TODO: Change these values based on the actual time taken by buildbots on
165 MAX_MAC_BUILD_TIME
= 14400
166 MAX_WIN_BUILD_TIME
= 14400
167 MAX_LINUX_BUILD_TIME
= 14400
169 # The confidence percentage at which confidence can be consider "high".
172 # Patch template to add a new file, DEPS.sha under src folder.
173 # This file contains SHA1 value of the DEPS changes made while bisecting
174 # dependency repositories. This patch send along with DEPS patch to tryserver.
175 # When a build requested is posted with a patch, bisect builders on tryserver,
176 # once build is produced, it reads SHA value from this file and appends it
177 # to build archive filename.
178 DEPS_SHA_PATCH
= """diff --git src/DEPS.sha src/DEPS.sha
186 # The possible values of the --bisect_mode flag, which determines what to
187 # use when classifying a revision as "good" or "bad".
188 BISECT_MODE_MEAN
= 'mean'
189 BISECT_MODE_STD_DEV
= 'std_dev'
190 BISECT_MODE_RETURN_CODE
= 'return_code'
192 # The perf dashboard looks for a string like "Estimated Confidence: 95%"
193 # to decide whether or not to cc the author(s). If you change this, please
194 # update the perf dashboard as well.
196 ===== BISECT JOB RESULTS =====
199 Test Command: %(command)s
200 Test Metric: %(metrics)s
201 Relative Change: %(change)s
202 Estimated Confidence: %(confidence)d%%"""
204 # The perf dashboard specifically looks for the string
205 # "Author : " to parse out who to cc on a bug. If you change the
206 # formatting here, please update the perf dashboard as well.
207 RESULTS_REVISION_INFO
= """
208 ===== SUSPECTED CL(s) =====
209 Subject : %(subject)s
210 Author : %(author)s%(email_info)s%(commit_info)s
212 Date : %(cl_date)s"""
214 REPRO_STEPS_LOCAL
= """
215 ==== INSTRUCTIONS TO REPRODUCE ====
219 REPRO_STEPS_TRYJOB
= """
220 To reproduce on Performance trybot:
221 1. Create new git branch or check out existing branch.
222 2. Edit tools/run-perf-test.cfg (instructions in file) or \
223 third_party/WebKit/Tools/run-perf-test.cfg.
224 a) Take care to strip any src/ directories from the head of \
226 b) On desktop, only --browser=release is supported, on android \
227 --browser=android-chromium-testshell.
228 c) Test command to use: %(command)s
229 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
230 committed locally to run-perf-test.cfg.
231 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
232 $ git cl upload --bypass-hooks
233 4. Send your try job to the tryserver. \
234 [Please make sure to use appropriate bot to reproduce]
235 $ git cl try -m tryserver.chromium.perf -b <bot>
237 For more details please visit
238 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""
240 RESULTS_THANKYOU
= """
241 ===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
242 Visit http://www.chromium.org/developers/core-principles for Chrome's policy
244 Contact chrome-perf-dashboard-team with any questions or suggestions about
249 . | ---------'-------'-----------.
250 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
251 . \______________.-------._______________)
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict of depot-name -> depot-info entries to merge into
        the module-level DEPOT_DEPS_NAME map.
  """
  global DEPOT_DEPS_NAME
  # Without this second declaration, the assignment below would only bind a
  # function-local DEPOT_NAMES and the module-level list would go stale.
  global DEPOT_NAMES
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def ConfidenceScore(good_results_lists, bad_results_lists):
  """Calculates a confidence score.

  This score is a percentage which represents our degree of confidence in the
  proposition that the good results and bad results are distinct groups, and
  their differences aren't due to chance alone.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # If there's only one item in either list, this means only one revision was
  # classified good or bad; this isn't good enough evidence to make a decision.
  # If an empty list was passed, that also implies zero confidence.
  if len(good_results_lists) <= 1 or len(bad_results_lists) <= 1:
    return 0.0

  # Flatten the lists of results lists.
  sample1 = sum(good_results_lists, [])
  sample2 = sum(bad_results_lists, [])

  # If there were only empty lists in either of the lists (this is unexpected
  # and normally shouldn't happen), then we also want to return 0.
  if not sample1 or not sample2:
    return 0.0

  # The p-value is approximately the probability of obtaining the given set
  # of good and bad values just by chance.
  _, _, p_value = ttest.WelchsTTest(sample1, sample2)
  return 100.0 * (1.0 - p_value)
def GetSHA1HexDigest(contents):
  """Returns SHA1 hex digest of the given string."""
  sha1_hash = hashlib.sha1(contents)
  return sha1_hash.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Revision (or git hash) the archive was built at; when
        None, only the platform base name is used.
    target_arch: Target architecture ('ia32' or 'x64').
    patch_sha: Optional SHA of a patch; appended to the revision when given.

  Returns:
    The zip archive file name, e.g. 'full-build-linux_<rev>.zip'.
  """
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if bisect_utils.IsWindowsHost():
      # Build archive for x64 is still stored with 'win32'suffix
      # (chromium_utils.PlatformName()).
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'win32_x64'
      return 'win32'
    if bisect_utils.IsLinuxHost():
      # Android builds too are archived with full-build-linux* prefix.
      return 'linux'
    if bisect_utils.IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return '%s.zip' % base_name
  if patch_sha:
    # Disambiguate patched builds by appending the patch SHA to the revision.
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from.

  Args:
    build_revision: Revision the build was produced at.
    target_platform: Target platform name (e.g. 'chromium', 'android').
    target_arch: Target architecture ('ia32' or 'x64').
    patch_sha: Optional SHA of a patch applied to the build.

  Returns:
    A '<builder folder>/<archive name>' path inside the storage bucket.
  """
  def GetGSRootFolderName(target_platform):
    """Gets Google Cloud Storage root folder names"""
    if bisect_utils.IsWindowsHost():
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if bisect_utils.IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if bisect_utils.IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(
      build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (builder_folder, base_filename)
355 def FetchFromCloudStorage(bucket_name
, source_path
, destination_path
):
356 """Fetches file(s) from the Google Cloud Storage.
359 bucket_name: Google Storage bucket name.
360 source_path: Source file path.
361 destination_path: Destination file path.
364 Downloaded file path if exisits, otherwise None.
366 target_file
= os
.path
.join(destination_path
, os
.path
.basename(source_path
))
368 if cloud_storage
.Exists(bucket_name
, source_path
):
369 print 'Fetching file from gs//%s/%s ...' % (bucket_name
, source_path
)
370 cloud_storage
.Get(bucket_name
, source_path
, destination_path
)
371 if os
.path
.exists(target_file
):
374 print ('File gs://%s/%s not found in cloud storage.' % (
375 bucket_name
, source_path
))
376 except Exception as e
:
377 print 'Something went wrong while fetching file from cloud: %s' % e
378 if os
.path
.exists(target_file
):
379 os
.remove(target_file
)
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists or was created, False on any other OS error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is not an error; anything else is.
    if e.errno != errno.EEXIST:
      return False
  return True
395 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
396 def ExtractZip(filename
, output_dir
, verbose
=True):
397 """ Extract the zip archive in the output directory."""
398 MaybeMakeDirectory(output_dir
)
400 # On Linux and Mac, we use the unzip command as it will
401 # handle links and file bits (executable), which is much
402 # easier then trying to do that with ZipInfo options.
404 # The Mac Version of unzip unfortunately does not support Zip64, whereas
405 # the python module does, so we have to fallback to the python zip module
406 # on Mac if the filesize is greater than 4GB.
408 # On Windows, try to use 7z if it is installed, otherwise fall back to python
409 # zip module and pray we don't have files larger than 512MB to unzip.
411 if ((bisect_utils
.IsMacHost()
412 and os
.path
.getsize(filename
) < 4 * 1024 * 1024 * 1024)
413 or bisect_utils
.IsLinuxHost()):
414 unzip_cmd
= ['unzip', '-o']
415 elif (bisect_utils
.IsWindowsHost()
416 and os
.path
.exists('C:\\Program Files\\7-Zip\\7z.exe')):
417 unzip_cmd
= ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
420 # Make sure path is absolute before changing directories.
421 filepath
= os
.path
.abspath(filename
)
422 saved_dir
= os
.getcwd()
424 command
= unzip_cmd
+ [filepath
]
425 result
= bisect_utils
.RunProcess(command
)
428 raise IOError('unzip failed: %s => %s' % (str(command
), result
))
430 assert bisect_utils
.IsWindowsHost() or bisect_utils
.IsMacHost()
431 zf
= zipfile
.ZipFile(filename
)
432 for name
in zf
.namelist():
434 print 'Extracting %s' % name
435 zf
.extract(name
, output_dir
)
436 if bisect_utils
.IsMacHost():
437 # Restore permission bits.
438 os
.chmod(os
.path
.join(output_dir
, name
),
439 zf
.getinfo(name
).external_attr
>> 16L)
def WriteStringToFile(text, file_name):
  """Writes the given text to a file, raising RuntimeError on failure.

  Args:
    text: Contents to write (bytes; the file is opened in binary mode).
    file_name: Destination file path.

  Raises:
    RuntimeError: If the file could not be opened or written.
  """
  try:
    with open(file_name, 'wb') as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Reads a file and returns its contents, raising RuntimeError on failure.

  Args:
    file_name: Path of the file to read.

  Returns:
    The file contents as a string.

  Raises:
    RuntimeError: If the file could not be opened or read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths.

  Only the '--- ' and '+++ ' header lines of a diff are rewritten; other
  lines (including patch content) are left untouched so that literal
  backslashes in file contents survive.

  Args:
    diff_text: Patch text, possibly containing Windows-style paths.

  Returns:
    The patch text with forward slashes in its header paths; empty/None
    input is returned unchanged.
  """
  if not diff_text:
    return diff_text
  diff_lines = diff_text.split('\n')
  for i in range(len(diff_lines)):
    if (diff_lines[i].startswith('--- ') or
        diff_lines[i].startswith('+++ ')):
      diff_lines[i] = diff_lines[i].replace('\\', '/')
  return '\n'.join(diff_lines)
470 def _ParseRevisionsFromDEPSFileManually(deps_file_contents
):
471 """Parses the vars section of the DEPS file with regex.
474 deps_file_contents: The DEPS file contents as a string.
477 A dict in the format {depot:revision} if successful, otherwise None.
479 # We'll parse the "vars" section of the DEPS file.
480 rxp
= re
.compile('vars = {(?P<vars_body>[^}]+)', re
.MULTILINE
)
481 re_results
= rxp
.search(deps_file_contents
)
486 # We should be left with a series of entries in the vars component of
487 # the DEPS file with the following format:
488 # 'depot_name': 'revision',
489 vars_body
= re_results
.group('vars_body')
490 rxp
= re
.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
492 re_results
= rxp
.findall(vars_body
)
494 return dict(re_results
)
497 def _WaitUntilBuildIsReady(
498 fetch_build
, bot_name
, builder_host
, builder_port
, build_request_id
,
500 """Waits until build is produced by bisect builder on tryserver.
503 fetch_build: Function to check and download build from cloud storage.
504 bot_name: Builder bot name on tryserver.
505 builder_host Tryserver hostname.
506 builder_port: Tryserver port.
507 build_request_id: A unique ID of the build request posted to tryserver.
508 max_timeout: Maximum time to wait for the build.
511 Downloaded archive file path if exists, otherwise None.
513 # Build number on the tryserver.
515 # Interval to check build on cloud storage.
517 # Interval to check build status on tryserver.
518 status_check_interval
= 600
519 last_status_check
= time
.time()
520 start_time
= time
.time()
522 # Checks for build on gs://chrome-perf and download if exists.
525 return (res
, 'Build successfully found')
526 elapsed_status_check
= time
.time() - last_status_check
527 # To avoid overloading tryserver with status check requests, we check
528 # build status for every 10 mins.
529 if elapsed_status_check
> status_check_interval
:
530 last_status_check
= time
.time()
532 # Get the build number on tryserver for the current build.
533 build_num
= bisect_builder
.GetBuildNumFromBuilder(
534 build_request_id
, bot_name
, builder_host
, builder_port
)
535 # Check the status of build using the build number.
536 # Note: Build is treated as PENDING if build number is not found
537 # on the the tryserver.
538 build_status
, status_link
= bisect_builder
.GetBuildStatus(
539 build_num
, bot_name
, builder_host
, builder_port
)
540 if build_status
== bisect_builder
.FAILED
:
541 return (None, 'Failed to produce build, log: %s' % status_link
)
542 elapsed_time
= time
.time() - start_time
543 if elapsed_time
> max_timeout
:
544 return (None, 'Timed out: %ss without build' % max_timeout
)
546 print 'Time elapsed: %ss without build.' % elapsed_time
547 time
.sleep(poll_interval
)
548 # For some reason, mac bisect bots were not flushing stdout periodically.
549 # As a result buildbot command is timed-out. Flush stdout on all platforms
550 # while waiting for build.
554 def _UpdateV8Branch(deps_content
):
555 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
557 Check for "v8_branch" in DEPS file if exists update its value
558 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
559 variable from DEPS revision 254916, therefore check for "src/v8":
560 <v8 source path> in DEPS in order to support prior DEPS revisions
564 deps_content: DEPS file contents to be modified.
567 Modified DEPS file contents as a string.
569 new_branch
= r
'branches/bleeding_edge'
570 v8_branch_pattern
= re
.compile(r
'(?<="v8_branch": ")(.*)(?=")')
571 if re
.search(v8_branch_pattern
, deps_content
):
572 deps_content
= re
.sub(v8_branch_pattern
, new_branch
, deps_content
)
574 # Replaces the branch assigned to "src/v8" key in DEPS file.
575 # Format of "src/v8" in DEPS:
577 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
578 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
579 v8_src_pattern
= re
.compile(
580 r
'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re
.MULTILINE
)
581 if re
.search(v8_src_pattern
, deps_content
):
582 deps_content
= re
.sub(v8_src_pattern
, new_branch
, deps_content
)
586 def _UpdateDEPSForAngle(revision
, depot
, deps_file
):
587 """Updates DEPS file with new revision for Angle repository.
589 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
590 variable contains "angle_revision" key that holds git hash instead of
593 And sometimes "angle_revision" key is not specified in "vars" variable,
594 in such cases check "deps" dictionary variable that matches
595 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
597 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
599 deps_contents
= ReadStringFromFile(deps_file
)
600 # Check whether the depot and revision pattern in DEPS file vars variable
601 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
602 angle_rev_pattern
= re
.compile(r
'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
603 deps_var
, re
.MULTILINE
)
604 match
= re
.search(angle_rev_pattern
% deps_var
, deps_contents
)
606 # Update the revision information for the given depot
607 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
609 # Check whether the depot and revision pattern in DEPS file deps
611 # "src/third_party/angle": Var("chromium_git") +
612 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
613 angle_rev_pattern
= re
.compile(
614 r
'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re
.MULTILINE
)
615 match
= re
.search(angle_rev_pattern
, deps_contents
)
617 print 'Could not find angle revision information in DEPS file.'
619 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
620 # Write changes to DEPS file
621 WriteStringToFile(new_data
, deps_file
)
624 print 'Something went wrong while updating DEPS file, %s' % e
628 def _TryParseHistogramValuesFromOutput(metric
, text
):
629 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
632 metric: The metric as a list of [<trace>, <value>] strings.
633 text: The text to parse the metric values from.
636 A list of floating point numbers found, [] if none were found.
638 metric_formatted
= 'HISTOGRAM %s: %s= ' % (metric
[0], metric
[1])
640 text_lines
= text
.split('\n')
643 for current_line
in text_lines
:
644 if metric_formatted
in current_line
:
645 current_line
= current_line
[len(metric_formatted
):]
648 histogram_values
= eval(current_line
)
650 for b
in histogram_values
['buckets']:
651 average_for_bucket
= float(b
['high'] + b
['low']) * 0.5
652 # Extends the list with N-elements with the average for that bucket.
653 values_list
.extend([average_for_bucket
] * b
['count'])
def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (single_result_match is not None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (multi_results_match is not None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (mean_stddev_match is not None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list
                 if bisect_utils.IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [
      ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]

  if metric in metrics_to_sum:
    if values_list:
      values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list
def _ParseMetricValuesFromOutput(metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer the RESULT-line format; fall back to histogram output when the
  # RESULT parser finds nothing.
  parsed_values = _TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  return _TryParseHistogramValuesFromOutput(metric, text)
def _GenerateProfileIfNecessary(command_args):
  """Checks the command line of the performance test for dependencies on
  profile generation, and runs tools/perf/generate_profile as necessary.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
  if '--profile-dir' in ' '.join(command_args):
    # If we were using python 2.7+, we could just use the argparse
    # module's parse_known_args to grab --profile-dir. Since some of the
    # bots still run 2.6, have to grab the arguments manually.
    arg_dict = {}
    args_to_parse = ['--profile-dir', '--browser']

    for arg_to_parse in args_to_parse:
      for i, current_arg in enumerate(command_args):
        if arg_to_parse in current_arg:
          current_arg_split = current_arg.split('=')

          # Check 2 cases, --arg=<val> and --arg <val>
          if len(current_arg_split) == 2:
            arg_dict[arg_to_parse] = current_arg_split[1]
          elif i + 1 < len(command_args):
            arg_dict[arg_to_parse] = command_args[i+1]

    path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

    # 'in' replaces the deprecated dict.has_key and works on 2.x and 3.x.
    if '--profile-dir' in arg_dict and '--browser' in arg_dict:
      profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
      return not bisect_utils.RunProcess(['python', path_to_generate,
                                          '--profile-type-to-generate',
                                          profile_type,
                                          '--browser', arg_dict['--browser'],
                                          '--output-dir', profile_path])
    return False
  return True
785 def _AddRevisionsIntoRevisionData(revisions
, depot
, sort
, revision_data
):
786 """Adds new revisions to the revision_data dict and initializes them.
789 revisions: List of revisions to add.
790 depot: Depot that's currently in use (src, webkit, etc...)
791 sort: Sorting key for displaying revisions.
792 revision_data: A dict to add the new revisions into. Existing revisions
793 will have their sort keys offset.
795 num_depot_revisions
= len(revisions
)
797 for _
, v
in revision_data
.iteritems():
799 v
['sort'] += num_depot_revisions
801 for i
in xrange(num_depot_revisions
):
810 'sort' : i
+ sort
+ 1,
def _PrintThankYou():
  """Prints the RESULTS_THANKYOU closing banner."""
  print RESULTS_THANKYOU
818 def _PrintTableRow(column_widths
, row_data
):
819 """Prints out a row in a formatted table that has columns aligned.
822 column_widths: A list of column width numbers.
823 row_data: A list of items for each column in this row.
825 assert len(column_widths
) == len(row_data
)
827 for i
in xrange(len(column_widths
)):
828 current_row_data
= row_data
[i
].center(column_widths
[i
], ' ')
829 text
+= ('%%%ds' % column_widths
[i
]) % current_row_data
833 def _PrintStepTime(revision_data_sorted
):
834 """Prints information about how long various steps took.
837 revision_data_sorted: The sorted list of revision data dictionaries."""
838 step_perf_time_avg
= 0.0
839 step_build_time_avg
= 0.0
841 for _
, current_data
in revision_data_sorted
:
842 if current_data
['value']:
843 step_perf_time_avg
+= current_data
['perf_time']
844 step_build_time_avg
+= current_data
['build_time']
847 step_perf_time_avg
= step_perf_time_avg
/ step_count
848 step_build_time_avg
= step_build_time_avg
/ step_count
850 print 'Average build time : %s' % datetime
.timedelta(
851 seconds
=int(step_build_time_avg
))
852 print 'Average test time : %s' % datetime
.timedelta(
853 seconds
=int(step_perf_time_avg
))
def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
  """Compiles a list of other possible regressions from the revision data.

  Args:
    revision_data_sorted: Sorted list of (revision, revision data dict) pairs.
    bad_greater_than_good: Whether the result value at the "bad" revision is
        numerically greater than the result value at the "good" revision.

  Returns:
    A list of [current_rev, previous_rev, confidence] for other places where
    there may have been a regression.
  """
  other_regressions = []
  previous_values = []
  previous_id = None
  for current_id, current_data in revision_data_sorted:
    current_values = current_data['value']
    if current_values:
      current_values = current_values['values']
      if previous_values:
        confidence = ConfidenceScore(previous_values, [current_values])
        mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
        mean_of_current_runs = math_utils.Mean(current_values)

        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)

        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
  return other_regressions
896 class BisectPerformanceMetrics(object):
897 """This class contains functionality to perform a bisection of a range of
898 revisions to narrow down where performance regressions may have occurred.
900 The main entry-point is the Run method.
903 def __init__(self
, source_control
, opts
):
904 super(BisectPerformanceMetrics
, self
).__init
__()
907 self
.source_control
= source_control
908 self
.src_cwd
= os
.getcwd()
909 self
.cros_cwd
= os
.path
.join(os
.getcwd(), '..', 'cros')
911 self
.cleanup_commands
= []
913 self
.builder
= builder
.Builder
.FromOpts(opts
)
915 # This always starts true since the script grabs latest first.
916 self
.was_blink
= True
918 for d
in DEPOT_NAMES
:
919 # The working directory of each depot is just the path to the depot, but
920 # since we're already in 'src', we can skip that part.
922 self
.depot_cwd
[d
] = os
.path
.join(
923 self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
925 def PerformCleanup(self
):
926 """Performs cleanup when script is finished."""
927 os
.chdir(self
.src_cwd
)
928 for c
in self
.cleanup_commands
:
930 shutil
.move(c
[1], c
[2])
932 assert False, 'Invalid cleanup command.'
934 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
935 """Retrieves a list of all the commits between the bad revision and
936 last known good revision."""
938 revision_work_list
= []
941 revision_range_start
= good_revision
942 revision_range_end
= bad_revision
945 self
.ChangeToDepotWorkingDirectory('cros')
947 # Print the commit timestamps for every commit in the revision time
948 # range. We'll sort them and bisect by that. There is a remote chance that
949 # 2 (or more) commits will share the exact same timestamp, but it's
950 # probably safe to ignore that case.
951 cmd
= ['repo', 'forall', '-c',
952 'git log --format=%%ct --before=%d --after=%d' % (
953 revision_range_end
, revision_range_start
)]
954 output
, return_code
= bisect_utils
.RunProcessAndRetrieveOutput(cmd
)
956 assert not return_code
, ('An error occurred while running '
957 '"%s"' % ' '.join(cmd
))
961 revision_work_list
= list(set(
962 [int(o
) for o
in output
.split('\n') if bisect_utils
.IsStringInt(o
)]))
963 revision_work_list
= sorted(revision_work_list
, reverse
=True)
965 cwd
= self
._GetDepotDirectory
(depot
)
966 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
967 good_revision
, cwd
=cwd
)
969 return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk git revision to its v8_bleeding_edge counterpart.

    Args:
      revision: A git hash on the V8 trunk branch.

    Returns:
      The corresponding v8_bleeding_edge git hash, or None if the trunk
      revision cannot be mapped.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if bisect_utils.IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't match the expected format; fall through to the
            # legacy lookup below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          # on the bleeding_edge commit just before this trunk SVN revision.
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            # Only accept the candidate when its subject matches the
            # expected "Prepare push to trunk" pattern.
            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    # NOTE(review): source paste was missing some interior lines here;
    # control flow reconstructed — confirm against upstream history.
    return None
  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
    """Finds the nearest mappable bleeding_edge hash around a trunk revision.

    Walks up to 10 trunk commits before/after |revision| (by commit time) and
    returns the first one that maps into v8_bleeding_edge.

    Args:
      revision: A git hash on the V8 trunk branch.
      search_forward: If True, search commits after |revision|; otherwise
          commits before it.

    Returns:
      A v8_bleeding_edge git hash, or None if no nearby commit maps.
    """
    cwd = self._GetDepotDirectory('v8')
    cmd = ['log', '--format=%ct', '-1', revision]
    output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
    commit_time = int(output)
    commits = []

    if search_forward:
      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output
      # git log returns newest-first; reverse so we scan closest-first.
      commits = reversed(commits)
    else:
      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
          'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output

    bleeding_edge_revision = None

    # NOTE(review): the branch/ref argument ('origin/master') and loop
    # structure were partly missing from the paste — reconstructed.
    for c in commits:
      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)

      if bleeding_edge_revision:
        break

    return bleeding_edge_revision
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which may
    be needed if the bisect recurses into those depots later.

    Args:
      depot: Name of depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      # Evaluate the DEPS file; 'Var' resolves to the DEPS "vars" dict and
      # 'From' is stubbed out since cross-DEPS lookups aren't needed here.
      deps_data = {
          'Var': lambda _: deps_data["vars"][_],
          'From': lambda *args: None,
      }
      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
      deps_data = deps_data['deps']

      # Matches the trailing "<repo>.git@<hash>" portion of a DEPS entry.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        # Skip depots pinned to a different host platform.
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                                                      depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Couldn\'t parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # DEPS file could not be exec'd; fall back to manual text parsing.
      # NOTE(review): several structural lines (the try/except skeleton and
      # helper-path details) were missing from the paste — reconstructed.
      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
      parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          # Map the DEPS variable name back to the depot it belongs to.
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results
1112 def _Get3rdPartyRevisions(self
, depot
):
1113 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1116 A dict in the format {depot:revision} if successful, otherwise None.
1119 self
.ChangeToDepotWorkingDirectory(depot
)
1123 if depot
== 'chromium' or depot
== 'android-chrome':
1124 results
= self
._ParseRevisionsFromDEPSFile
(depot
)
1129 bisect_utils
.CROS_SDK_PATH
,
1131 'portageq-%s' % self
.opts
.cros_board
,
1133 '/build/%s' % self
.opts
.cros_board
,
1135 CROS_CHROMEOS_PATTERN
1137 output
, return_code
= bisect_utils
.RunProcessAndRetrieveOutput(cmd
)
1139 assert not return_code
, ('An error occurred while running '
1140 '"%s"' % ' '.join(cmd
))
1142 if len(output
) > CROS_CHROMEOS_PATTERN
:
1143 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1146 output
= output
.split('_')[0]
1149 contents
= output
.split('.')
1151 version
= contents
[2]
1153 if contents
[3] != '0':
1154 warningText
= ('Chrome version: %s.%s but using %s.0 to bisect.' %
1155 (version
, contents
[3], version
))
1156 if not warningText
in self
.warnings
:
1157 self
.warnings
.append(warningText
)
1160 self
.ChangeToDepotWorkingDirectory('chromium')
1161 cmd
= ['log', '-1', '--format=%H',
1162 '--author=chrome-release@google.com',
1163 '--grep=to %s' % version
, 'origin/master']
1164 return_code
= bisect_utils
.CheckRunGit(cmd
)
1167 results
['chromium'] = output
.strip()
1170 # We can't try to map the trunk revision to bleeding edge yet, because
1171 # we don't know which direction to try to search in. Have to wait until
1172 # the bisect has narrowed the results down to 2 v8 rolls.
1173 results
['v8_bleeding_edge'] = None
  def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
    """Backs up or restores build output directory based on restore argument.

    Args:
      restore: Indicates whether to restore or backup. Default is False(Backup)
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to backup or restored location as string. otherwise None if it fails.
    """
    build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
    source_dir = os.path.join(build_dir, build_type)
    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
    if restore:
      # Restoring simply moves in the opposite direction: .bak -> live dir.
      source_dir, destination_dir = destination_dir, source_dir
    if os.path.exists(source_dir):
      RmTreeAndMkDir(destination_dir, skip_makedir=True)
      shutil.move(source_dir, destination_dir)
      return destination_dir
    # Nothing to move (source missing) — report failure with None.
    return None
1199 def GetBuildArchiveForRevision(self
, revision
, gs_bucket
, target_arch
,
1200 patch_sha
, out_dir
):
1201 """Checks and downloads build archive for a given revision.
1203 Checks for build archive with Git hash or SVN revision. If either of the
1204 file exists, then downloads the archive file.
1207 revision: A Git hash revision.
1208 gs_bucket: Cloud storage bucket name
1209 target_arch: 32 or 64 bit build target
1210 patch: A DEPS patch (used while bisecting 3rd party repositories).
1211 out_dir: Build output directory where downloaded file is stored.
1214 Downloaded archive file path if exists, otherwise None.
1216 # Source archive file path on cloud storage using Git revision.
1217 source_file
= GetRemoteBuildPath(
1218 revision
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1219 downloaded_archive
= FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1220 if not downloaded_archive
:
1221 # Get SVN revision for the given SHA.
1222 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1224 # Source archive file path on cloud storage using SVN revision.
1225 source_file
= GetRemoteBuildPath(
1226 svn_revision
, self
.opts
.target_platform
, target_arch
, patch_sha
)
1227 return FetchFromCloudStorage(gs_bucket
, source_file
, out_dir
)
1228 return downloaded_archive
  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if download succeeds, otherwise False.
    """
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add patch_sha evaluated above to it.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get Build output directory
    abs_build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path, downloads build archive for given revision.
    downloaded_file = fetch_build_func()

    # When build archive doesn't exists, post a build request to tryserver
    # and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip build archive directory.
    # NOTE(review): try/finally skeleton partly missing from paste —
    # reconstructed so the downloaded archive is always cleaned up.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      self.BackupOrRestoreOutputdirectory(restore=False)
      # Build output directory based on target(e.g. out/Release, out/Debug).
      target_build_output_dir = os.path.join(abs_build_dir, build_type)
      ExtractZip(downloaded_file, abs_build_dir)
      if not os.path.exists(output_dir):
        # Due to recipe changes, the builds extract folder contains
        # out/Release instead of full-build-<platform>/Release.
        if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
          output_dir = os.path.join(abs_build_dir, 'out', build_type)
        else:
          raise IOError('Missing extracted folder %s ' % output_dir)

      print 'Moving build from %s to %s' % (
          output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
      return True
    except Exception as e:
      print 'Something went wrong while extracting archive file: %s' % e
      self.BackupOrRestoreOutputdirectory(restore=True)
      # Cleanup any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete downloaded archive
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    return False
  def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
    """POSTs the build request job to the tryserver instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.

    Raises:
      RuntimeError: The git hash could not be mapped to an SVN revision.
    """
    # Get SVN revision for the given SHA.
    svn_revision = self.source_control.SVNFindRev(revision)
    if not svn_revision:
      raise RuntimeError(
          'Failed to determine SVN revision for %s' % revision)

    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and build time in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if bisect_utils.IsWindowsHost():
        # NOTE(review): both Windows branches return the same bot name, so
        # the x64 check is currently redundant — confirm whether a dedicated
        # x64 builder was intended here.
        if bisect_utils.Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if bisect_utils.IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if bisect_utils.IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to tryserver builders.
    # This ID is added to "Reason" property in build's json.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (svn_revision, patch, time.time()))

    # Creates a try job description.
    # NOTE(review): some job_args entries were missing from the paste —
    # reconstructed; verify field names against the builder's master.cfg.
    job_args = {
        'revision': 'src@%s' % svn_revision,
        'bot': bot_name,
        'name': build_request_id,
    }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if bisect_builder.PostTryJob(builder_host, builder_port, job_args):
      target_file, error_msg = _WaitUntilBuildIsReady(
          fetch_build, bot_name, builder_host, builder_port, build_request_id,
          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, svn_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % svn_revision
    return None
  def IsDownloadable(self, depot):
    """Checks if build is downloadable based on target platform and depot.

    Args:
      depot: The depot being bisected.

    Returns:
      True when a cloud-storage bucket is configured and the depot (or one of
      its parents per DEPOT_DEPS_NAME) is chromium or v8; False otherwise.
    """
    if (self.opts.target_platform in ['chromium', 'android'] and
        self.opts.gs_bucket):
      return (depot == 'chromium' or
              'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
              'v8' in DEPOT_DEPS_NAME[depot]['from'])
    return False
1383 def UpdateDeps(self
, revision
, depot
, deps_file
):
1384 """Updates DEPS file with new revision of dependency repository.
1386 This method search DEPS for a particular pattern in which depot revision
1387 is specified (e.g "webkit_revision": "123456"). If a match is found then
1388 it resolves the given git hash to SVN revision and replace it in DEPS file.
1391 revision: A git hash revision of the dependency repository.
1392 depot: Current depot being bisected.
1393 deps_file: Path to DEPS file.
1396 True if DEPS file is modified successfully, otherwise False.
1398 if not os
.path
.exists(deps_file
):
1401 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
1402 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1404 print 'DEPS update not supported for Depot: %s', depot
1407 # Hack for Angle repository. In the DEPS file, "vars" dictionary variable
1408 # contains "angle_revision" key that holds git hash instead of SVN revision.
1409 # And sometime "angle_revision" key is not specified in "vars" variable.
1410 # In such cases check, "deps" dictionary variable that matches
1411 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1412 if depot
== 'angle':
1413 return _UpdateDEPSForAngle(revision
, depot
, deps_file
)
1416 deps_contents
= ReadStringFromFile(deps_file
)
1417 # Check whether the depot and revision pattern in DEPS file vars
1418 # e.g. for webkit the format is "webkit_revision": "12345".
1419 deps_revision
= re
.compile(r
'(?<="%s": ")([0-9]+)(?=")' % deps_var
,
1421 match
= re
.search(deps_revision
, deps_contents
)
1423 svn_revision
= self
.source_control
.SVNFindRev(
1424 revision
, self
._GetDepotDirectory
(depot
))
1425 if not svn_revision
:
1426 print 'Could not determine SVN revision for %s' % revision
1428 # Update the revision information for the given depot
1429 new_data
= re
.sub(deps_revision
, str(svn_revision
), deps_contents
)
1431 # For v8_bleeding_edge revisions change V8 branch in order
1432 # to fetch bleeding edge revision.
1433 if depot
== 'v8_bleeding_edge':
1434 new_data
= _UpdateV8Branch(new_data
)
1437 # Write changes to DEPS file
1438 WriteStringToFile(new_data
, deps_file
)
1441 print 'Something went wrong while updating DEPS file. [%s]' % e
  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text.

    Raises:
      RuntimeError: The DEPS file is missing, the Chromium revision cannot be
          determined, or the DEPS checkout/update fails.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    cmd = ['rev-parse', 'HEAD']
    chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(
          bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          # NOTE(review): the head of the diff command list was missing from
          # the paste — reconstructed as a git diff of the DEPS file.
          diff_command = ['diff',
                          '--src-prefix=src/',
                          '--dst-prefix=src/',
                          '--no-ext-diff',
                          bisect_utils.FILE_DEPS,
                          ]
          diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError(
              'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
      else:
        raise RuntimeError(
            'DEPS checkout Failed for chromium revision : [%s]' % chromium_sha)
    return (None, None)
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: The depot being bisected.
      revision: A git hash of the revision to fetch/build (optional).

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        revision, deps_patch = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(
              bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
        return True
      # NOTE(review): the early-return lines in the download branch were
      # missing from the paste — reconstructed.
      return False

    # These codes are executed when bisect bots builds binaries locally.
    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
  def RunGClientHooks(self):
    """Runs gclient with runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    if self.opts.debug_ignore_build:
      # Debug mode: pretend hooks succeeded without running anything.
      return True
    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1527 def _IsBisectModeUsingMetric(self
):
1528 return self
.opts
.bisect_mode
in [BISECT_MODE_MEAN
, BISECT_MODE_STD_DEV
]
1530 def _IsBisectModeReturnCode(self
):
1531 return self
.opts
.bisect_mode
in [BISECT_MODE_RETURN_CODE
]
1533 def _IsBisectModeStandardDeviation(self
):
1534 return self
.opts
.bisect_mode
in [BISECT_MODE_STD_DEV
]
  def GetCompatibleCommand(self, command_to_run, revision, depot):
    """Rewrites the test command's --browser flag for old Android revisions.

    Args:
      command_to_run: The performance test command line.
      revision: The git hash currently being tested.
      depot: The depot being bisected.

    Returns:
      The (possibly rewritten) command line string.
    """
    # Prior to crrev.com/274857 *only* android-chromium-testshell
    # Then until crrev.com/276628 *both* (android-chromium-testshell and
    # android-chrome-shell) work. After that rev 276628 *only*
    # android-chrome-shell works. bisect-perf-reggresion.py script should
    # handle these cases and set appropriate browser type based on revision.
    if self.opts.target_platform in ['android']:
      # When its a third_party depot, get the chromium revision.
      if depot != 'chromium':
        revision = bisect_utils.CheckRunGit(
            ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
      svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd)
      if not svn_revision:
        return command_to_run
      cmd_re = re.compile('--browser=(?P<browser_type>\S+)')
      matches = cmd_re.search(command_to_run)
      if bisect_utils.IsStringInt(svn_revision) and matches:
        cmd_browser = matches.group('browser_type')
        if svn_revision <= 274857 and cmd_browser == 'android-chrome-shell':
          return command_to_run.replace(cmd_browser,
                                        'android-chromium-testshell')
        elif (svn_revision >= 276628 and
              cmd_browser == 'android-chromium-testshell'):
          return command_to_run.replace(cmd_browser,
                                        'android-chrome-shell')
    return command_to_run
  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
          This is the result chart name and trace name, separated by slash.
          May be None for perf try jobs.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
      The arguments reset_on_first_run, upload_on_last_run and results_label
      are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    if self.opts.debug_ignore_perf_test:
      # NOTE(review): the fake-results literal was missing from the paste —
      # reconstructed as an all-zero values dict.
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For Windows platform set posix=False, to parse windows paths correctly.
    # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
    # refer to http://bugs.python.org/issue1724822. By default posix=True.
    args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())

    if not _GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % bisect_utils.CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
            current_args, cwd=self.src_cwd)
      except OSError as e:
        # A missing executable surfaces as ENOENT; give an actionable message.
        if e.errno == errno.ENOENT:
          err_text = ('Something went wrong running the performance test. '
                      'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                         'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'
          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if metric and self._IsBisectModeUsingMetric():
        metric_values += _ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if metric and len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of metrics
      # that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero vs
    # non-zero.
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
          'mean': overall_return_code,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    else:
      # Need to get the average value if there were multiple values.
      truncated_mean = math_utils.TruncatedMean(
          metric_values, self.opts.truncate_percent)
      standard_err = math_utils.StandardError(metric_values)
      standard_dev = math_utils.StandardDeviation(metric_values)

      if self._IsBisectModeStandardDeviation():
        metric_values = [standard_dev]

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
  def FindAllRevisionsToSync(self, revision, depot):
    """Finds all dependant revisions and depots that need to be synced for a
    given revision. This is only useful in the git workflow, as an svn depot
    may be split into multiple mirrors.

    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
    skia/include. To sync skia/src properly, one has to find the proper
    revisions in skia/gyp and skia/include.

    Args:
      revision: The revision to sync to.
      depot: The depot in use at the moment (probably skia).

    Returns:
      A list of [depot, revision] pairs that need to be synced, or None if a
      dependant revision could not be resolved.
    """
    revisions_to_sync = [[depot, revision]]

    is_base = ((depot == 'chromium') or (depot == 'cros') or
               (depot == 'android-chrome'))

    # Some SVN depots were split into multiple git depots, so we need to
    # figure out for each mirror which git revision to grab. There's no
    # guarantee that the SVN revision will exist for each of the dependant
    # depots, so we have to grep the git logs and grab the next earlier one.
    if (not is_base
        and DEPOT_DEPS_NAME[depot]['depends']
        and self.source_control.IsGit()):
      svn_rev = self.source_control.SVNFindRev(revision)

      for d in DEPOT_DEPS_NAME[depot]['depends']:
        self.ChangeToDepotWorkingDirectory(d)

        # -1000 searches up to 1000 commits back for the nearest match.
        dependant_rev = self.source_control.ResolveToRevision(
            svn_rev, d, DEPOT_DEPS_NAME, -1000)

        if dependant_rev:
          revisions_to_sync.append([d, dependant_rev])

      num_resolved = len(revisions_to_sync)
      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

      self.ChangeToDepotWorkingDirectory(depot)

      # Every dependant depot must have resolved (the +1 accounts for the
      # depot itself at the head of the list).
      if not ((num_resolved - 1) == num_needed:
        return None
    return revisions_to_sync
  def PerformPreBuildCleanup(self):
    """Performs necessary cleanup between runs."""
    print 'Cleaning up between runs.'
    print

    # Having these pyc files around between runs can confuse the
    # perf tests and cause them to crash.
    for (path, _, files) in os.walk(self.src_cwd):
      for cur_file in files:
        if cur_file.endswith('.pyc'):
          path_to_file = os.path.join(path, cur_file)
          os.remove(path_to_file)
  def PerformWebkitDirectoryCleanup(self, revision):
    """If the script is switching between Blink and WebKit during bisect,
    its faster to just delete the directory rather than leave it up to git
    to sync.

    Returns:
      True if successful.
    """
    if not self.source_control.CheckoutFileAtRevision(
        bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
      return False

    cwd = os.getcwd()
    os.chdir(self.src_cwd)

    is_blink = bisect_utils.IsDepsFileBlink()

    os.chdir(cwd)

    if not self.source_control.RevertFileToHead(
        bisect_utils.FILE_DEPS_GIT):
      return False

    # Only wipe the directory when the Blink/WebKit state actually flips.
    if self.was_blink != is_blink:
      self.was_blink = is_blink
      # Removes third_party/Webkit directory.
      return bisect_utils.RemoveThirdPartyDirectory('Webkit')
    # NOTE(review): several return lines were missing from the paste —
    # reconstructed.
    return True
  def PerformCrosChrootCleanup(self):
    """Deletes the chroot.

    Returns:
      True if successful.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [bisect_utils.CROS_SDK_PATH, '--delete']
    return_code = bisect_utils.RunProcess(cmd)
    os.chdir(cwd)
    # RunProcess returns the process exit code; 0 means success.
    return not return_code
  def CreateCrosChroot(self):
    """Creates a new chroot.

    Returns:
      True if successful.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory('cros')
    cmd = [bisect_utils.CROS_SDK_PATH, '--create']
    return_code = bisect_utils.RunProcess(cmd)
    os.chdir(cwd)
    # RunProcess returns the process exit code; 0 means success.
    return not return_code
  def PerformPreSyncCleanup(self, revision, depot):
    """Performs any necessary cleanup before syncing.

    Returns:
      True if successful.
    """
    if depot == 'chromium' or depot == 'android-chrome':
      # Removes third_party/libjingle. At some point, libjingle was causing
      # issues syncing when using the git workflow (crbug.com/266324).
      os.chdir(self.src_cwd)
      if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
        return False
      # Removes third_party/skia. At some point, skia was causing
      # issues syncing when using the git workflow (crbug.com/377951).
      if not bisect_utils.RemoveThirdPartyDirectory('skia'):
        return False
      if depot == 'chromium':
        # The fast webkit cleanup doesn't work for android_chrome
        # The switch from Webkit to Blink that this deals with now happened
        # quite a long time ago so this is unlikely to be a problem.
        return self.PerformWebkitDirectoryCleanup(revision)
    elif depot == 'cros':
      return self.PerformCrosChrootCleanup()
    return True
  def RunPostSync(self, depot):
    """Performs any work after syncing.

    Returns:
      True if successful.
    """
    if self.opts.target_platform == 'android':
      if not builder.SetupAndroidBuildEnvironment(self.opts,
          path_to_src=self.src_cwd):
        return False

    # NOTE(review): the condition guarding the cros branch was missing from
    # the paste — reconstructed as a depot check.
    if depot == 'cros':
      return self.CreateCrosChroot()
    else:
      return self.RunGClientHooks()
  def ShouldSkipRevision(self, depot, revision):
    """Some commits can be safely skipped (such as a DEPS roll), since the tool
    is git based those changes would have no effect.

    Args:
      depot: The depot being bisected.
      revision: Current revision we're synced to.

    Returns:
      True if we should skip building/testing this revision.
    """
    if depot == 'chromium':
      if self.source_control.IsGit():
        cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
        output = bisect_utils.CheckRunGit(cmd)

        files = output.splitlines()

        # A commit that touches only DEPS is a roll; skip it.
        if len(files) == 1 and files[0] == 'DEPS':
          return True

    return False
  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                              skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: If True, allow skipping revisions that are pure DEPS rolls.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    # NOTE(review): substantial control-flow lines (success flag, sync loop
    # structure, nesting of the post-sync branches) were missing from the
    # paste — reconstructed; verify against upstream history.
    success = True
    if not self.opts.debug_ignore_sync:
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
                                        current_revision)
        if not self.source_control.SyncToRevision(current_revision,
                                                  sync_client):
          success = False
          break

    if success:
      success = self.RunPostSync(depot)
      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
                  BUILD_RESULT_SKIPPED)

        start_build_time = time.time()
        if self.BuildCurrentRevision(depot, revision):
          after_build_time = time.time()
          # Hack to support things that got changed.
          command_to_run = self.GetCompatibleCommand(
              command_to_run, revision, depot)
          results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                           metric)
          # Restore build output directory once the tests are done, to avoid
          # stale downloaded binaries being reused on the next revision.
          if self.IsDownloadable(depot) and revision:
            self.BackupOrRestoreOutputdirectory(restore=True)

          if results[1] == 0:
            external_revisions = self._Get3rdPartyRevisions(depot)

            if not external_revisions is None:
              return (results[0], results[1], external_revisions,
                      time.time() - after_build_time, after_build_time -
                      start_build_time)
            else:
              return ('Failed to parse DEPS file for external revisions.',
                      BUILD_RESULT_FAIL)
          else:
            return results
        else:
          return ('Failed to build revision: [%s]' % str(revision),
                  BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % str(revision),
              BUILD_RESULT_FAIL)
1978 def _CheckIfRunPassed(self
, current_value
, known_good_value
, known_bad_value
):
1979 """Given known good and bad values, decide if the current_value passed
1983 current_value: The value of the metric being checked.
1984 known_bad_value: The reference value for a "failed" run.
1985 known_good_value: The reference value for a "passed" run.
1988 True if the current_value is closer to the known_good_value than the
1991 if self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
1992 dist_to_good_value
= abs(current_value
['std_dev'] -
1993 known_good_value
['std_dev'])
1994 dist_to_bad_value
= abs(current_value
['std_dev'] -
1995 known_bad_value
['std_dev'])
1997 dist_to_good_value
= abs(current_value
['mean'] - known_good_value
['mean'])
1998 dist_to_bad_value
= abs(current_value
['mean'] - known_bad_value
['mean'])
2000 return dist_to_good_value
< dist_to_bad_value
2002 def _GetDepotDirectory(self
, depot_name
):
2003 if depot_name
== 'chromium':
2005 elif depot_name
== 'cros':
2006 return self
.cros_cwd
2007 elif depot_name
in DEPOT_NAMES
:
2008 return self
.depot_cwd
[depot_name
]
2010 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
2011 'was added without proper support?' % depot_name
)
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_dir = self._GetDepotDirectory(depot_name)
  os.chdir(target_dir)
2021 def _FillInV8BleedingEdgeInfo(self
, min_revision_data
, max_revision_data
):
2022 r1
= self
._GetNearestV
8BleedingEdgeFromTrunk
(min_revision_data
['revision'],
2023 search_forward
=True)
2024 r2
= self
._GetNearestV
8BleedingEdgeFromTrunk
(max_revision_data
['revision'],
2025 search_forward
=False)
2026 min_revision_data
['external']['v8_bleeding_edge'] = r1
2027 max_revision_data
['external']['v8_bleeding_edge'] = r2
2029 if (not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
2030 min_revision_data
['revision'])
2031 or not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
2032 max_revision_data
['revision'])):
2033 self
.warnings
.append(
2034 'Trunk revisions in V8 did not map directly to bleeding_edge. '
2035 'Attempted to expand the range to find V8 rolls which did map '
2036 'directly to bleeding_edge revisions, but results might not be '
2039 def _FindNextDepotToBisect(
2040 self
, current_depot
, min_revision_data
, max_revision_data
):
2041 """Decides which depot the script should dive into next (if any).
2044 current_depot: Current depot being bisected.
2045 min_revision_data: Data about the earliest revision in the bisect range.
2046 max_revision_data: Data about the latest revision in the bisect range.
2049 Name of the depot to bisect next, or None.
2051 external_depot
= None
2052 for next_depot
in DEPOT_NAMES
:
2053 if DEPOT_DEPS_NAME
[next_depot
].has_key('platform'):
2054 if DEPOT_DEPS_NAME
[next_depot
]['platform'] != os
.name
:
2057 if not (DEPOT_DEPS_NAME
[next_depot
]['recurse']
2058 and min_revision_data
['depot']
2059 in DEPOT_DEPS_NAME
[next_depot
]['from']):
2062 if current_depot
== 'v8':
2063 # We grab the bleeding_edge info here rather than earlier because we
2064 # finally have the revision range. From that we can search forwards and
2065 # backwards to try to match trunk revisions to bleeding_edge.
2066 self
._FillInV
8BleedingEdgeInfo
(min_revision_data
, max_revision_data
)
2068 if (min_revision_data
['external'].get(next_depot
) ==
2069 max_revision_data
['external'].get(next_depot
)):
2072 if (min_revision_data
['external'].get(next_depot
) and
2073 max_revision_data
['external'].get(next_depot
)):
2074 external_depot
= next_depot
2077 return external_depot
2079 def PrepareToBisectOnDepot(
2080 self
, current_depot
, end_revision
, start_revision
, previous_revision
):
2081 """Changes to the appropriate directory and gathers a list of revisions
2082 to bisect between |start_revision| and |end_revision|.
2085 current_depot: The depot we want to bisect.
2086 end_revision: End of the revision range.
2087 start_revision: Start of the revision range.
2088 previous_revision: The last revision we synced to on |previous_depot|.
2091 A list containing the revisions between |start_revision| and
2092 |end_revision| inclusive.
2094 # Change into working directory of external library to run
2095 # subsequent commands.
2096 self
.ChangeToDepotWorkingDirectory(current_depot
)
2098 # V8 (and possibly others) is merged in periodically. Bisecting
2099 # this directory directly won't give much good info.
2100 if DEPOT_DEPS_NAME
[current_depot
].has_key('custom_deps'):
2101 config_path
= os
.path
.join(self
.src_cwd
, '..')
2102 if bisect_utils
.RunGClientAndCreateConfig(self
.opts
,
2103 DEPOT_DEPS_NAME
[current_depot
]['custom_deps'], cwd
=config_path
):
2105 if bisect_utils
.RunGClient(
2106 ['sync', '--revision', previous_revision
], cwd
=self
.src_cwd
):
2109 if current_depot
== 'v8_bleeding_edge':
2110 self
.ChangeToDepotWorkingDirectory('chromium')
2112 shutil
.move('v8', 'v8.bak')
2113 shutil
.move('v8_bleeding_edge', 'v8')
2115 self
.cleanup_commands
.append(['mv', 'v8', 'v8_bleeding_edge'])
2116 self
.cleanup_commands
.append(['mv', 'v8.bak', 'v8'])
2118 self
.depot_cwd
['v8_bleeding_edge'] = os
.path
.join(self
.src_cwd
, 'v8')
2119 self
.depot_cwd
['v8'] = os
.path
.join(self
.src_cwd
, 'v8.bak')
2121 self
.ChangeToDepotWorkingDirectory(current_depot
)
2123 depot_revision_list
= self
.GetRevisionList(current_depot
,
2127 self
.ChangeToDepotWorkingDirectory('chromium')
2129 return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
        has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot both revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(
      bad_rev, target_depot, cmd, metric)

  good_run_results = None

  # Only spend time on the good revision if the bad one succeeded; the
  # bisect cannot proceed without a working bad reference anyway.
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(
        good_rev, target_depot, cmd, metric)

  return (bad_run_results, good_run_results)
2156 def PrintRevisionsToBisectMessage(self
, revision_list
, depot
):
2157 if self
.opts
.output_buildbot_annotations
:
2158 step_name
= 'Bisection Range: [%s - %s]' % (
2159 revision_list
[len(revision_list
)-1], revision_list
[0])
2160 bisect_utils
.OutputAnnotationStepStart(step_name
)
2163 print 'Revisions to bisect on [%s]:' % depot
2164 for revision_id
in revision_list
:
2165 print ' -> %s' % (revision_id
, )
2168 if self
.opts
.output_buildbot_annotations
:
2169 bisect_utils
.OutputAnnotationStepClosed()
2171 def NudgeRevisionsIfDEPSChange(self
, bad_revision
, good_revision
):
2172 """Checks to see if changes to DEPS file occurred, and that the revision
2173 range also includes the change to .DEPS.git. If it doesn't, attempts to
2174 expand the revision range to include it.
2177 bad_rev: First known bad revision.
2178 good_revision: Last known good revision.
2181 A tuple with the new bad and good revisions.
2183 if self
.source_control
.IsGit() and self
.opts
.target_platform
== 'chromium':
2184 changes_to_deps
= self
.source_control
.QueryFileRevisionHistory(
2185 'DEPS', good_revision
, bad_revision
)
2188 # DEPS file was changed, search from the oldest change to DEPS file to
2189 # bad_revision to see if there are matching .DEPS.git changes.
2190 oldest_deps_change
= changes_to_deps
[-1]
2191 changes_to_gitdeps
= self
.source_control
.QueryFileRevisionHistory(
2192 bisect_utils
.FILE_DEPS_GIT
, oldest_deps_change
, bad_revision
)
2194 if len(changes_to_deps
) != len(changes_to_gitdeps
):
2195 # Grab the timestamp of the last DEPS change
2196 cmd
= ['log', '--format=%ct', '-1', changes_to_deps
[0]]
2197 output
= bisect_utils
.CheckRunGit(cmd
)
2198 commit_time
= int(output
)
2200 # Try looking for a commit that touches the .DEPS.git file in the
2201 # next 15 minutes after the DEPS file change.
2202 cmd
= ['log', '--format=%H', '-1',
2203 '--before=%d' % (commit_time
+ 900), '--after=%d' % commit_time
,
2204 'origin/master', bisect_utils
.FILE_DEPS_GIT
]
2205 output
= bisect_utils
.CheckRunGit(cmd
)
2206 output
= output
.strip()
2208 self
.warnings
.append('Detected change to DEPS and modified '
2209 'revision range to include change to .DEPS.git')
2210 return (output
, good_revision
)
2212 self
.warnings
.append('Detected change to DEPS but couldn\'t find '
2213 'matching change to .DEPS.git')
2214 return (bad_revision
, good_revision
)
def CheckIfRevisionsInProperOrder(
    self, target_depot, good_revision, bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if not self.source_control.IsGit() or target_depot == 'cros':
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)

  cwd = self._GetDepotDirectory(target_depot)

  def _CommitTime(revision):
    # Ask git for the commit timestamp (%ct) of the given revision.
    log_cmd = ['log', '--format=%ct', '-1', revision]
    return int(bisect_utils.CheckRunGit(log_cmd, cwd=cwd))

  good_commit_time = _CommitTime(good_revision)
  bad_commit_time = _CommitTime(bad_revision)
  return good_commit_time <= bad_commit_time
2243 def CanPerformBisect(self
, revision_to_check
):
2244 """Checks whether a given revision is bisectable.
2246 Note: At present it checks whether a given revision is bisectable on
2247 android bots(refer crbug.com/385324).
2250 revision_to_check: Known good revision.
2253 A dictionary indicating the result. If revision is not bisectable,
2254 this will contain the field "error", otherwise None.
2256 if self
.opts
.target_platform
== 'android':
2257 revision_to_check
= self
.source_control
.SVNFindRev(revision_to_check
)
2258 if (bisect_utils
.IsStringInt(revision_to_check
)
2259 and revision_to_check
< 265549):
2261 'Bisect cannot conitnue for the given revision range.\n'
2262 'It is impossible to bisect Android regressions '
2263 'prior to r265549, which allows the bisect bot to '
2264 'rely on Telemetry to do apk installation of the most recently '
2265 'built local ChromeShell(refer to crbug.com/385324).\n'
2266 'Please try bisecting revisions greater than or equal to r265549.')}
2269 def Run(self
, command_to_run
, bad_revision_in
, good_revision_in
, metric
):
2270 """Given known good and bad revisions, run a binary search on all
2271 intermediate revisions to determine the CL where the performance regression
2275 command_to_run: Specify the command to execute the performance test.
2276 good_revision: Number/tag of the known good revision.
2277 bad_revision: Number/tag of the known bad revision.
2278 metric: The performance metric to monitor.
2281 A dict with 2 members, 'revision_data' and 'error'. On success,
2282 'revision_data' will contain a dict mapping revision ids to
2283 data about that revision. Each piece of revision data consists of a
2284 dict with the following keys:
2286 'passed': Represents whether the performance test was successful at
2287 that revision. Possible values include: 1 (passed), 0 (failed),
2288 '?' (skipped), 'F' (build failed).
2289 'depot': The depot that this revision is from (ie. WebKit)
2290 'external': If the revision is a 'src' revision, 'external' contains
2291 the revisions of each of the external libraries.
2292 'sort': A sort value for sorting the dict in order of commits.
2309 If an error occurred, the 'error' field will contain the message and
2310 'revision_data' will be empty.
2313 'revision_data' : {},
2317 # Choose depot to bisect first
2318 target_depot
= 'chromium'
2319 if self
.opts
.target_platform
== 'cros':
2320 target_depot
= 'cros'
2321 elif self
.opts
.target_platform
== 'android-chrome':
2322 target_depot
= 'android-chrome'
2325 self
.ChangeToDepotWorkingDirectory(target_depot
)
2327 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2328 bad_revision
= self
.source_control
.ResolveToRevision(
2329 bad_revision_in
, target_depot
, DEPOT_DEPS_NAME
, 100)
2330 good_revision
= self
.source_control
.ResolveToRevision(
2331 good_revision_in
, target_depot
, DEPOT_DEPS_NAME
, -100)
2335 if bad_revision
is None:
2336 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in
,)
2339 if good_revision
is None:
2340 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in
,)
2343 # Check that they didn't accidentally swap good and bad revisions.
2344 if not self
.CheckIfRevisionsInProperOrder(
2345 target_depot
, good_revision
, bad_revision
):
2346 results
['error'] = ('bad_revision < good_revision, did you swap these '
2350 bad_revision
, good_revision
= self
.NudgeRevisionsIfDEPSChange(
2351 bad_revision
, good_revision
)
2353 if self
.opts
.output_buildbot_annotations
:
2354 bisect_utils
.OutputAnnotationStepStart('Gathering Revisions')
2356 cannot_bisect
= self
.CanPerformBisect(good_revision
)
2358 results
['error'] = cannot_bisect
.get('error')
2361 print 'Gathering revision range for bisection.'
2362 # Retrieve a list of revisions to do bisection on.
2363 src_revision_list
= self
.GetRevisionList(
2364 target_depot
, bad_revision
, good_revision
)
2366 if self
.opts
.output_buildbot_annotations
:
2367 bisect_utils
.OutputAnnotationStepClosed()
2369 if src_revision_list
:
2370 # revision_data will store information about a revision such as the
2371 # depot it came from, the webkit/V8 revision at that time,
2372 # performance timing, build state, etc...
2373 revision_data
= results
['revision_data']
2375 # revision_list is the list we're binary searching through at the moment.
2380 for current_revision_id
in src_revision_list
:
2383 revision_data
[current_revision_id
] = {
2386 'depot' : target_depot
,
2390 'sort' : sort_key_ids
,
2392 revision_list
.append(current_revision_id
)
2395 max_revision
= len(revision_list
) - 1
2397 self
.PrintRevisionsToBisectMessage(revision_list
, target_depot
)
2399 if self
.opts
.output_buildbot_annotations
:
2400 bisect_utils
.OutputAnnotationStepStart('Gathering Reference Values')
2402 print 'Gathering reference values for bisection.'
2404 # Perform the performance tests on the good and bad revisions, to get
2406 bad_results
, good_results
= self
.GatherReferenceValues(good_revision
,
2412 if self
.opts
.output_buildbot_annotations
:
2413 bisect_utils
.OutputAnnotationStepClosed()
2416 results
['error'] = ('An error occurred while building and running '
2417 'the \'bad\' reference value. The bisect cannot continue without '
2418 'a working \'bad\' revision to start from.\n\nError: %s' %
2423 results
['error'] = ('An error occurred while building and running '
2424 'the \'good\' reference value. The bisect cannot continue without '
2425 'a working \'good\' revision to start from.\n\nError: %s' %
2430 # We need these reference values to determine if later runs should be
2431 # classified as pass or fail.
2432 known_bad_value
= bad_results
[0]
2433 known_good_value
= good_results
[0]
2435 # Can just mark the good and bad revisions explicitly here since we
2436 # already know the results.
2437 bad_revision_data
= revision_data
[revision_list
[0]]
2438 bad_revision_data
['external'] = bad_results
[2]
2439 bad_revision_data
['perf_time'] = bad_results
[3]
2440 bad_revision_data
['build_time'] = bad_results
[4]
2441 bad_revision_data
['passed'] = False
2442 bad_revision_data
['value'] = known_bad_value
2444 good_revision_data
= revision_data
[revision_list
[max_revision
]]
2445 good_revision_data
['external'] = good_results
[2]
2446 good_revision_data
['perf_time'] = good_results
[3]
2447 good_revision_data
['build_time'] = good_results
[4]
2448 good_revision_data
['passed'] = True
2449 good_revision_data
['value'] = known_good_value
2451 next_revision_depot
= target_depot
2454 if not revision_list
:
2457 min_revision_data
= revision_data
[revision_list
[min_revision
]]
2458 max_revision_data
= revision_data
[revision_list
[max_revision
]]
2460 if max_revision
- min_revision
<= 1:
2461 current_depot
= min_revision_data
['depot']
2462 if min_revision_data
['passed'] == '?':
2463 next_revision_index
= min_revision
2464 elif max_revision_data
['passed'] == '?':
2465 next_revision_index
= max_revision
2466 elif current_depot
in ['android-chrome', 'cros', 'chromium', 'v8']:
2467 previous_revision
= revision_list
[min_revision
]
2468 # If there were changes to any of the external libraries we track,
2469 # should bisect the changes there as well.
2470 external_depot
= self
._FindNextDepotToBisect
(
2471 current_depot
, min_revision_data
, max_revision_data
)
2473 # If there was no change in any of the external depots, the search
2475 if not external_depot
:
2476 if current_depot
== 'v8':
2477 self
.warnings
.append('Unfortunately, V8 bisection couldn\'t '
2478 'continue any further. The script can only bisect into '
2479 'V8\'s bleeding_edge repository if both the current and '
2480 'previous revisions in trunk map directly to revisions in '
2484 earliest_revision
= max_revision_data
['external'][external_depot
]
2485 latest_revision
= min_revision_data
['external'][external_depot
]
2487 new_revision_list
= self
.PrepareToBisectOnDepot(
2488 external_depot
, latest_revision
, earliest_revision
,
2491 if not new_revision_list
:
2492 results
['error'] = ('An error occurred attempting to retrieve '
2493 'revision range: [%s..%s]' %
2494 (earliest_revision
, latest_revision
))
2497 _AddRevisionsIntoRevisionData(
2498 new_revision_list
, external_depot
, min_revision_data
['sort'],
2501 # Reset the bisection and perform it on the newly inserted
2503 revision_list
= new_revision_list
2505 max_revision
= len(revision_list
) - 1
2506 sort_key_ids
+= len(revision_list
)
2508 print ('Regression in metric %s appears to be the result of '
2509 'changes in [%s].' % (metric
, external_depot
))
2511 self
.PrintRevisionsToBisectMessage(revision_list
, external_depot
)
2517 next_revision_index
= (int((max_revision
- min_revision
) / 2) +
2520 next_revision_id
= revision_list
[next_revision_index
]
2521 next_revision_data
= revision_data
[next_revision_id
]
2522 next_revision_depot
= next_revision_data
['depot']
2524 self
.ChangeToDepotWorkingDirectory(next_revision_depot
)
2526 if self
.opts
.output_buildbot_annotations
:
2527 step_name
= 'Working on [%s]' % next_revision_id
2528 bisect_utils
.OutputAnnotationStepStart(step_name
)
2530 print 'Working on revision: [%s]' % next_revision_id
2532 run_results
= self
.SyncBuildAndRunRevision(next_revision_id
,
2533 next_revision_depot
,
2535 metric
, skippable
=True)
2537 # If the build is successful, check whether or not the metric
2539 if not run_results
[1]:
2540 if len(run_results
) > 2:
2541 next_revision_data
['external'] = run_results
[2]
2542 next_revision_data
['perf_time'] = run_results
[3]
2543 next_revision_data
['build_time'] = run_results
[4]
2545 passed_regression
= self
._CheckIfRunPassed
(run_results
[0],
2549 next_revision_data
['passed'] = passed_regression
2550 next_revision_data
['value'] = run_results
[0]
2552 if passed_regression
:
2553 max_revision
= next_revision_index
2555 min_revision
= next_revision_index
2557 if run_results
[1] == BUILD_RESULT_SKIPPED
:
2558 next_revision_data
['passed'] = 'Skipped'
2559 elif run_results
[1] == BUILD_RESULT_FAIL
:
2560 next_revision_data
['passed'] = 'Build Failed'
2562 print run_results
[0]
2564 # If the build is broken, remove it and redo search.
2565 revision_list
.pop(next_revision_index
)
2569 if self
.opts
.output_buildbot_annotations
:
2570 self
._PrintPartialResults
(results
)
2571 bisect_utils
.OutputAnnotationStepClosed()
2573 # Weren't able to sync and retrieve the revision range.
2574 results
['error'] = ('An error occurred attempting to retrieve revision '
2575 'range: [%s..%s]' % (good_revision
, bad_revision
))
def _PrintPartialResults(self, results_dict):
  """Prints an intermediate table of the commits tested so far.

  Args:
    results_dict: Bisect results; its 'revision_data' entry maps revision
        ids to per-revision data (including a 'sort' key used for ordering).
  """
  revision_data = results_dict['revision_data']
  revision_data_sorted = sorted(
      revision_data.iteritems(), key=lambda entry: entry[1]['sort'])
  summary = self._GetResultsDict(revision_data, revision_data_sorted)

  # final_step=False marks this as a partial (in-progress) table.
  self._PrintTestedCommitsTable(
      revision_data_sorted,
      summary['first_working_revision'],
      summary['last_broken_revision'],
      100, final_step=False)
2590 def _ConfidenceLevelStatus(self
, results_dict
):
2591 if not results_dict
['confidence']:
2593 confidence_status
= 'Successful with %(level)s confidence%(warning)s.'
2594 if results_dict
['confidence'] >= HIGH_CONFIDENCE
:
2598 warning
= ' and warnings'
2599 if not self
.warnings
:
2601 return confidence_status
% {'level': level
, 'warning': warning
}
2603 def _GetViewVCLinkFromDepotAndHash(self
, cl
, depot
):
2604 info
= self
.source_control
.QueryRevisionInfo(cl
,
2605 self
._GetDepotDirectory
(depot
))
2606 if depot
and DEPOT_DEPS_NAME
[depot
].has_key('viewvc'):
2608 # Format is "git-svn-id: svn://....@123456 <other data>"
2609 svn_line
= [i
for i
in info
['body'].splitlines() if 'git-svn-id:' in i
]
2610 svn_revision
= svn_line
[0].split('@')
2611 svn_revision
= svn_revision
[1].split(' ')[0]
2612 return DEPOT_DEPS_NAME
[depot
]['viewvc'] + svn_revision
2617 def _PrintRevisionInfo(self
, cl
, info
, depot
=None):
2619 if not info
['email'].startswith(info
['author']):
2620 email_info
= '\nEmail : %s' % info
['email']
2621 commit_link
= self
._GetViewVCLinkFromDepotAndHash
(cl
, depot
)
2623 commit_info
= '\nLink : %s' % commit_link
2625 commit_info
= ('\nFailed to parse svn revision from body:\n%s' %
2627 print RESULTS_REVISION_INFO
% {
2628 'subject': info
['subject'],
2629 'author': info
['author'],
2630 'email_info': email_info
,
2631 'commit_info': commit_info
,
2633 'cl_date': info
['date']
2636 def _PrintTestedCommitsHeader(self
):
2637 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
2639 [20, 70, 14, 12, 13],
2640 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
2641 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
2643 [20, 70, 14, 12, 13],
2644 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
2645 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
2648 ['Depot', 'Commit SHA', 'Return Code', 'State'])
2650 assert False, 'Invalid bisect_mode specified.'
2652 def _PrintTestedCommitsEntry(self
, current_data
, cl_link
, state_str
):
2653 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
2654 std_error
= '+-%.02f' % current_data
['value']['std_err']
2655 mean
= '%.02f' % current_data
['value']['mean']
2657 [20, 70, 12, 14, 13],
2658 [current_data
['depot'], cl_link
, mean
, std_error
, state_str
])
2659 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
2660 std_error
= '+-%.02f' % current_data
['value']['std_err']
2661 mean
= '%.02f' % current_data
['value']['mean']
2663 [20, 70, 12, 14, 13],
2664 [current_data
['depot'], cl_link
, std_error
, mean
, state_str
])
2665 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
2666 mean
= '%d' % current_data
['value']['mean']
2669 [current_data
['depot'], cl_link
, mean
, state_str
])
2671 def _PrintTestedCommitsTable(
2672 self
, revision_data_sorted
, first_working_revision
, last_broken_revision
,
2673 confidence
, final_step
=True):
2676 print '===== TESTED COMMITS ====='
2678 print '===== PARTIAL RESULTS ====='
2679 self
._PrintTestedCommitsHeader
()
2681 for current_id
, current_data
in revision_data_sorted
:
2682 if current_data
['value']:
2683 if (current_id
== last_broken_revision
or
2684 current_id
== first_working_revision
):
2685 # If confidence is too low, don't add this empty line since it's
2686 # used to put focus on a suspected CL.
2687 if confidence
and final_step
:
2690 if state
== 2 and not final_step
:
2691 # Just want a separation between "bad" and "good" cl's.
2695 if state
== 1 and final_step
:
2696 state_str
= 'Suspected CL'
2700 # If confidence is too low, don't bother outputting good/bad.
2703 state_str
= state_str
.center(13, ' ')
2705 cl_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
2706 current_data
['depot'])
2708 cl_link
= current_id
2709 self
._PrintTestedCommitsEntry
(current_data
, cl_link
, state_str
)
2711 def _PrintReproSteps(self
):
2712 """Prints out a section of the results explaining how to run the test.
2714 This message includes the command used to run the test.
2716 command
= '$ ' + self
.opts
.command
2717 if bisect_utils
.IsTelemetryCommand(self
.opts
.command
):
2718 command
+= ('\nAlso consider passing --profiler=list to see available '
2720 print REPRO_STEPS_LOCAL
% {'command': command
}
2721 print REPRO_STEPS_TRYJOB
% {'command': command
}
2723 def _PrintOtherRegressions(self
, other_regressions
, revision_data
):
2724 """Prints a section of the results about other potential regressions."""
2726 print 'Other regressions may have occurred:'
2727 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
2728 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
2729 for regression
in other_regressions
:
2730 current_id
, previous_id
, confidence
= regression
2731 current_data
= revision_data
[current_id
]
2732 previous_data
= revision_data
[previous_id
]
2734 current_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
2735 current_data
['depot'])
2736 previous_link
= self
._GetViewVCLinkFromDepotAndHash
(previous_id
,
2737 previous_data
['depot'])
2739 # If we can't map it to a viewable URL, at least show the original hash.
2740 if not current_link
:
2741 current_link
= current_id
2742 if not previous_link
:
2743 previous_link
= previous_id
2745 print ' %8s %70s %s' % (
2746 current_data
['depot'], current_link
,
2747 ('%d%%' % confidence
).center(10, ' '))
2748 print ' %8s %70s' % (
2749 previous_data
['depot'], previous_link
)
2752 def _GetResultsDict(self
, revision_data
, revision_data_sorted
):
2753 # Find range where it possibly broke.
2754 first_working_revision
= None
2755 first_working_revision_index
= -1
2756 last_broken_revision
= None
2757 last_broken_revision_index
= -1
2759 culprit_revisions
= []
2760 other_regressions
= []
2761 regression_size
= 0.0
2762 regression_std_err
= 0.0
2765 for i
in xrange(len(revision_data_sorted
)):
2766 k
, v
= revision_data_sorted
[i
]
2767 if v
['passed'] == 1:
2768 if not first_working_revision
:
2769 first_working_revision
= k
2770 first_working_revision_index
= i
2773 last_broken_revision
= k
2774 last_broken_revision_index
= i
2776 if last_broken_revision
!= None and first_working_revision
!= None:
2778 for i
in xrange(0, last_broken_revision_index
+ 1):
2779 if revision_data_sorted
[i
][1]['value']:
2780 broken_means
.append(revision_data_sorted
[i
][1]['value']['values'])
2783 for i
in xrange(first_working_revision_index
, len(revision_data_sorted
)):
2784 if revision_data_sorted
[i
][1]['value']:
2785 working_means
.append(revision_data_sorted
[i
][1]['value']['values'])
2787 # Flatten the lists to calculate mean of all values.
2788 working_mean
= sum(working_means
, [])
2789 broken_mean
= sum(broken_means
, [])
2791 # Calculate the approximate size of the regression
2792 mean_of_bad_runs
= math_utils
.Mean(broken_mean
)
2793 mean_of_good_runs
= math_utils
.Mean(working_mean
)
2795 regression_size
= 100 * math_utils
.RelativeChange(mean_of_good_runs
,
2797 if math
.isnan(regression_size
):
2798 regression_size
= 'zero-to-nonzero'
2800 regression_std_err
= math
.fabs(math_utils
.PooledStandardError(
2801 [working_mean
, broken_mean
]) /
2802 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0
2804 # Give a "confidence" in the bisect. At the moment we use how distinct the
2805 # values are before and after the last broken revision, and how noisy the
2807 confidence
= ConfidenceScore(working_means
, broken_means
)
2809 culprit_revisions
= []
2812 self
.ChangeToDepotWorkingDirectory(
2813 revision_data
[last_broken_revision
]['depot'])
2815 if revision_data
[last_broken_revision
]['depot'] == 'cros':
2816 # Want to get a list of all the commits and what depots they belong
2817 # to so that we can grab info about each.
2818 cmd
= ['repo', 'forall', '-c',
2819 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2820 last_broken_revision
, first_working_revision
+ 1)]
2821 output
, return_code
= bisect_utils
.RunProcessAndRetrieveOutput(cmd
)
2824 assert not return_code
, ('An error occurred while running '
2825 '"%s"' % ' '.join(cmd
))
2828 for l
in output
.split('\n'):
2830 # Output will be in form:
2832 # /path_to_other_depot
2840 contents
= l
.split(' ')
2841 if len(contents
) > 1:
2842 changes
.append([last_depot
, contents
[0]])
2845 info
= self
.source_control
.QueryRevisionInfo(c
[1])
2846 culprit_revisions
.append((c
[1], info
, None))
2848 for i
in xrange(last_broken_revision_index
, len(revision_data_sorted
)):
2849 k
, v
= revision_data_sorted
[i
]
2850 if k
== first_working_revision
:
2852 self
.ChangeToDepotWorkingDirectory(v
['depot'])
2853 info
= self
.source_control
.QueryRevisionInfo(k
)
2854 culprit_revisions
.append((k
, info
, v
['depot']))
2857 # Check for any other possible regression ranges.
2858 other_regressions
= _FindOtherRegressions(
2859 revision_data_sorted
, mean_of_bad_runs
> mean_of_good_runs
)
2862 'first_working_revision': first_working_revision
,
2863 'last_broken_revision': last_broken_revision
,
2864 'culprit_revisions': culprit_revisions
,
2865 'other_regressions': other_regressions
,
2866 'regression_size': regression_size
,
2867 'regression_std_err': regression_std_err
,
2868 'confidence': confidence
,
def _CheckForWarnings(self, results_dict):
  """Appends warnings about result quality to self.warnings.

  Args:
    results_dict: Bisect results containing 'culprit_revisions' and
        'confidence' entries.
  """
  checks = (
      (len(results_dict['culprit_revisions']) > 1,
       'Due to build errors, regression range could '
       'not be narrowed down to a single commit.'),
      (self.opts.repeat_test_count == 1,
       'Tests were only set to run once. This may '
       'be insufficient to get meaningful results.'),
      (0 < results_dict['confidence'] < HIGH_CONFIDENCE,
       'Confidence is not high. Try bisecting again '
       'with increased repeat_count, larger range, or '
       'on another metric.'),
      (not results_dict['confidence'],
       'Confidence score is 0%. Try bisecting again on '
       'another platform or another metric.'),
  )
  for triggered, message in checks:
    if triggered:
      self.warnings.append(message)
2886 def FormatAndPrintResults(self
, bisect_results
):
2887 """Prints the results from a bisection run in a readable format.
2890 bisect_results: The results from a bisection test run.
2892 revision_data
= bisect_results
['revision_data']
2893 revision_data_sorted
= sorted(revision_data
.iteritems(),
2894 key
= lambda x
: x
[1]['sort'])
2895 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
2897 self
._CheckForWarnings
(results_dict
)
2899 if self
.opts
.output_buildbot_annotations
:
2900 bisect_utils
.OutputAnnotationStepStart('Build Status Per Revision')
2903 print 'Full results of bisection:'
2904 for current_id
, current_data
in revision_data_sorted
:
2905 build_status
= current_data
['passed']
2907 if type(build_status
) is bool:
2909 build_status
= 'Good'
2911 build_status
= 'Bad'
2913 print ' %20s %40s %s' % (current_data
['depot'],
2914 current_id
, build_status
)
2917 if self
.opts
.output_buildbot_annotations
:
2918 bisect_utils
.OutputAnnotationStepClosed()
2919 # The perf dashboard scrapes the "results" step in order to comment on
2920 # bugs. If you change this, please update the perf dashboard as well.
2921 bisect_utils
.OutputAnnotationStepStart('Results')
2923 self
._PrintBanner
(results_dict
)
2924 self
._PrintWarnings
()
2926 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
2927 for culprit
in results_dict
['culprit_revisions']:
2928 cl
, info
, depot
= culprit
2929 self
._PrintRevisionInfo
(cl
, info
, depot
)
2930 if results_dict
['other_regressions']:
2931 self
._PrintOtherRegressions
(results_dict
['other_regressions'],
2933 self
._PrintTestedCommitsTable
(revision_data_sorted
,
2934 results_dict
['first_working_revision'],
2935 results_dict
['last_broken_revision'],
2936 results_dict
['confidence'])
2937 _PrintStepTime(revision_data_sorted
)
2938 self
._PrintReproSteps
()
2940 if self
.opts
.output_buildbot_annotations
:
2941 bisect_utils
.OutputAnnotationStepClosed()
2943 def _PrintBanner(self
, results_dict
):
2944 if self
._IsBisectModeReturnCode
():
2948 metrics
= '/'.join(self
.opts
.metric
)
2949 change
= '%.02f%% (+/-%.02f%%)' % (
2950 results_dict
['regression_size'], results_dict
['regression_std_err'])
2952 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
2953 status
= self
._ConfidenceLevelStatus
(results_dict
)
2955 status
= 'Failure, could not reproduce.'
2956 change
= 'Bisect could not reproduce a change.'
2958 print RESULTS_BANNER
% {
2960 'command': self
.opts
.command
,
2963 'confidence': results_dict
['confidence'],
2966 def _PrintWarnings(self
):
2967 """Prints a list of warning strings if there are any."""
2968 if not self
.warnings
:
2972 for w
in set(self
.warnings
):
2976 def _IsPlatformSupported():
2977 """Checks that this platform and build system are supported.
2980 opts: The options parsed from the command line.
2983 True if the platform and build system are supported.
2985 # Haven't tested the script out on any other platforms yet.
2986 supported
= ['posix', 'nt']
2987 return os
.name
in supported
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError as e:
    # A missing directory is fine (nothing to remove); any other OS error
    # is a genuine failure.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.

  Clears both the 'out/<build_type>' and 'build/<build_type>' directories.

  Args:
    build_type: Build configuration directory name, e.g. 'Release'.

  Returns:
    True if both directories were removed and recreated, otherwise False.
  """
  # The return statements were elided in the garbled source; restored so the
  # caller's success check works.
  if RmTreeAndMkDir(os.path.join('out', build_type)):
    if RmTreeAndMkDir(os.path.join('build', build_type)):
      return True
  return False
3022 class BisectOptions(object):
3023 """Options to be used when running bisection."""
3025 super(BisectOptions
, self
).__init
__()
3027 self
.target_platform
= 'chromium'
3028 self
.build_preference
= None
3029 self
.good_revision
= None
3030 self
.bad_revision
= None
3031 self
.use_goma
= None
3032 self
.goma_dir
= None
3033 self
.cros_board
= None
3034 self
.cros_remote_ip
= None
3035 self
.repeat_test_count
= 20
3036 self
.truncate_percent
= 25
3037 self
.max_time_minutes
= 20
3040 self
.output_buildbot_annotations
= None
3041 self
.no_custom_deps
= False
3042 self
.working_directory
= None
3043 self
.extra_src
= None
3044 self
.debug_ignore_build
= None
3045 self
.debug_ignore_sync
= None
3046 self
.debug_ignore_perf_test
= None
3047 self
.gs_bucket
= None
3048 self
.target_arch
= 'ia32'
3049 self
.target_build_type
= 'Release'
3050 self
.builder_host
= None
3051 self
.builder_port
= None
3052 self
.bisect_mode
= BISECT_MODE_MEAN
3055 def _CreateCommandLineParser():
3056 """Creates a parser with bisect options.
3059 An instance of optparse.OptionParser.
3061 usage
= ('%prog [options] [-- chromium-options]\n'
3062 'Perform binary search on revision history to find a minimal '
3063 'range of revisions where a peformance metric regressed.\n')
3065 parser
= optparse
.OptionParser(usage
=usage
)
3067 group
= optparse
.OptionGroup(parser
, 'Bisect options')
3068 group
.add_option('-c', '--command',
3070 help='A command to execute your performance test at' +
3071 ' each point in the bisection.')
3072 group
.add_option('-b', '--bad_revision',
3074 help='A bad revision to start bisection. ' +
3075 'Must be later than good revision. May be either a git' +
3076 ' or svn revision.')
3077 group
.add_option('-g', '--good_revision',
3079 help='A revision to start bisection where performance' +
3080 ' test is known to pass. Must be earlier than the ' +
3081 'bad revision. May be either a git or svn revision.')
3082 group
.add_option('-m', '--metric',
3084 help='The desired metric to bisect on. For example ' +
3085 '"vm_rss_final_b/vm_rss_f_b"')
3086 group
.add_option('-r', '--repeat_test_count',
3089 help='The number of times to repeat the performance '
3090 'test. Values will be clamped to range [1, 100]. '
3091 'Default value is 20.')
3092 group
.add_option('--max_time_minutes',
3095 help='The maximum time (in minutes) to take running the '
3096 'performance tests. The script will run the performance '
3097 'tests according to --repeat_test_count, so long as it '
3098 'doesn\'t exceed --max_time_minutes. Values will be '
3099 'clamped to range [1, 60].'
3100 'Default value is 20.')
3101 group
.add_option('-t', '--truncate_percent',
3104 help='The highest/lowest % are discarded to form a '
3105 'truncated mean. Values will be clamped to range [0, '
3106 '25]. Default value is 25 (highest/lowest 25% will be '
3108 group
.add_option('--bisect_mode',
3110 choices
=[BISECT_MODE_MEAN
, BISECT_MODE_STD_DEV
,
3111 BISECT_MODE_RETURN_CODE
],
3112 default
=BISECT_MODE_MEAN
,
3113 help='The bisect mode. Choices are to bisect on the '
3114 'difference in mean, std_dev, or return_code.')
3115 parser
.add_option_group(group
)
3117 group
= optparse
.OptionGroup(parser
, 'Build options')
3118 group
.add_option('-w', '--working_directory',
3120 help='Path to the working directory where the script '
3121 'will do an initial checkout of the chromium depot. The '
3122 'files will be placed in a subdirectory "bisect" under '
3123 'working_directory and that will be used to perform the '
3124 'bisection. This parameter is optional, if it is not '
3125 'supplied, the script will work from the current depot.')
3126 group
.add_option('--build_preference',
3128 choices
=['msvs', 'ninja', 'make'],
3129 help='The preferred build system to use. On linux/mac '
3130 'the options are make/ninja. On Windows, the options '
3132 group
.add_option('--target_platform',
3134 choices
=['chromium', 'cros', 'android', 'android-chrome'],
3136 help='The target platform. Choices are "chromium" '
3137 '(current platform), "cros", or "android". If you '
3138 'specify something other than "chromium", you must be '
3139 'properly set up to build that platform.')
3140 group
.add_option('--no_custom_deps',
3141 dest
='no_custom_deps',
3142 action
='store_true',
3144 help='Run the script with custom_deps or not.')
3145 group
.add_option('--extra_src',
3147 help='Path to a script which can be used to modify '
3148 'the bisect script\'s behavior.')
3149 group
.add_option('--cros_board',
3151 help='The cros board type to build.')
3152 group
.add_option('--cros_remote_ip',
3154 help='The remote machine to image to.')
3155 group
.add_option('--use_goma',
3156 action
='store_true',
3157 help='Add a bunch of extra threads for goma, and enable '
3159 group
.add_option('--goma_dir',
3160 help='Path to goma tools (or system default if not '
3162 group
.add_option('--output_buildbot_annotations',
3163 action
='store_true',
3164 help='Add extra annotation output for buildbot.')
3165 group
.add_option('--gs_bucket',
3169 help=('Name of Google Storage bucket to upload or '
3170 'download build. e.g., chrome-perf'))
3171 group
.add_option('--target_arch',
3173 choices
=['ia32', 'x64', 'arm'],
3176 help=('The target build architecture. Choices are "ia32" '
3177 '(default), "x64" or "arm".'))
3178 group
.add_option('--target_build_type',
3180 choices
=['Release', 'Debug'],
3182 help='The target build type. Choices are "Release" '
3183 '(default), or "Debug".')
3184 group
.add_option('--builder_host',
3185 dest
='builder_host',
3187 help=('Host address of server to produce build by posting'
3188 ' try job request.'))
3189 group
.add_option('--builder_port',
3190 dest
='builder_port',
3192 help=('HTTP port of the server to produce build by posting'
3193 ' try job request.'))
3194 parser
.add_option_group(group
)
3196 group
= optparse
.OptionGroup(parser
, 'Debug options')
3197 group
.add_option('--debug_ignore_build',
3198 action
='store_true',
3199 help='DEBUG: Don\'t perform builds.')
3200 group
.add_option('--debug_ignore_sync',
3201 action
='store_true',
3202 help='DEBUG: Don\'t perform syncs.')
3203 group
.add_option('--debug_ignore_perf_test',
3204 action
='store_true',
3205 help='DEBUG: Don\'t perform performance tests.')
3206 parser
.add_option_group(group
)
3209 def ParseCommandLine(self
):
3210 """Parses the command line for bisect options."""
3211 parser
= self
._CreateCommandLineParser
()
3212 opts
, _
= parser
.parse_args()
3215 if not opts
.command
:
3216 raise RuntimeError('missing required parameter: --command')
3218 if not opts
.good_revision
:
3219 raise RuntimeError('missing required parameter: --good_revision')
3221 if not opts
.bad_revision
:
3222 raise RuntimeError('missing required parameter: --bad_revision')
3224 if not opts
.metric
and opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
:
3225 raise RuntimeError('missing required parameter: --metric')
3228 if not cloud_storage
.List(opts
.gs_bucket
):
3229 raise RuntimeError('Invalid Google Storage: gs://%s' % opts
.gs_bucket
)
3230 if not opts
.builder_host
:
3231 raise RuntimeError('Must specify try server hostname, when '
3232 'gs_bucket is used: --builder_host')
3233 if not opts
.builder_port
:
3234 raise RuntimeError('Must specify try server port number, when '
3235 'gs_bucket is used: --builder_port')
3236 if opts
.target_platform
== 'cros':
3237 # Run sudo up front to make sure credentials are cached for later.
3238 print 'Sudo is required to build cros:'
3240 bisect_utils
.RunProcess(['sudo', 'true'])
3242 if not opts
.cros_board
:
3243 raise RuntimeError('missing required parameter: --cros_board')
3245 if not opts
.cros_remote_ip
:
3246 raise RuntimeError('missing required parameter: --cros_remote_ip')
3248 if not opts
.working_directory
:
3249 raise RuntimeError('missing required parameter: --working_directory')
3251 metric_values
= opts
.metric
.split('/')
3252 if (len(metric_values
) != 2 and
3253 opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
):
3254 raise RuntimeError('Invalid metric specified: [%s]' % opts
.metric
)
3256 opts
.metric
= metric_values
3257 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
3258 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
3259 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
3260 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
3262 for k
, v
in opts
.__dict
__.iteritems():
3263 assert hasattr(self
, k
), 'Invalid %s attribute in BisectOptions.' % k
3265 except RuntimeError, e
:
3266 output_string
= StringIO
.StringIO()
3267 parser
.print_help(file=output_string
)
3268 error_message
= '%s\n\n%s' % (e
.message
, output_string
.getvalue())
3269 output_string
.close()
3270 raise RuntimeError(error_message
)
3273 def FromDict(values
):
3274 """Creates an instance of BisectOptions with the values parsed from a
3278 values: a dict containing options to set.
3281 An instance of BisectOptions.
3283 opts
= BisectOptions()
3284 for k
, v
in values
.iteritems():
3285 assert hasattr(opts
, k
), 'Invalid %s attribute in BisectOptions.' % k
3289 metric_values
= opts
.metric
.split('/')
3290 if len(metric_values
) != 2:
3291 raise RuntimeError('Invalid metric specified: [%s]' % opts
.metric
)
3292 opts
.metric
= metric_values
3294 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
3295 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
3296 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
3297 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
3305 opts
= BisectOptions()
3306 opts
.ParseCommandLine()
3309 extra_src
= bisect_utils
.LoadExtraSrc(opts
.extra_src
)
3311 raise RuntimeError('Invalid or missing --extra_src.')
3312 _AddAdditionalDepotInfo(extra_src
.GetAdditionalDepotInfo())
3314 if opts
.working_directory
:
3315 custom_deps
= bisect_utils
.DEFAULT_GCLIENT_CUSTOM_DEPS
3316 if opts
.no_custom_deps
:
3318 bisect_utils
.CreateBisectDirectoryAndSetupDepot(opts
, custom_deps
)
3320 os
.chdir(os
.path
.join(os
.getcwd(), 'src'))
3322 if not RemoveBuildFiles(opts
.target_build_type
):
3323 raise RuntimeError('Something went wrong removing the build files.')
3325 if not _IsPlatformSupported():
3326 raise RuntimeError('Sorry, this platform isn\'t supported yet.')
3328 # Check what source control method is being used, and create a
3329 # SourceControl object if possible.
3330 source_control
= source_control_module
.DetermineAndCreateSourceControl(opts
)
3332 if not source_control
:
3334 'Sorry, only the git workflow is supported at the moment.')
3336 # gClient sync seems to fail if you're not in master branch.
3337 if (not source_control
.IsInProperBranch() and
3338 not opts
.debug_ignore_sync
and
3339 not opts
.working_directory
):
3340 raise RuntimeError('You must switch to master branch to run bisection.')
3341 bisect_test
= BisectPerformanceMetrics(source_control
, opts
)
3343 bisect_results
= bisect_test
.Run(opts
.command
,
3347 if bisect_results
['error']:
3348 raise RuntimeError(bisect_results
['error'])
3349 bisect_test
.FormatAndPrintResults(bisect_results
)
3352 bisect_test
.PerformCleanup()
3353 except RuntimeError, e
:
3354 if opts
.output_buildbot_annotations
:
3355 # The perf dashboard scrapes the "results" step in order to comment on
3356 # bugs. If you change this, please update the perf dashboard as well.
3357 bisect_utils
.OutputAnnotationStepStart('Results')
3358 print 'Error: %s' % e
.message
3359 if opts
.output_buildbot_annotations
:
3360 bisect_utils
.OutputAnnotationStepClosed()
3364 if __name__
== '__main__':