# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from devil.android import device_errors
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run

sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper  # pylint: disable=F0401
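

# Tests annotated with @PerfTest are routed through the perf-monitoring
# path below (SetupPerfMonitoringIfNeeded / TearDownPerfMonitoring).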
_PERF_TEST_ANNOTATION = 'PerfTest'


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
    self._logcat_monitor = None

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if len(cmdline_file) and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  def _GetInstrumentationArgs(self):
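    # These key/value pairs become instrumentation extras, i.e. the
    # '-e key value' arguments to 'am instrument' (see _RunTest, which
    # forwards them through StartInstrumentation).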
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file

    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable java asserts for %s, non-rooted device',
                      str(self.device))
    else:
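      # SetJavaAsserts returns True when it changed the persisted setting;
      # the new state only takes effect after restarting the Android
      # framework, hence the 'stop'/'start' shell commands below.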
      if self.device.SetJavaAsserts(self.options.set_asserts):
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')
        self.device.WaitUntilFullyBooted()

    # Launch the HTTP server on a per-shard port to avoid the race that
    # occurs when multiple processes try to start lighttpd on the same
    # port at the same time.
    self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
      if self.options.device_flags:
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          self.flags.AddFlags([flag for flag in stripped_flags if flag])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from the run."""
    if self.flags:
      self.flags.Restore()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    if self.flags and self._IsFreTest(test):
      self.flags.RemoveFlags(['--disable-fre'])

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsFreTest(self, test):
    """Determines whether a test is a first run experience test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the feature being tested is FirstRunExperience.
    """
    annotations = self.test_pkg.GetTestAnnotations(test)
    return 'FirstRunExperience' == annotations.get('Feature', None)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.RunShellCommand(
        ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
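    # Start a logcat monitor so TearDownPerfMonitoring can wait for the
    # PERFANNOTATION line that the java harness prints for perf tests.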
    self._logcat_monitor = self.device.GetLogcatMonitor()
    self._logcat_monitor.Start()

  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle performance
    tracking. This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      result: The result of this test.
    """
    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return

    self.TearDownPerfMonitoring(test)

    if self.flags and self._IsFreTest(test):
      self.flags.AddFlags(['--disable-fre'])

    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which
    # traces to parse.
    regex = self._logcat_monitor.WaitFor(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))

    # If the test is set to run on a specific device type only (i.e. only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- so we
    # know to ignore the results. The --NORUN-- tag is managed by
    # ChromeTabbedActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)

      if not json_string:
        raise Exception('Perf file is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = regex.group(1).split(';')
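
      # Each semicolon-separated entry appears to carry three comma-separated
      # tokens: the timing series to average from the JSON dump, then the
      # graph and trace names used when printing the result, e.g.
      # 'UrlLoad,load_times,cold' (hypothetical values).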
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(
              json_string, perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      try:
        timeout_scale = int(annotations['TimeoutScale'])
      except ValueError:
        logging.warning('Non-integer value of TimeoutScale ignored. (%s)',
                        annotations['TimeoutScale'])
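    # Stepping through code in an attached debugger is orders of magnitude
    # slower than normal execution, so scale the timeout up generously.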
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  # pylint: disable=too-many-return-statements
  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60

    logging.warning("Test size not found in annotations for test '%s', using "
                    "1 minute for timeout.", test)
    return 1 * 60

  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.

    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.

    Returns:
      The raw output of am instrument as a list of lines.
    """
    extras = self._GetInstrumentationArgs()
    extras['class'] = test
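    # The 'class' extra restricts the instrumentation run to this single
    # test; the component launched is '<test package>/<runner class>'.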
    return self.device.StartInstrumentation(
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
        raw=True, extras=extras, timeout=timeout, retries=3)

  # pylint: disable=no-self-use
  def _GenerateTestResult(self, test, instr_result_code, instr_result_bundle,
                          statuses, start_ms, duration_ms):
    results = instrumentation_test_instance.GenerateTestResults(
        instr_result_code, instr_result_bundle, statuses, start_ms, duration_ms)
    for r in results:
      if r.GetName() == test:
        return r
    logging.error('Could not find result for test: %s', test)
    return test_result.InstrumentationTestResult(
        test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)

  #override
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())

    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)

      try:
        self.device.GoHome()
      except device_errors.CommandTimeoutError:
        logging.exception('Failed to focus the launcher.')

      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms

      # Parse the test output.
      result_code, result_bundle, statuses = (
          instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
      result = self._GenerateTestResult(
          test, result_code, result_bundle, statuses, start_ms, duration_ms)
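      # The parsed instrumentation output alone may not reveal that the
      # package under test crashed mid-run, so cross-check the device's
      # crash state and override the result type if necessary.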
      if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
          self.test_pkg.GetPackageName(), self.device):
        result.SetType(base_test_result.ResultType.CRASH)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)