# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run

# perf_tests_results_helper is expected under build/util/lib/common.
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper  # pylint: disable=F0401


_PERF_TEST_ANNOTATION = 'PerfTest'


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
    self._logcat_monitor = None

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if len(cmdline_file) and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      # No command line file for this package; flag changes will be skipped.
      self.flags = None

  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  def _GetInstrumentationArgs(self):
    # Extras passed to 'am instrument' as -e key/value pairs (see _RunTest).
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)
93 """Sets up the test harness and device before all tests are run."""
94 super(TestRunner
, self
).SetUp()
95 if not self
.device
.HasRoot():
96 logging
.warning('Unable to enable java asserts for %s, non rooted device',
99 if self
.device
.SetJavaAsserts(self
.options
.set_asserts
):
100 # TODO(jbudorick) How to best do shell restart after the
101 # android_commands refactor?
102 self
.device
.RunShellCommand('stop')
103 self
.device
.RunShellCommand('start')
105 # We give different default value to launch HTTP server based on shard index
106 # because it may have race condition when multiple processes are trying to
107 # launch lighttpd with same port at same time.
108 self
.LaunchTestHttpServer(
109 os
.path
.join(constants
.DIR_SOURCE_ROOT
), self
._lighttp
_port
)
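    # Default flags added here are adjusted per test: TestSetup removes
    # --disable-fre for first run experience tests and TestTeardown adds it
    # back once the test finishes.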
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
      if self.options.device_flags:
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          self.flags.AddFlags([flag for flag in stripped_flags if flag])
118 """Cleans up the test harness and saves outstanding data from test run."""
121 super(TestRunner
, self
).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    if self.flags and self._IsFreTest(test):
      self.flags.RemoveFlags(['--disable-fre'])

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()
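
    # The coverage file is written on the device while the test runs and is
    # pulled back to coverage_host_file in TestTeardown.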
    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsFreTest(self, test):
    """Determines whether a test is a first run experience test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the feature being tested is FirstRunExperience.
    """
    annotations = self.test_pkg.GetTestAnnotations(test)
    return 'FirstRunExperience' == annotations.get('Feature', None)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.RunShellCommand(
        ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
    self._logcat_monitor = self.device.GetLogcatMonitor()
    self._logcat_monitor.Start()

  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner, this might handle performance
    tracking. This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      result: The result for this test.
    """
    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return

    self.TearDownPerfMonitoring(test)

    if self.flags and self._IsFreTest(test):
      self.flags.AddFlags(['--disable-fre'])

    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which traces
    # to parse.
    regex = self._logcat_monitor.WaitFor(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))

    # If the test is set to run on a specific device type only (e.g. only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- so we
    # know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)

      if not json_string:
        raise Exception('Perf file is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)

      raw_perf_data = regex.group(1).split(';')
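
      # Each ';'-separated annotation entry is itself a comma-separated triple;
      # anything else is rejected by the length check below.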
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      try:
        timeout_scale = int(annotations['TimeoutScale'])
      except ValueError:
        logging.warning('Non-integer value of TimeoutScale ignored. (%s)'
                        % annotations['TimeoutScale'])
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60

    logging.warn(("Test size not found in annotations for test '%s', using " +
                  "1 minute for timeout.") % test)
    return 1 * 60

  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.

    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.

    Returns:
      The raw output of am instrument as a list of lines.
    """
    extras = self._GetInstrumentationArgs()
    extras['class'] = test
    return self.device.StartInstrumentation(
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
        raw=True, extras=extras, timeout=timeout, retries=3)

  def _GenerateTestResult(self, test, instr_result_code, instr_result_bundle,
                          statuses, start_ms, duration_ms):
    results = instrumentation_test_instance.GenerateTestResults(
        instr_result_code, instr_result_bundle, statuses, start_ms, duration_ms)
    for r in results:
      if r.GetName() == test:
        return r
    logging.error('Could not find result for test: %s', test)
    return test_result.InstrumentationTestResult(
        test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)

  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
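    # Give older (pre-Jelly Bean) devices extra time to finish.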
    if (self.device.build_version_sdk
        < constants.ANDROID_SDK_VERSION_CODES.JELLY_BEAN):
      timeout *= 10

    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)

      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms

      # Parse the test output.
      result_code, result_bundle, statuses = (
          instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
      result = self._GenerateTestResult(
          test, result_code, result_bundle, statuses, start_ms, duration_ms)
      if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
          self.test_pkg.GetPackageName(), self.device):
        result.SetType(base_test_result.ResultType.CRASH)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)