Adding Peter Thatcher to the owners file.
[chromium-blink-merge.git] / build / android / pylib / gtest / test_runner.py
blob4bb97379e042fa2527435f6b0b26d8e89862a99f
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
5 import logging
6 import os
7 import re
9 from pylib import pexpect
10 from pylib import ports
11 from pylib.base import base_test_result
12 from pylib.base import base_test_runner
13 from pylib.device import device_errors
14 from pylib.local import local_test_server_spawner
15 from pylib.perf import perf_control
# Test case statuses.
# gtest emits '[ RUN ]', '[ OK ]' and '[ FAILED ]' markers around each test
# case.  Group 1 captures the test name; for OK/FAILED, group 3 captures the
# duration in ms (absent when gtest prints no timing).  Raw strings are used
# for the regex source; the regex engine still interprets \r and \n as
# CR / LF, so matching is unchanged.
# NOTE(review): OK/FAILED lines are matched with '\r\r\n' endings while
# RUN lines use '\r\n' — presumably an artifact of how the device output is
# captured; kept exactly as before.
RE_RUN = re.compile(r'\[ RUN \] ?(.*)\r\n')
RE_FAIL = re.compile(r'\[ FAILED \] ?(.*?)( \((\d+) ms\))?\r\r\n')
RE_OK = re.compile(r'\[ OK \] ?(.*?)( \((\d+) ms\))?\r\r\n')

# Test run statuses.
RE_PASSED = re.compile(r'\[ PASSED \] ?(.*)\r\n')
RE_RUNNER_FAIL = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
# Signal handlers are installed before starting tests
# to output the CRASHED marker when a crash happens.
RE_CRASH = re.compile(r'\[ CRASHED \](.*)\r\n')
30 def _TestSuiteRequiresMockTestServer(suite_name):
31 """Returns True if the test suite requires mock test server."""
32 tests_require_net_test_server = ['unit_tests', 'net_unittests',
33 'content_unittests',
34 'content_browsertests']
35 return (suite_name in
36 tests_require_net_test_server)
38 def _TestSuiteRequiresHighPerfMode(suite_name):
39 """Returns True if the test suite requires high performance mode."""
40 return 'perftests' in suite_name
class TestRunner(base_test_runner.BaseTestRunner):
  """Runs a single gtest suite's tests on a single Android device.

  Drives the on-device test process through a pexpect spawn object and
  translates the gtest console markers ([ RUN ], [ OK ], [ FAILED ],
  [ CRASHED ], [ PASSED ], [ RUNNER_FAILED ]) into TestRunResults.
  """

  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.

    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests.
      test_package: An instance of TestPackage class.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.cleanup_test_files)
    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments

    # A timeout of 0 means "use the default" (60 seconds).
    timeout = test_options.timeout
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2

    # The tool (e.g. valgrind-style wrappers) can slow execution; scale the
    # timeout accordingly.
    self._timeout = timeout * self.tool.GetTimeoutScale()
    # NOTE: _perf_controller only exists for perf suites; SetUp/TearDown
    # guard every access with the same _TestSuiteRequiresHighPerfMode check.
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.device)

    # Suites that need the net test server get one spawner; all other
    # suites get an empty server list so SetUp/TearDown loops are no-ops.
    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self._servers = [
          local_test_server_spawner.LocalTestServerSpawner(
              ports.AllocateTestServerPort(), self.device, self.tool)]
    else:
      self._servers = []

  #override
  def InstallTestPackage(self):
    """Installs the test package onto the device."""
    self.test_package.Install(self.device)

  def _ParseTestOutput(self, p):
    """Process the test output.

    Reads the spawned process's output until the run-level PASSED or
    RUNNER_FAILED marker appears, recording a PASS/FAIL/CRASH result for
    each individual test case seen along the way.

    Args:
      p: An instance of pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()

    log = ''
    try:
      while True:
        full_test_name = None
        found = p.expect([RE_RUN, RE_PASSED, RE_RUNNER_FAIL],
                         timeout=self._timeout)
        if found == 1:  # RE_PASSED
          break
        elif found == 2:  # RE_RUNNER_FAIL
          break
        else:  # RE_RUN
          # Strip stray CRs the device transport leaves in the captured name.
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([RE_OK, RE_FAIL, RE_CRASH], timeout=self._timeout)
          # Everything printed between the RUN marker and the status marker
          # becomes this test's log.
          log = p.before.replace('\r', '')
          if found == 0:  # RE_OK
            # Only record a PASS when the OK line names the test we saw
            # start; a mismatched name is silently dropped.
            # NOTE(review): presumably guards against interleaved output --
            # confirm before changing.
            if full_test_name == p.match.group(1).replace('\r', ''):
              duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  duration=duration_ms, log=log))
          elif found == 2:  # RE_CRASH
            # A crash aborts the remainder of the run.
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else:  # RE_FAIL
            duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL,
                duration=duration_ms, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      # Device is still up, so blame the test that was in flight (if any).
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      # Always release the pexpect child, success or not.
      p.close()

    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results

  #override
  def RunTest(self, test):
    """Runs the tests named in |test| on the device.

    Args:
      test: Colon-separated list of test names to run (gtest filter
          syntax, as split below); falsy values run nothing.

    Returns:
      A (TestRunResults, retry) tuple, where retry is a colon-separated
      string of non-passing test names suitable for a re-run, or None when
      |test| was empty.
    """
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None

    try:
      self.test_package.ClearApplicationState(self.device)
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
    finally:
      # Reset the spawned servers even when parsing raised.
      for s in self._servers:
        s.Reset()
    # Calculate unknown test results: anything requested but never reported
    # by the runner (e.g. lost to a crash earlier in the batch).
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry

  #override
  def SetUp(self):
    """Sets up necessary test environment for the test suite."""
    super(TestRunner, self).SetUp()
    for s in self._servers:
      s.SetUp()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetHighPerfMode()
    self.tool.SetupEnvironment()

  #override
  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    for s in self._servers:
      s.TearDown()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetDefaultPerfMode()
    self.test_package.ClearApplicationState(self.device)
    self.tool.CleanUpEnvironment()
    super(TestRunner, self).TearDown()