# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
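
"""Runs a single gtest suite on a single Android device.

TestRunner installs the test package, spawns the test process on the device,
and parses its gtest output into a base_test_result.TestRunResults object.
"""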
import logging
import os
import re

from pylib import pexpect
from pylib import ports
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.local import local_test_server_spawner
from pylib.perf import perf_control
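
# Patterns for the per-test and per-run status markers that the gtest binary
# prints to stdout. The \r\n and \r\r\n endings account for the carriage
# returns present in output captured from the device shell.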
RE_RUN = re.compile('\\[ RUN      \\] ?(.*)\r\n')
RE_FAIL = re.compile('\\[  FAILED  \\] ?(.*?)( \\((\\d+) ms\\))?\r\r\n')
RE_OK = re.compile('\\[       OK \\] ?(.*?)( \\((\\d+) ms\\))?\r\r\n')

RE_PASSED = re.compile('\\[  PASSED  \\] ?(.*)\r\n')
RE_RUNNER_FAIL = re.compile('\\[ RUNNER_FAILED \\] ?(.*)\r\n')
# Signal handlers are installed before starting tests
# to output the CRASHED marker when a crash happens.
RE_CRASH = re.compile('\\[ CRASHED \\](.*)\r\n')

def _TestSuiteRequiresMockTestServer(suite_name):
  """Returns True if the test suite requires mock test server."""
  tests_require_net_test_server = ['unit_tests', 'net_unittests',
                                   'content_browsertests']
  return (suite_name in
          tests_require_net_test_server)

def _TestSuiteRequiresHighPerfMode(suite_name):
  """Returns True if the test suite requires high performance mode."""
  return 'perftests' in suite_name

class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.

    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests.
      test_package: An instance of TestPackage class.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.cleanup_test_files)

    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments
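
    # The per-test timeout is scaled by the factor that the active
    # instrumentation tool reports through GetTimeoutScale().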
    timeout = test_options.timeout
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2

    self._timeout = timeout * self.tool.GetTimeoutScale()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.device)
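
    # Suites that need the net test server get a spawner bound to a freshly
    # allocated port; all other suites run with no extra servers.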
    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self._servers = [
          local_test_server_spawner.LocalTestServerSpawner(
              ports.AllocateTestServerPort(), self.device, self.tool)]
    else:
      self._servers = []

  def InstallTestPackage(self):
    self.test_package.Install(self.device)

  def _ParseTestOutput(self, p):
    """Process the test output.

    Args:
      p: An instance of pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()
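
    # Scan the pexpect stream for gtest markers: each RUN marker opens a test
    # case and the next OK / FAILED / CRASHED marker closes it with a result;
    # the loop ends at the run-level PASSED or RUNNER_FAILED marker.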
    try:
      while True:
        full_test_name = None
        found = p.expect([RE_RUN, RE_PASSED, RE_RUNNER_FAIL],
                         timeout=self._timeout)
        if found == 1:  # RE_PASSED
          break
        elif found == 2:  # RE_RUNNER_FAIL
          break
        else:  # RE_RUN
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([RE_OK, RE_FAIL, RE_CRASH], timeout=self._timeout)
          log = p.before.replace('\r', '')
          if found == 0:  # RE_OK
            if full_test_name == p.match.group(1).replace('\r', ''):
              duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  duration=duration_ms, log=log))
          elif found == 2:  # RE_CRASH
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else:  # RE_FAIL
            duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL,
                duration=duration_ms, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
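
    # The gtest exit code is fetched from the device and, when non-zero, logged
    # together with the pexpect buffers to help diagnose abnormal terminations.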
    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results

  def RunTest(self, test):
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None
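
    # Clear app state, write the command-line file the test binary reads on
    # startup, then parse the spawned process's output. The spawner servers are
    # reset afterwards regardless of how the run ended.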
    try:
      self.test_package.ClearApplicationState(self.device)
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
    finally:
      for s in self._servers:
        s.Reset()

    # Calculate unknown test results.
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
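    # Everything that did not pass is joined into a ':'-separated gtest filter
    # so the caller can retry just those tests.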
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry
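
  # SetUp/TearDown bracket each suite: the spawner servers are started and
  # stopped, perf suites switch the device into and out of high-performance
  # mode, and the tool's environment is set up and cleaned up around the run.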
179 """Sets up necessary test enviroment for the test suite."""
180 super(TestRunner
, self
).SetUp()
181 for s
in self
._servers
:
183 if _TestSuiteRequiresHighPerfMode(self
.test_package
.suite_name
):
184 self
._perf
_controller
.SetHighPerfMode()
185 self
.tool
.SetupEnvironment()
189 """Cleans up the test enviroment for the test suite."""
190 for s
in self
._servers
:
192 if _TestSuiteRequiresHighPerfMode(self
.test_package
.suite_name
):
193 self
._perf
_controller
.SetDefaultPerfMode()
194 self
.test_package
.ClearApplicationState(self
.device
)
195 self
.tool
.CleanUpEnvironment()
196 super(TestRunner
, self
).TearDown()