mojo/tools/mopy/gtest.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
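
'''Runs Mojo application tests (apptests) and parses their gtest output.

Supports per-fixture isolation, retries on failure, per-run timeouts, and
running under Xvfb on Linux; see run_apptest() for the entry point.
'''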

import logging
import os
import Queue
import re
import subprocess
import sys
import threading
import time

from mopy.config import Config
from mopy.paths import Paths

sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             '..', '..', '..', 'testing'))
import xvfb


# The DISPLAY ID number used for xvfb, incremented with each use.
XVFB_DISPLAY_ID = 9


def run_apptest(config, shell, args, apptest, isolate):
  '''Run the apptest; optionally isolating fixtures across shell invocations.

  Returns the list of test fixtures run and the list of failed test fixtures.
  TODO(msw): Also return the list of DISABLED test fixtures.

  Args:
    config: The mopy.config.Config for the build.
    shell: The mopy.android.AndroidShell, if Android is the target platform.
    args: The arguments for the shell or apptest.
    apptest: The application test URL.
    isolate: True if the test fixtures should be run in isolation.
  '''
  if not isolate:
    return _run_apptest_with_retry(config, shell, args, apptest)

  fixtures = _get_fixtures(config, shell, args, apptest)
  fixtures = [f for f in fixtures if not f.startswith('DISABLED_')]
  failed = []
  for fixture in fixtures:
    arguments = args + ['--gtest_filter=%s' % fixture]
    failures = _run_apptest_with_retry(config, shell, arguments, apptest)[1]
    failed.extend(failures if failures != [apptest] else [fixture])
    # Abort when 20 fixtures, or a tenth of the apptest fixtures, have failed.
    # base::TestLauncher does this for timeouts and unknown results.
    if len(failed) >= max(20, len(fixtures) / 10):
      print 'Too many failing fixtures (%d), exiting now.' % len(failed)
      return (fixtures, failed + [apptest + ' aborted for excessive failures.'])
  return (fixtures, failed)
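
# Example (hypothetical apptest URL), running each fixture in isolation:
#   run_apptest(config, None, [], 'mojo:example_apptests', True)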


# TODO(msw): Determine proper test retry counts; allow configuration.
def _run_apptest_with_retry(config, shell, args, apptest, retry_count=2):
  '''Runs an apptest, retrying on failure; returns the fixtures and failures.'''
  (tests, failed) = _run_apptest(config, shell, args, apptest)
  while failed and retry_count:
    print 'Retrying failed tests (%d attempts remaining)' % retry_count
    arguments = args
    # Retry only the failing fixtures if there is no existing filter specified.
    if failed and not [a for a in args if a.startswith('--gtest_filter')]:
      arguments += ['--gtest_filter=%s' % ':'.join(failed)]
    failed = _run_apptest(config, shell, arguments, apptest)[1]
    retry_count -= 1
  return (tests, failed)


def _run_apptest(config, shell, args, apptest):
  '''Runs an apptest; returns the list of fixtures and the list of failures.'''
  command = _build_command_line(config, args, apptest)
  logging.getLogger().debug('Command: %s' % ' '.join(command))
  start_time = time.time()

  try:
    output = _run_test_with_xvfb(config, shell, args, apptest)
  except Exception as e:
    _print_exception(command, e)
    return ([apptest], [apptest])

  # Find all fixtures begun from gtest's '[ RUN      ] <Suite.Fixture>' output.
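  # e.g. the line '[ RUN      ] FooTest.Bar' yields the fixture 'FooTest.Bar'.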
  tests = [x for x in output.split('\n') if x.find('[ RUN      ] ') != -1]
  tests = [x.strip(' \t\n\r')[x.find('[ RUN      ] ') + 13:] for x in tests]

  # Fail on output with gtest's '[  FAILED  ]' or a lack of '[       OK ]'.
  # The latter check ensures failure on broken command lines, hung output, etc.
  # Check output instead of exit codes because mojo shell always exits with 0.
  failed = [x for x in tests if (re.search('\[  FAILED  \].*' + x, output) or
                                 not re.search('\[       OK \].*' + x, output))]

  ms = int(round(1000 * (time.time() - start_time)))
  if failed:
    _print_exception(command, output, ms)
  else:
    logging.getLogger().debug('Passed in %d ms with output:\n%s' % (ms, output))
  return (tests, failed)


def _get_fixtures(config, shell, args, apptest):
  '''Returns an apptest's 'Suite.Fixture' list via --gtest_list_tests output.'''
  arguments = args + ['--gtest_list_tests']
  command = _build_command_line(config, arguments, apptest)
  logging.getLogger().debug('Command: %s' % ' '.join(command))
  try:
    tests = _run_test_with_xvfb(config, shell, arguments, apptest)
    # Remove log lines from the output and ensure it matches known formatting.
    # Ignore empty fixture lists when the commandline has a gtest filter flag.
    tests = re.sub('^(\[|WARNING: linker:).*\n', '', tests, flags=re.MULTILINE)
    if (not re.match('^(\w*\.\r?\n( \w*\r?\n)+)+', tests) and
        not [a for a in args if a.startswith('--gtest_filter')]):
      raise Exception('Unrecognized --gtest_list_tests output:\n%s' % tests)
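    # Expected format: suite lines ending in '.' followed by indented fixture
    # names, e.g. 'FooTest.' then ' Bar', yielding 'FooTest.Bar' below.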
    test_list = []
    for line in tests.split('\n'):
      if not line:
        continue
      if line[0] != ' ':
        suite = line.strip()
        continue
      test_list.append(suite + line.strip())
    logging.getLogger().debug('Tests for %s: %s' % (apptest, test_list))
    return test_list
  except Exception as e:
    _print_exception(command, e)
    return []


def _print_exception(command_line, exception, milliseconds=None):
  '''Print a formatted exception raised from a failed command execution.'''
  details = (' (in %d ms)' % milliseconds) if milliseconds else ''
  if hasattr(exception, 'returncode'):
    details += ' (with exit code %d)' % exception.returncode
  print '\n[ FAILED ] Command%s: %s' % (details, ' '.join(command_line))
  print 72 * '-'
  if hasattr(exception, 'output'):
    print exception.output
  print str(exception)
  print 72 * '-'


def _build_command_line(config, args, apptest):
  '''Build the apptest command line. This value isn't executed on Android.'''
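  # Roughly: mojo_runner + ['--use-temporary-user-data-dir'] + args + [apptest],
  # with the data-dir flag omitted when only listing tests.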
  not_list_tests = not '--gtest_list_tests' in args
  data_dir = ['--use-temporary-user-data-dir'] if not_list_tests else []
  return Paths(config).mojo_runner + data_dir + args + [apptest]


def _run_test_with_xvfb(config, shell, args, apptest):
  '''Run the test with xvfb; return the output or raise an exception.'''
  env = os.environ.copy()
  # Make sure gtest doesn't try to add color to the output. Color is done via
  # escape sequences which confuses the code that searches the gtest output.
  env['GTEST_COLOR'] = 'no'
  if (config.target_os != Config.OS_LINUX or '--gtest_list_tests' in args
      or not xvfb.should_start_xvfb(env)):
    return _run_test_with_timeout(config, shell, args, apptest, env)

  try:
    # Simply prepending xvfb.py to the command line precludes direct control of
    # test subprocesses, and prevents easily getting output when tests timeout.
    xvfb_proc = None
    openbox_proc = None
    global XVFB_DISPLAY_ID
    display_string = ':' + str(XVFB_DISPLAY_ID)
    (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env, Paths(config).build_dir,
                                                display=display_string)
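    # Advance the display number for the next run, wrapping to keep it small.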
    XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
    if not xvfb_proc or not xvfb_proc.pid:
      raise Exception('Xvfb failed to start; aborting test run.')
    if not openbox_proc or not openbox_proc.pid:
      raise Exception('Openbox failed to start; aborting test run.')
    logging.getLogger().debug('Running Xvfb %s (pid %d) and Openbox (pid %d).' %
                              (display_string, xvfb_proc.pid, openbox_proc.pid))
    return _run_test_with_timeout(config, shell, args, apptest, env)
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)


# TODO(msw): Determine proper test timeout durations (starting small).
def _run_test_with_timeout(config, shell, args, apptest, env, seconds=10):
  '''Run the test with a timeout; return the output or raise an exception.'''
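  # _run_test first puts the subprocess (or Android shell) on |result| so it
  # can be killed on timeout, then puts an (output, exception) tuple when done.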
  result = Queue.Queue()
  thread = threading.Thread(target=_run_test,
                            args=(config, shell, args, apptest, env, result))
  thread.start()
  process_or_shell = result.get()
  thread.join(seconds)
  timeout_exception = ''

  if thread.is_alive():
    timeout_exception = '\nError: Test timeout after %s seconds' % seconds
    logging.getLogger().debug('Killing the runner or shell for timeout.')
    try:
      process_or_shell.kill()
    except OSError:
      pass  # The process may have ended after checking |is_alive|.

    thread.join(seconds)
    if thread.is_alive():
      raise Exception('Error: Test hung and could not be killed!')
  if result.empty():
    raise Exception('Error: Test exited with no output.')
  (output, exception) = result.get()
  exception += timeout_exception
  if exception:
    raise Exception('%s%s%s' % (output, '\n' if output else '', exception))
  return output


def _run_test(config, shell, args, apptest, env, result):
  '''Run the test; put the shell/proc, output, and any exception in |result|.'''
  output = ''
  exception = ''
  try:
    if config.target_os != Config.OS_ANDROID:
      command = _build_command_line(config, args, apptest)
      process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, env=env)
      result.put(process)
      (output, stderr_output) = process.communicate()
      if process.returncode:
        exception = 'Error: Test exited with code: %d\n%s' % (
            process.returncode, stderr_output)
      elif config.is_verbose:
        output += '\n' + stderr_output
      if output.startswith('This program contains tests'):
        exception = 'Error: GTest printed help; check command line flags.'
    else:
      assert shell
      result.put(shell)
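      # The shell relays the activity's output into the pipe's write end;
      # reading from |rf| returns once that write end is closed.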
      (r, w) = os.pipe()
      with os.fdopen(r, 'r') as rf:
        with os.fdopen(w, 'w') as wf:
          arguments = args + [apptest]
          shell.StartActivity('MojoShellActivity', arguments, wf, wf.close)
          output = rf.read()
  except Exception as e:
    output += (e.output + '\n') if hasattr(e, 'output') else ''
    exception += str(e)
  result.put((output, exception))