# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import Queue
import re
import subprocess
import sys
import threading
import time

from mopy.config import Config
from mopy.paths import Paths

sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             '..', '..', '..', 'testing'))
import xvfb


# The DISPLAY ID number used for xvfb, incremented with each use.
XVFB_DISPLAY_ID = 9


def run_apptest(config, shell, args, apptest, isolate):
  '''Run the apptest; optionally isolating fixtures across shell invocations.

  Returns the list of test fixtures run and the list of failed test fixtures.
  TODO(msw): Also return the list of DISABLED test fixtures.

  Args:
    config: The mopy.config.Config for the build.
    shell: The mopy.android.AndroidShell, if Android is the target platform.
    args: The arguments for the shell or apptest.
    apptest: The application test URL.
    isolate: True if the test fixtures should be run in isolation.
  '''
  if not isolate:
    return _run_apptest_with_retry(config, shell, args, apptest)

  fixtures = _get_fixtures(config, shell, args, apptest)
  fixtures = [f for f in fixtures if not f.startswith('DISABLED_')]
  failed = []
  for fixture in fixtures:
    arguments = args + ['--gtest_filter=%s' % fixture]
    failures = _run_apptest_with_retry(config, shell, arguments, apptest)[1]
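    # A run that fails wholesale (e.g. crashes before reporting any fixture
    # results) yields [apptest] as its failure list; count this fixture then.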
    failed.extend(failures if failures != [apptest] else [fixture])
    # Abort once failures reach max(20, a tenth of the apptest's fixtures);
    # base::TestLauncher aborts similarly for timeouts and unknown results.
    if len(failed) >= max(20, len(fixtures) / 10):
      print 'Too many failing fixtures (%d), exiting now.' % len(failed)
      return (fixtures, failed + [apptest + ' aborted for excessive failures.'])
  return (fixtures, failed)


# TODO(msw): Determine proper test retry counts; allow configuration.
def _run_apptest_with_retry(config, shell, args, apptest, retry_count=2):
  '''Runs an apptest, retrying on failure; returns the fixtures and failures.'''
  (tests, failed) = _run_apptest(config, shell, args, apptest)
  while failed and retry_count:
    print 'Retrying failed tests (%d attempts remaining)' % retry_count
    arguments = args
    # Retry only the failing fixtures if there is no existing filter specified.
    if failed != [apptest] and not [a for a in args if '--gtest_filter=' in a]:
      arguments += ['--gtest_filter=%s' % ':'.join(failed)]
    failed = _run_apptest(config, shell, arguments, apptest)[1]
    retry_count -= 1
  return (tests, failed)


def _run_apptest(config, shell, args, apptest):
  '''Runs an apptest; returns the list of fixtures and the list of failures.'''
  command = _build_command_line(config, args, apptest)
  logging.getLogger().debug('Command: %s' % ' '.join(command))
  start_time = time.time()

  try:
    output = _run_test_with_xvfb(config, shell, args, apptest)
  except Exception as e:
    _print_exception(command, e)
    return ([apptest], [apptest])

  # Find all fixtures begun from gtest's '[ RUN      ] <Suite.Fixture>' output.
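  # (13 below is len('[ RUN      ] '), the offset just past that prefix.)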
  tests = [x for x in output.split('\n') if x.find('[ RUN      ] ') != -1]
  tests = [x.strip(' \t\n\r')[x.find('[ RUN      ] ') + 13:] for x in tests]

  # Fail on output with gtest's '[  FAILED  ]' or a lack of '[       OK ]'.
  # The latter check ensures failure on broken command lines, hung output, etc.
  # Check output instead of exit codes because mojo shell always exits with 0.
  failed = [x for x in tests if (re.search('\[  FAILED  \].*' + x, output) or
                                 not re.search('\[       OK \].*' + x, output))]

  ms = int(round(1000 * (time.time() - start_time)))
  if failed:
    _print_exception(command, output, ms)
  else:
    logging.getLogger().debug('Passed in %d ms with output:\n%s' % (ms, output))
  return (tests, failed)


def _get_fixtures(config, shell, args, apptest):
  '''Returns an apptest's 'Suite.Fixture' list via --gtest_list_tests output.'''
  arguments = args + ['--gtest_list_tests']
  command = _build_command_line(config, arguments, apptest)
  logging.getLogger().debug('Command: %s' % ' '.join(command))
  try:
    tests = _run_test_with_xvfb(config, shell, arguments, apptest)
    logging.getLogger().debug('Tests for %s:\n%s' % (apptest, tests))
    # Remove log lines from the output and ensure it matches known formatting.
    tests = re.sub('^(\[|WARNING: linker:).*\n', '', tests, flags=re.MULTILINE)
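    # The expected format lists each suite followed by its indented fixtures:
    #   SuiteName.
    #     FixtureName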
    if not re.match('^(\w*\.\r?\n(  \w*\r?\n)+)+', tests):
      raise Exception('Unrecognized --gtest_list_tests output:\n%s' % tests)
    test_list = []
    for line in tests.split('\n'):
      if not line:
        continue
      if line[0] != ' ':
        suite = line.strip()
        continue
      test_list.append(suite + line.strip())
    return test_list
  except Exception as e:
    _print_exception(command, e)
  return []


def _print_exception(command_line, exception, milliseconds=None):
  '''Print a formatted exception raised from a failed command execution.'''
  details = (' (in %d ms)' % milliseconds) if milliseconds else ''
  if hasattr(exception, 'returncode'):
    details += ' (with exit code %d)' % exception.returncode
  print '\n[  FAILED  ] Command%s: %s' % (details, ' '.join(command_line))
  print 72 * '-'
  if hasattr(exception, 'output'):
    print exception.output
  print str(exception)
  print 72 * '-'


def _build_command_line(config, args, apptest):
  '''Build the apptest command line. This value isn't executed on Android.'''
  not_list_tests = not '--gtest_list_tests' in args
  data_dir = ['--use-temporary-user-data-dir'] if not_list_tests else []
  return Paths(config).mojo_runner + data_dir + args + [apptest]


def _run_test_with_xvfb(config, shell, args, apptest):
  '''Run the test with xvfb; return the output or raise an exception.'''
  env = os.environ.copy()
  # Make sure gtest doesn't try to add color to the output. Color is done via
  # escape sequences which confuses the code that searches the gtest output.
  env['GTEST_COLOR'] = 'no'
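  # Xvfb is only needed for Linux test runs that may use a display; listing
  # tests and runs on other platforms go straight to the timeout wrapper.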
  if (config.target_os != Config.OS_LINUX or '--gtest_list_tests' in args
      or not xvfb.should_start_xvfb(env)):
    return _run_test_with_timeout(config, shell, args, apptest, env)

  try:
    # Simply prepending xvfb.py to the command line precludes direct control of
    # test subprocesses, and prevents easily getting output when tests timeout.
    xvfb_proc = None
    openbox_proc = None
    global XVFB_DISPLAY_ID
    display_string = ':' + str(XVFB_DISPLAY_ID)
    (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env, Paths(config).build_dir,
                                                display=display_string)
    XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
    if not xvfb_proc or not xvfb_proc.pid:
      raise Exception('Xvfb failed to start; aborting test run.')
    if not openbox_proc or not openbox_proc.pid:
      raise Exception('Openbox failed to start; aborting test run.')
    logging.getLogger().debug('Running Xvfb %s (pid %d) and Openbox (pid %d).' %
                              (display_string, xvfb_proc.pid, openbox_proc.pid))
    return _run_test_with_timeout(config, shell, args, apptest, env)
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)


# TODO(msw): Determine proper test timeout durations (starting small).
def _run_test_with_timeout(config, shell, args, apptest, env, seconds=10):
  '''Run the test with a timeout; return the output or raise an exception.'''
  result = Queue.Queue()
  thread = threading.Thread(target=_run_test,
                            args=(config, shell, args, apptest, env, result))
  thread.start()
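  # _run_test puts the subprocess (or the Android shell) on |result| first so
  # it can be killed on timeout; its (output, exception) tuple is put last.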
  process_or_shell = result.get()
  thread.join(seconds)
  timeout_exception = ''

  if thread.is_alive():
    timeout_exception = '\nError: Test timeout after %s seconds' % seconds
    logging.getLogger().debug('Killing the runner or shell for timeout.')
    try:
      process_or_shell.kill()
    except OSError:
      pass  # The process may have ended after checking |is_alive|.

  thread.join(seconds)
  if thread.is_alive():
    raise Exception('Error: Test hung and could not be killed!')
  if result.empty():
    raise Exception('Error: Test exited with no output.')
  (output, exception) = result.get()
  exception += timeout_exception
  if exception:
    raise Exception('%s%s%s' % (output, '\n' if output else '', exception))
  return output


def _run_test(config, shell, args, apptest, env, result):
  '''Run the test; put the shell/proc, output and any exception in |result|.'''
  output = ''
  exception = ''
  try:
    if config.target_os != Config.OS_ANDROID:
      command = _build_command_line(config, args, apptest)
      process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, env=env)
      result.put(process)
      (output, stderr_output) = process.communicate()
      if process.returncode:
        exception = 'Error: Test exited with code: %d\n%s' % (
            process.returncode, stderr_output)
      elif config.is_verbose:
        output += '\n' + stderr_output
    else:
      assert shell
      result.put(shell)
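      # The shell forwards the activity's output to wf; wf.close is passed as
      # the completion callback so rf.read() below returns once output ends.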
      (r, w) = os.pipe()
      with os.fdopen(r, 'r') as rf:
        with os.fdopen(w, 'w') as wf:
          arguments = args + [apptest]
          shell.StartActivity('MojoShellActivity', arguments, wf, wf.close)
          output = rf.read()
  except Exception as e:
    output += (e.output + '\n') if hasattr(e, 'output') else ''
    exception += str(e)
  result.put((output, exception))