#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import signal
import sys
import threading
import unittest

from devil import base_error
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper

from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
# TODO(jbudorick): Remove this once we stop selectively enabling platform mode.
from pylib.gtest import gtest_test_instance
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in the given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from the given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote '
                          'device.')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))
  group.add_argument('--blacklist-file', help='Device blacklist file.')


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures.')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true',
      help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property.')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []
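
  # For example, `-A Smoke,MediumTest` yields
  # args.annotations == ['Smoke', 'MediumTest']; with neither -A nor a test
  # filter, the default annotation list above is used.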


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device.')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device.')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))
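  # For example (illustrative path): out/Debug/apks/ContentShellTest.apk
  # yields out/Debug/apks/ContentShellTestSupport.apk.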

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data)


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')
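  # For example (illustrative path): out/Debug/lib.java/FooTest.dex.jar
  # yields an info jar of out/Debug/lib.java/FooTest_java.jar.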

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help='JUnit test suite to run.')
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. The same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  group.add_argument(
      '--extra-args', default='',
      help='String of other args to pass to the command verbatim.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'.
  category = args.category
  if category:
    category = args.category.split(',')
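  # For example, --category=app,widgets becomes ['app', 'widgets']; an empty
  # --category stays '' and is passed through unchanged.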

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test (default: %(default)s).')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')
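
  # SingleStepAction enforces that the positional single-step command is
  # given exactly when --single-step is passed: a trailing command without
  # --single-step, or --single-step without a command, is an argparse error.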
  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help='Test filter (will match against the names listed in --steps).')
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C).')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only start tests when the battery is charged above '
                          'the given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
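  # For example (command illustrative), invoking this script with
  #   --single-step -- tools/perf/run_benchmark -v some_benchmark
  # turns args.single_step into 'tools/perf/run_benchmark -v some_benchmark'.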
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.max_battery_temp,
      args.min_battery_level)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name,
        args.app_data_files,
        args.app_data_file_dir,
        args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning(
        'Debugger cannot be sharded; using first available device.')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation.
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next available test from a
  # queue, which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual test exit codes
  # will be returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    # Remove the suite path prepended above so later imports are unaffected.
    sys.path = sys.path[1:]
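

# An illustrative shape for a PYTHON_UNIT_TEST_SUITES entry consumed above
# (the real definitions live in pylib.constants; names here are hypothetical):
#   'pylib_unittests': {
#     'path': 'build/android',
#     'test_modules': ['pylib.results.json_results_test'],
#   }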


def _GetAttachedDevices(blacklist_file, test_device):
  """Gets all attached devices.

  Args:
    blacklist_file: Path to the device blacklist file to use.
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  if not blacklist_file:
    # TODO(jbudorick): Remove this once bots pass the blacklist file.
    blacklist_file = device_blacklist.BLACKLIST_JSON
    logging.warning('Using default device blacklist %s',
                    device_blacklist.BLACKLIST_JSON)

  blacklist = device_blacklist.Blacklist(blacklist_file)
  attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
  if test_device:
    test_device = [d for d in attached_devices if d == test_device]
    if not test_device:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_device
  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)


def RunTestsCommand(args, parser):  # pylint: disable=too-many-return-statements
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.blacklist_file, args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    if args.suite_name[0] in gtest_test_instance.BROWSER_TEST_SUITES:
      return RunTestsInPlatformMode(args, parser)
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args, devices)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
    # TODO(jbudorick): Add support for more test types.
    'gtest',
    'instrumentation',
    'uirobot',
]


def RunTestsInPlatformMode(args, parser):

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                     None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)
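

# The handler above is installed for SIGUSR1 in main(), so a hung run can be
# inspected from another shell with, e.g.:
#   kill -USR1 <pid of test_runner.py>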


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())