#!/usr/bin/env python

# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote device.')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory, and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
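  # Insert "Support" before the file extension, so e.g. "FooTest.apk"
  # becomes "FooTestSupport.apk".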
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts)


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help='JUnit test suite to run.')
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help='String of other args to pass to the command verbatim.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')
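
  # This custom action enforces that the positional single-step command and
  # the --single-step flag are either both present or both absent.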
  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)
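
    # Propagate the suite's failure, but never overwrite an earlier
    # ERROR_EXIT_CODE with a different value.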
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)', args, devices)

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation.
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit codes
  # will be returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']
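
  # Temporarily prepend the suite's path so its test modules can be imported;
  # the finally block below restores sys.path.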
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Gets all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = device_utils.DeviceUtils.HealthyDevices()
  if test_device:
    matching_devices = [d for d in attached_devices if d == test_device]
    if not matching_devices:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
          % (test_device, ', '.join(str(d) for d in attached_devices)))
    return matching_devices
  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)


def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)
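
  # Clear forwarder and test-server state that may be left over from a
  # previous run before dispatching to a test runner.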
  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]


def RunTestsInPlatformMode(args, parser):
  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)
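
  # Each factory returns a context manager, so the environment, the test
  # instance, and the test run are set up and torn down in a defined order.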
  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
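  # Sending SIGUSR1 to this process (e.g. "kill -USR1 <pid>") dumps the stack
  # of every thread, which helps diagnose hangs.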
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    else:
      return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())