build/android/test_runner.py
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in the given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from the given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding a remote device.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures.')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true',
      help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property.')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |args| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device.')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device.')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default path')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |args| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  # os.path.splitext('Foo.apk') -> ('Foo', '.apk'), so this yields
  # 'FooSupport.apk' next to the test apk.
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts)


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help='JUnit test suite to run.')
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. The same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  group.add_argument(
      '--extra-args', default='',
      help='String of other args to pass to the command verbatim.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)
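
  # Illustrative client call (the command after "--" is a placeholder; the
  # "--" separator is implied by the TODO below):
  #   build/android/test_runner.py perf --single-step -- <command> [args...]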

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky and whose exit '
            'codes should be ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation.
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next available test from a
  # queue, which increases throughput but provides no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual test exit codes
  # are returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Temporarily prepend the suite path so its test modules are importable.
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    # Restore the original sys.path.
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
    # TODO(jbudorick): Add support for more test types.
    'gtest',
    'instrumentation',
    'uirobot',
]


def RunTestsInPlatformMode(args, parser):

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

        return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    else:
      return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())