#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
import argparse
import collections
import logging
import os
import signal
import sys
import threading
import unittest

from devil import base_error
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper

from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']

def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote '
                          'device.')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')

def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))
  group.add_argument('--blacklist-file', help='Device blacklist file.')


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)

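# Example gtest invocation ('base_unittests' is an assumed suite name; valid
# names come from gtest_config above):
#   build/android/test_runner.py gtest -s base_unittests -f 'FooTest.*'
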
def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)

def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')

def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []

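# For example, '-A Smoke,MediumTest' yields
# args.annotations == ['Smoke', 'MediumTest']; with neither -A nor -f given,
# the default annotation list above is used.
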
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)

def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data)

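# For example, '--test-apk ContentShellTest' with a Debug build resolves to
# paths like out/Debug/<SDK_BUILD_APKS_DIR>/ContentShellTest.apk, with a
# support apk of ContentShellTestSupport.apk alongside it (the exact
# directory names come from pylib.constants).
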
def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)

def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)

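# For example, '--test-jar ContentShellTest' resolves to
# <out>/<SDK_BUILD_JAVALIB_DIR>/ContentShellTest.dex.jar, and the derived
# info jar becomes .../ContentShellTest_java.jar.
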
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)

def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)

def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)

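# For example, '--category=chrome,launcher' yields
# category == ['chrome', 'launcher']; an empty --category is passed through
# unchanged (no category restriction).
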
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)

def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (in units of 0.1 degrees C).')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only start tests when the battery is charged above '
                          'the given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)

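# Example perf invocations (step/file names are hypothetical):
#   build/android/test_runner.py perf --steps=perf_steps.json
#   build/android/test_runner.py perf --print-step=my_benchmark
#   build/android/test_runner.py perf --single-step -- ./run_benchmark foo
# (Clients currently pass the single-step command after '--'; see the TODO
# about argparse.REMAINDER above.)
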
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.max_battery_temp, args.min_battery_level)

def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)

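# Example (the suite name is hypothetical; valid names are the keys of
# constants.PYTHON_UNIT_TEST_SUITES):
#   build/android/test_runner.py python -s pylib_tests
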
def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name,
        args.app_data_files,
        args.app_data_file_dir,
        args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code

def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code

def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code

def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next available test from a
  # queue, which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit codes
  # are returned on the print_step stage.
  return 0

def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]

def _GetAttachedDevices(blacklist_file, test_device):
  """Get all attached devices.

  Args:
    blacklist_file: Path to device blacklist file.
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  blacklist = (device_blacklist.Blacklist(blacklist_file)
               if blacklist_file
               else None)

  attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
  if test_device:
    test_devices = [d for d in attached_devices if d == test_device]
    if not test_devices:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_devices
  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)

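# For example, '-d 0123456789abcdef' (a hypothetical serial) returns just
# that device if it is attached and healthy, and raises
# DeviceUnreachableError otherwise; without -d, all healthy, non-blacklisted
# devices are returned.
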
def RunTestsCommand(args, parser):  # pylint: disable=too-many-return-statements
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating the exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.blacklist_file, args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return RunTestsInPlatformMode(args, parser)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args, devices)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')

_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]


def RunTestsInPlatformMode(args, parser):

  def infra_error(message):
    parser.exit(status=constants.INFRA_EXIT_CODE, message=message)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, infra_error) as env:
    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, infra_error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE

CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}

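# To wire up a new test type: define an AddFooTestOptions function above,
# register it in VALID_COMMANDS with a CommandConfigTuple, and dispatch it
# in RunTestsCommand (or add it to _SUPPORTED_IN_PLATFORM_MODE for platform
# mode).
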
def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())