Permission messages: Add a bunch of missing combinations/suppressions.
[chromium-blink-merge.git] / build / android / test_runner.py
blob 204c81c0e6bbacd811d5d0af370fac233fbc2745
1 #!/usr/bin/env python
3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface."""
9 import argparse
10 import collections
11 import logging
12 import os
13 import shutil
14 import signal
15 import sys
16 import threading
17 import unittest
19 from devil import base_error
20 from devil.android import apk_helper
21 from devil.android import device_blacklist
22 from devil.android import device_errors
23 from devil.android import device_utils
24 from devil.android import ports
25 from devil.utils import reraiser_thread
26 from devil.utils import run_tests_helper
28 from pylib import constants
29 from pylib import forwarder
30 from pylib.base import base_test_result
31 from pylib.base import environment_factory
32 from pylib.base import test_dispatcher
33 from pylib.base import test_instance_factory
34 from pylib.base import test_run_factory
35 from pylib.gtest import gtest_config
36 # TODO(jbudorick): Remove this once we stop selectively enabling platform mode.
37 from pylib.gtest import gtest_test_instance
38 from pylib.gtest import setup as gtest_setup
39 from pylib.gtest import test_options as gtest_test_options
40 from pylib.linker import setup as linker_setup
41 from pylib.host_driven import setup as host_driven_setup
42 from pylib.instrumentation import setup as instrumentation_setup
43 from pylib.instrumentation import test_options as instrumentation_test_options
44 from pylib.junit import setup as junit_setup
45 from pylib.junit import test_dispatcher as junit_dispatcher
46 from pylib.monkey import setup as monkey_setup
47 from pylib.monkey import test_options as monkey_test_options
48 from pylib.perf import setup as perf_setup
49 from pylib.perf import test_options as perf_test_options
50 from pylib.perf import test_runner as perf_test_runner
51 from pylib.results import json_results
52 from pylib.results import report_results
53 from pylib.uiautomator import setup as uiautomator_setup
54 from pylib.uiautomator import test_options as uiautomator_test_options
def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  # BUILDTYPE in the environment wins unless --debug/--release is passed.
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')
def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path,
  # so prepend adb's directory to PATH if it is not already present.
  adb_binary_dir = os.path.dirname(constants.GetAdbPath())
  current_path_entries = os.environ['PATH'].split(os.pathsep)
  if adb_binary_dir and adb_binary_dir not in current_path_entries:
    os.environ['PATH'] = adb_binary_dir + os.pathsep + os.environ['PATH']
def AddRemoteDeviceOptions(parser):
  """Adds options for running tests on a remote device service to |parser|."""
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path. '))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use. (http or https)')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help=('Type of device to run on. iOS or android'))
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Times to retry finding remote device')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  # Exactly one way of specifying the device OS may be used.
  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  # The API secret may come from the command line or a file, not both.
  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  # Likewise for the API key.
  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')
def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  device_group = parser.add_argument_group(title='Device Options')
  device_group.add_argument('--tool',
                            dest='tool',
                            help=('Run the test under a tool '
                                  '(use --tool help to list them)'))
  device_group.add_argument('-d', '--device', dest='test_device',
                            help=('Target device for the test suite '
                                  'to run on.'))
  device_group.add_argument('--blacklist-file', help='Device blacklist file.')
def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  # Both stable and experimental suites are offered in the -s help text.
  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  # A filter string and a filter file are mutually exclusive.
  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
  """Adds linker test options to |parser|."""
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |args| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    # An explicit filter with no annotations means "no annotation filtering".
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  # Java-only and python-only are mutually exclusive toggles.
  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help=('the name of the apk under test.'))
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populate |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  # Host-driven (python) tests require a root directory to discover them.
  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  # e.g. Foo.apk -> FooSupport.apk (splitext yields the ('Foo', '.apk') pair).
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data)
def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  # Derive the info JAR name: replace the '.dex.jar' suffix with '_java.jar'.
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  junit_group = parser.add_argument_group('JUnit Test Options')
  junit_group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  junit_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  junit_group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  junit_group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  junit_group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s). ')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  # The empty-string default stays as-is; a non-empty value becomes a list.
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    """Validates that a trailing command appears iff --single-step is set."""

    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C)')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only starts tests when the battery is charged above '
                          'given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  # Collapse the trailing positional command into a single shell string.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.max_battery_temp,
      args.min_battery_level)
def AddPythonTestOptions(parser):
  """Adds python unit test options to |parser|."""
  python_group = parser.add_argument_group('Python Test Options')
  python_group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)
def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name,
        args.app_data_files,
        args.app_data_file_dir,
        args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    # Escalate to the suite's exit code unless an infrastructure error
    # has already been recorded.
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  # Linker tests use a fixed 60-second per-test timeout.
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  # Use lazy %-args (not eager '%' formatting) so the namespace and device
  # list are only stringified when INFO logging is enabled.
  logging.info('_RunInstrumentationTests(%s, %s)', args, devices)

  instrumentation_options = ProcessInstrumentationOptions(args)

  # The debugger can only attach to one device, so drop the rest.
  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  # Monkey tests are not sharded across devices.
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests.

  Args:
    args: argparse.Namespace with |suite_name| and |verbose_count|.

  Returns:
    0 if the suite passed, 1 otherwise.
  """
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Save a copy of sys.path rather than popping the first element afterwards:
  # a test module may itself modify sys.path while loading, in which case
  # `sys.path = sys.path[1:]` would drop the wrong entry.
  original_sys_path = list(sys.path)
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = original_sys_path
def _GetAttachedDevices(blacklist_file, test_device):
  """Get all attached devices.

  Args:
    blacklist_file: Path to the device blacklist file; falls back to the
        default blacklist when empty.
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  if not blacklist_file:
    # TODO(jbudorick): Remove this once bots pass the blacklist file.
    blacklist_file = device_blacklist.BLACKLIST_JSON
    logging.warning('Using default device blacklist %s',
                    device_blacklist.BLACKLIST_JSON)

  blacklist = device_blacklist.Blacklist(blacklist_file)
  attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)

  if test_device:
    # Narrow down to the requested device, if present.
    matching_devices = [d for d in attached_devices if d == test_device]
    if not matching_devices:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached device. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return matching_devices

  if not attached_devices:
    raise device_errors.NoDevicesError()
  return sorted(attached_devices)
def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  # Local-machine test types never need attached devices.
  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.blacklist_file, args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    # Browser test suites always go through platform mode.
    if args.suite_name[0] in gtest_test_instance.BROWSER_TEST_SUITES:
      return RunTestsInPlatformMode(args, parser)
    return _RunGTests(args, devices)

  # The remaining commands map one-to-one onto runner functions; the first
  # group takes the device list, the second runs host-side only.
  device_runners = {
      'linker': _RunLinkerTests,
      'instrumentation': _RunInstrumentationTests,
      'uiautomator': _RunUIAutomatorTests,
      'monkey': _RunMonkeyTests,
      'perf': _RunPerfTests,
  }
  host_runners = {
      'junit': _RunJUnitTests,
      'python': _RunPythonTests,
  }
  if command in device_runners:
    return device_runners[command](args, devices)
  if command in host_runners:
    return host_runners[command](args)
  raise Exception('Unknown test type.')
# Test types that can run through the newer "platform mode" code path
# (RunTestsInPlatformMode) rather than the legacy per-type dispatchers.
_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]
def RunTestsInPlatformMode(args, parser):
  """Runs tests through the platform-mode environment/instance/run factories.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object, used only for error reporting.

  Returns:
    0 on success (or when only triggering a remote run), otherwise
    constants.ERROR_EXIT_CODE.
  """
  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  # One flat `with` — enter/exit order matches the original nested form:
  # env, then test instance, then test run.
  with environment_factory.CreateEnvironment(args, parser.error) as env, \
       test_instance_factory.CreateTestInstance(args, parser.error) as test, \
       test_run_factory.CreateTestRun(
           args, env, test, parser.error) as test_run:
    results = test_run.RunTests()

    if args.environment == 'remote_device' and args.trigger:
      # Only triggering the remote run; no results to report yet.
      return 0

    report_results.LogFull(
        results=results,
        test_type=test.TestType(),
        test_package=test_run.TestPackage(),
        annotation=getattr(args, 'annotations', None),
        flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(
          results, args.json_results_file)

    return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
# Pairs each subcommand with the function that registers its command-line
# options and the help text shown in the argparse subcommand listing.
CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])

# All subcommands accepted by this runner; main() builds one argparse
# subparser per entry.
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
  """Signal handler: logs the stack of every live thread (debugs hangs)."""
  for live_thread in threading.enumerate():
    reraiser_thread.LogThreadStack(live_thread)
def main():
  # SIGUSR1 dumps all thread stacks — useful when a bot run hangs.
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  # Register one subparser per command, in alphabetical order.
  for test_type in sorted(VALID_COMMANDS):
    config = VALID_COMMANDS[test_type]
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE
if __name__ == '__main__':
  # Propagate the runner's integer exit code to the shell.
  sys.exit(main())