Pin Chrome's shortcut to the Win10 Start menu on install and OS upgrade.
[chromium-blink-merge.git] / build / android / test_runner.py
blob7b49c9e69a1f2c181d268393487c3e0aeebdecb4
1 #!/usr/bin/env python
3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface."""
9 import argparse
10 import collections
11 import logging
12 import os
13 import shutil
14 import signal
15 import sys
16 import threading
17 import unittest
19 from pylib import constants
20 from pylib import forwarder
21 from pylib import ports
22 from pylib.base import base_test_result
23 from pylib.base import environment_factory
24 from pylib.base import test_dispatcher
25 from pylib.base import test_instance_factory
26 from pylib.base import test_run_factory
27 from pylib.device import device_errors
28 from pylib.device import device_utils
29 from pylib.gtest import gtest_config
30 # TODO(jbudorick): Remove this once we stop selectively enabling platform mode.
31 from pylib.gtest import gtest_test_instance
32 from pylib.gtest import setup as gtest_setup
33 from pylib.gtest import test_options as gtest_test_options
34 from pylib.linker import setup as linker_setup
35 from pylib.host_driven import setup as host_driven_setup
36 from pylib.instrumentation import setup as instrumentation_setup
37 from pylib.instrumentation import test_options as instrumentation_test_options
38 from pylib.junit import setup as junit_setup
39 from pylib.junit import test_dispatcher as junit_dispatcher
40 from pylib.monkey import setup as monkey_setup
41 from pylib.monkey import test_options as monkey_test_options
42 from pylib.perf import setup as perf_setup
43 from pylib.perf import test_options as perf_test_options
44 from pylib.perf import test_runner as perf_test_runner
45 from pylib.results import json_results
46 from pylib.results import report_results
47 from pylib.uiautomator import setup as uiautomator_setup
48 from pylib.uiautomator import test_options as uiautomator_test_options
49 from pylib.utils import apk_helper
50 from pylib.utils import base_error
51 from pylib.utils import reraiser_thread
52 from pylib.utils import run_tests_helper
def AddCommonOptions(parser):
  """Adds all common options to |parser|.

  Args:
    parser: argparse.ArgumentParser the options are added to.
  """

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  # '--num-retries' is accepted as an alias so this flag matches the dashed
  # spelling used by the rest of the options in this file (the original
  # underscore spelling is kept for backward compatibility).
  group.add_argument('--num_retries', '--num-retries', dest='num_retries',
                     type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')
def ProcessCommonOptions(args):
  """Applies the parsed common options to global state.

  Configures logging verbosity, build type/directories, and the adb binary
  location from |args|.
  """
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_bin_dir = os.path.dirname(constants.GetAdbPath())
  if adb_bin_dir:
    path_entries = os.environ['PATH'].split(os.pathsep)
    if adb_bin_dir not in path_entries:
      os.environ['PATH'] = os.pathsep.join([adb_bin_dir, os.environ['PATH']])
def AddRemoteDeviceOptions(parser):
  """Adds options for running tests via a remote device service."""
  remote_group = parser.add_argument_group('Remote Device Options')

  remote_group.add_argument(
      '--trigger',
      help='Only triggers the test if set. Stores test_run_id '
           'in given file path. ')
  remote_group.add_argument(
      '--collect',
      help='Only collects the test results if set. '
           'Gets test_run_id from given file path.')
  remote_group.add_argument(
      '--remote-device', action='append',
      help='Device type to run test on.')
  remote_group.add_argument(
      '--results-path',
      help='File path to download results to.')
  remote_group.add_argument(
      '--api-protocol',
      help='HTTP protocol to use. (http or https)')
  remote_group.add_argument(
      '--api-address',
      help='Address to send HTTP requests.')
  remote_group.add_argument(
      '--api-port',
      help='Port to send HTTP requests to.')
  remote_group.add_argument(
      '--runner-type',
      help='Type of test to run as.')
  remote_group.add_argument(
      '--runner-package',
      help='Package name of test.')
  remote_group.add_argument(
      '--device-type', choices=constants.VALID_DEVICE_TYPES,
      help='Type of device to run on. iOS or android')
  remote_group.add_argument(
      '--device-oem', action='append',
      help='Device OEM to run on.')
  remote_group.add_argument(
      '--remote-device-file',
      help='File with JSON to select remote device. '
           'Overrides all other flags.')
  remote_group.add_argument(
      '--remote-device-timeout', type=int,
      help='Times to retry finding remote device')
  remote_group.add_argument(
      '--network-config', type=int,
      help='Integer that specifies the network environment '
           'that the tests will be run in.')

  # At most one way of constraining the device OS may be given.
  device_os_group = remote_group.add_mutually_exclusive_group()
  device_os_group.add_argument(
      '--remote-device-minimum-os',
      help='Minimum OS on device.')
  device_os_group.add_argument(
      '--remote-device-os', action='append',
      help='OS to have on the device.')

  # The API secret may be supplied inline or via a file, not both.
  api_secret_group = remote_group.add_mutually_exclusive_group()
  api_secret_group.add_argument(
      '--api-secret', default='',
      help='API secret for remote devices.')
  api_secret_group.add_argument(
      '--api-secret-file', default='',
      help='Path to file that contains API secret.')

  # Likewise for the API key.
  api_key_group = remote_group.add_mutually_exclusive_group()
  api_key_group.add_argument(
      '--api-key', default='',
      help='API key for remote devices.')
  api_key_group.add_argument(
      '--api-key-file', default='',
      help='Path to file that contains API key.')
def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  device_group = parser.add_argument_group(title='Device Options')
  device_group.add_argument(
      '--tool', dest='tool',
      help='Run the test under a tool (use --tool help to list them)')
  device_group.add_argument(
      '-d', '--device', dest='test_device',
      help='Target device for the test suite to run on.')
def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  known_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name',
      nargs='+', metavar='SUITE_NAME', required=True,
      help='Executable name of the test suite to run. '
           'Available suites include (but are not limited to): '
           '%s' % ', '.join('"%s"' % s for s in known_suites))
  group.add_argument(
      '--gtest_also_run_disabled_tests', '--gtest-also-run-disabled-tests',
      dest='run_disabled', action='store_true',
      help='Also run disabled tests if applicable.')
  group.add_argument(
      '-a', '--test-arguments', dest='test_arguments', default='',
      help='Additional arguments to pass to the test.')
  group.add_argument(
      '-t', dest='timeout', type=int, default=60,
      help='Timeout to wait for each test (default: %(default)s).')
  group.add_argument(
      '--isolate_file_path', '--isolate-file-path', dest='isolate_file_path',
      help='.isolate file path to override the default path')
  group.add_argument(
      '--app-data-file', action='append', dest='app_data_files',
      help='A file path relative to the app data directory '
           'that should be saved to the host.')
  group.add_argument(
      '--app-data-file-dir',
      help='Host directory to which app data files will be'
           ' saved. Used with --app-data-file.')
  group.add_argument(
      '--delete-stale-data', dest='delete_stale_data', action='store_true',
      help='Delete stale test data on the device.')

  # A single filter string and a filter file are mutually exclusive.
  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument(
      '-f', '--gtest_filter', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  filter_group.add_argument(
      '--gtest-filter-file', dest='test_filter_file',
      help='Path to file that contains googletest-style '
           'filter strings. (Lines will be joined with '
           '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
  """Adds linker test options to |parser|."""
  linker_group = parser.add_argument_group('Linker Test Options')
  linker_group.add_argument(
      '-f', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Test filter (if not fully qualified, will run all matches).')
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help='Comma-separated list of annotations. Run only tests with any of '
           'the given annotations. An annotation can be either a key or a '
           'key-values pair. A test that has no annotation is considered '
           '"SmallTest".')
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help='Comma-separated list of annotations. Exclude tests with these '
           'annotations.')
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help='Each instance defines a directory of test data that should be '
           'copied to the target(s) before running the tests. The argument '
           'should be of the form <target>:<source>, <target> is relative to '
           'the device data directory, and <source> is relative to the '
           'chromium build directory.')
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
  """Fills in Java-test defaults on |args|.

  Derives |args.annotations| and |args.exclude_annotations| lists from the
  raw comma-separated strings supplied on the command line.
  """
  # TODO(jbudorick): Handle most of this function in argparse.
  default_annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                         'EnormousTest', 'IntegrationTest']
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  else:
    # With an explicit test filter, no annotation restriction is applied.
    args.annotations = [] if args.test_filter else default_annotations

  args.exclude_annotations = (
      args.exclude_annotation_str.split(',')
      if args.exclude_annotation_str else [])
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  # '--wait-debugger' added as a dashed alias for consistency with the other
  # flags in this file (same pattern as '--test_data'/'--test-data').
  group.add_argument('-w', '--wait_debugger', '--wait-debugger',
                     dest='wait_for_debugger', action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help=('the name of the apk under test.'))
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populate |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(args)

  # Host-driven (python) tests require a root directory to discover them.
  if not args.host_driven_root:
    args.run_python_tests = False

  out_dir = constants.GetOutDirectory()
  args.test_apk_path = os.path.join(
      out_dir, constants.SDK_BUILD_APKS_DIR, '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      out_dir, constants.SDK_BUILD_TEST_JAVALIB_DIR, '%s.jar' % args.test_apk)
  # The support APK sits next to the test APK with a 'Support' suffix before
  # the extension, e.g. Foo.apk -> FooSupport.apk.
  apk_root, apk_ext = os.path.splitext(args.test_apk_path)
  args.test_support_apk_path = apk_root + 'Support' + apk_ext

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data)
def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  uiautomator_group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(uiautomator_group)
  uiautomator_group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  uiautomator_group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help='The name of the dexed jar containing the tests (without the '
           '.dex.jar extension). Alternatively, this can be a full path '
           'to the jar.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(), constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)

  # Derive the info JAR path by replacing the '.dex.jar' suffix.
  dex_suffix_start = args.uiautomator_jar.find('.dex.jar')
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:dex_suffix_start] + '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  junit_group = parser.add_argument_group('JUnit Test Options')
  junit_group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help='JUnit test suite to run.')
  junit_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  junit_group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  junit_group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  junit_group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  monkey_group = parser.add_argument_group('Monkey Test Options')
  monkey_group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  monkey_group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  monkey_group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  monkey_group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s). ')
  monkey_group.add_argument(
      '--seed', type=int,
      help='Seed value for pseudo-random generator. Same seed value generates '
           'the same sequence of events. Seed is randomized by default.')
  monkey_group.add_argument(
      '--extra-args', default='',
      help='String of other args to pass to the command verbatim.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  # An empty --category string passes through unchanged (falsy).
  category = args.category.split(',') if args.category else args.category

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count, args.package, args.event_count, category,
      args.throttle, args.seed, args.extra_args)
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  uirobot_group = parser.add_argument_group('Uirobot Test Options')

  uirobot_group.add_argument(
      '--app-under-test', required=True,
      help='APK to run tests on.')
  uirobot_group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  perf_group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    # Validates that a trailing command is given exactly when --single-step
    # is present.
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = perf_group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  perf_group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  perf_group.add_argument(
      '--collect-chartjson-data', action='store_true',
      help='Cache the chartjson output from each step for later use.')
  perf_group.add_argument(
      '--output-chartjson-data', default='',
      help='Write out chartjson into the given file.')
  perf_group.add_argument(
      '--flaky-steps',
      help='A JSON file containing steps that are flaky '
           'and will have its exit code ignored.')
  perf_group.add_argument(
      '--no-timeout', action='store_true',
      help='Do not impose a timeout. Each perf step is responsible for '
           'implementing the timeout logic.')
  perf_group.add_argument(
      '-f', '--test-filter',
      help='Test filter (will match against the names listed in --steps).')
  perf_group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  perf_group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C)')
  perf_group.add_argument(
      'single_step_command', nargs='*', action=SingleStepAction,
      help='If --single-step is specified, the command to run.')
  perf_group.add_argument(
      '--min-battery-level', type=int,
      help='Only starts tests when the battery is charged above '
           'given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)

  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps,
      args.flaky_steps,
      args.output_json_list,
      args.print_step,
      args.no_timeout,
      args.test_filter,
      args.dry_run,
      args.single_step,
      args.collect_chartjson_data,
      args.output_chartjson_data,
      args.max_battery_temp,
      args.min_battery_level)
def AddPythonTestOptions(parser):
  """Adds python unit test options to |parser|."""
  python_group = parser.add_argument_group('Python Test Options')
  python_group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)
def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool, args.test_filter, args.run_disabled, args.test_arguments,
        args.timeout, args.isolate_file_path, suite_name, args.app_data_files,
        args.app_data_file_dir, args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    # Only escalate the exit code; never downgrade a previous error.
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results, test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests.

  Runs the Java (on-device) tests and, when a host-driven root is configured,
  the host-driven python tests as well; merges both result sets.

  Args:
    args: argparse.Namespace object.
    devices: list of attached devices to run on.

  Returns:
    Integer exit code from the dispatched test runs.
  """
  # Lazy %-args so the message is only formatted when INFO logging is enabled.
  logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results, test_type='JUnit', test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  # Monkey tests are not sharded: each device runs the full list.
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results, test_type='Monkey', test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results, test_type='Perf', test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests.

  Args:
    args: argparse.Namespace object.

  Returns:
    0 if the suite passed, 1 otherwise.
  """
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Save and restore sys.path wholesale instead of dropping index 0 in the
  # finally clause: the loaded test modules may themselves modify sys.path,
  # in which case sys.path[1:] would remove the wrong entry.
  original_sys_path = sys.path[:]
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1 + args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = original_sys_path
def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.

  Raises:
    device_errors.DeviceUnreachableError: If |test_device| was requested but
      is not among the attached devices.
    device_errors.NoDevicesError: If no specific device was requested and no
      devices are attached at all.
  """
  attached_devices = device_utils.DeviceUtils.HealthyDevices()
  if test_device:
    # Do not rebind |test_device| here: the original code clobbered the
    # requested serial with the (empty) filtered list before formatting the
    # error message, so the message always reported '[]'.
    matching_devices = [d for d in attached_devices if d == test_device]
    if not matching_devices:
      # str(d) because the attached devices are DeviceUtils objects, which
      # ', '.join() cannot consume directly.
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached device. Attached devices: %s'
          % (test_device, ', '.join(str(d) for d in attached_devices)))
    return matching_devices
  if not attached_devices:
    raise device_errors.NoDevicesError()
  return sorted(attached_devices)
def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  # Local-machine test types never talk to a device.
  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    # Browser test suites are handled by the platform-mode runner.
    if args.suite_name[0] in gtest_test_instance.BROWSER_TEST_SUITES:
      return RunTestsInPlatformMode(args, parser)
    return _RunGTests(args, devices)

  # Runners that need the device list vs. those that manage devices (or run
  # on the host) themselves.
  device_runners = {
      'linker': _RunLinkerTests,
      'instrumentation': _RunInstrumentationTests,
      'uiautomator': _RunUIAutomatorTests,
      'monkey': _RunMonkeyTests,
  }
  host_runners = {
      'junit': _RunJUnitTests,
      'perf': _RunPerfTests,
      'python': _RunPythonTests,
  }
  if command in device_runners:
    return device_runners[command](args, devices)
  if command in host_runners:
    return host_runners[command](args)
  raise Exception('Unknown test type.')
961 _SUPPORTED_IN_PLATFORM_MODE = [
962 # TODO(jbudorick): Add support for more test types.
963 'gtest',
964 'instrumentation',
965 'uirobot',
def RunTestsInPlatformMode(args, parser):
  """Runs tests through the platform-mode (environment/instance/run) stack.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object; used only to report unsupported
      commands via parser.error.

  Returns:
    0 on success (or when only triggering remote runs), otherwise
    constants.ERROR_EXIT_CODE.
  """
  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env, \
       test_instance_factory.CreateTestInstance(args, parser.error) as test, \
       test_run_factory.CreateTestRun(
           args, env, test, parser.error) as test_run:
    results = test_run.RunTests()

    if args.environment == 'remote_device' and args.trigger:
      # Only triggering the remote run; results are collected elsewhere.
      return 0

    report_results.LogFull(
        results=results,
        test_type=test.TestType(),
        test_package=test_run.TestPackage(),
        annotation=getattr(args, 'annotations', None),
        flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
# Pairs a subcommand's option-registration function with its help text.
CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple', 'add_options_func help_txt')
# Every subcommand this script accepts, mapped to the function that registers
# its argparse options and the help text shown in --help output.
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions, 'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions, 'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions, "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions, 'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions, 'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions, 'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions, 'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
  """Signal handler that logs a stack trace for every live thread.

  Installed for SIGUSR1 in main(); useful for diagnosing hangs on bots.
  The signal-handler arguments are unused.
  """
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)
def main():
  """Entry point: builds the CLI, runs the chosen test type, returns exit code."""
  # Allow operators to dump all thread stacks with SIGUSR1 (hang debugging).
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types', dest='command')

  # Register one subparser per supported test type, in alphabetical order.
  for test_type in sorted(VALID_COMMANDS):
    config = VALID_COMMANDS[test_type]
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
  sys.exit(main())