#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use. (http or https)')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help=('Type of device to run on. iOS or android'))
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Times to retry finding remote device')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)
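
# Illustrative gtest invocations (a sketch, not an exhaustive reference; the
# suite names below are examples and depend on what the current checkout
# actually builds):
#   test_runner.py gtest -s base_unittests --release
#   test_runner.py gtest -s net_unittests -f 'HttpCache*:-*Flaky*'
# A --gtest-filter-file whose lines are "HttpCache*" and "-*Flaky*" is joined
# with ':' into the same single filter string as the -f example above.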


def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []
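
# For example (an illustrative sketch of the defaulting above, not additional
# behavior): "-A Smoke,MediumTest" yields args.annotations of
# ['Smoke', 'MediumTest']; passing "-f" alone disables annotation filtering
# (empty list); passing neither falls back to the default annotation list.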


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help=('The name of the apk under test.'))
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts)
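
# Illustrative only (directory names come from the constants module and may
# differ): with "--test-apk ContentShellTest", test_apk_path resolves to
# <out>/<SDK_BUILD_APKS_DIR>/ContentShellTest.apk and test_support_apk_path to
# <out>/<SDK_BUILD_APKS_DIR>/ContentShellTestSupport.apk, via the
# '%sSupport%s' % os.path.splitext(...) expansion above.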


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)
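
# Illustrative only (the category names are examples, not defaults):
# "--category android.intent.category.LAUNCHER,android.intent.category.MONKEY"
# is split above into a two-element list before being packed into
# MonkeyOptions.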


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
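
# Illustrative --single-step usage (a sketch; the benchmark command after the
# "--" separator is hypothetical and comes from the caller):
#   test_runner.py perf --single-step -- ./build/android/foo_benchmark --flag
# The "--" separator is what the TODO above about argparse.REMAINDER refers to.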


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit_code
  # will be returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = device_utils.DeviceUtils.HealthyDevices()
  if test_device:
    test_device = [d for d in attached_devices if d == test_device]
    if not test_device:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_device

  if not attached_devices:
    raise device_errors.NoDevicesError()
  return sorted(attached_devices)


def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'uirobot',
]


def RunTestsInPlatformMode(args, parser):

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

        return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
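
# Illustrative platform-mode invocation (a sketch; common options attach to
# each subcommand's parser, so they go after the subcommand, and the suite
# name is only an example):
#   test_runner.py gtest --enable-platform-mode -e local -s base_unittests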


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except: # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())