# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
import argparse
import collections
import logging
import os
import signal
import sys
import threading
import unittest

from devil import base_error
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper

from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
# TODO(jbudorick): Remove this once we stop selectively enabling platform mode.
from pylib.gtest import gtest_test_instance
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  # NOTE(review): the middle of this argument declaration was lost in the
  # garbled source; dest/default/action are reconstructed from
  # ProcessCommonOptions, which reads args.verbose_count as an int count --
  # confirm against upstream.
  group.add_argument('-v', '--verbose', dest='verbose_count',
                     default=0, action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')
def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  # NOTE(review): a line was lost in the garbled source here; by analogy
  # with the guards above, --adb-path is assumed to be applied only when
  # provided -- confirm against upstream.
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
def AddRemoteDeviceOptions(parser):
  """Adds remote-device (AMP) options to |parser|."""
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path. '))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use. (http or https)')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help=('Type of device to run on. iOS or android'))
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Times to retry finding remote device')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')
def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  # NOTE(review): one line of this declaration was lost in the garbled
  # source (presumably dest='tool') -- confirm against upstream.
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))
  group.add_argument('--blacklist-file', help='Device blacklist file.')
def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  # NOTE(review): one line of this declaration was lost in the garbled
  # source (likely default='') -- confirm against upstream.
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  # NOTE(review): a line was lost here; --delete-stale-data is used as a
  # boolean elsewhere in this file, so action='store_true' is assumed.
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  filter_group = group.add_mutually_exclusive_group()
  # NOTE(review): a line was lost here; the linker variant of this flag
  # uses dest='test_filter', assumed the same for the gtest variant.
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
  """Adds linker test options to |parser|."""
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument(
      '-f', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |args| with defaults.

  Splits the comma-separated annotation strings into lists and applies the
  default annotation set when neither an annotation nor a filter was given.
  """
  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  # NOTE(review): a line was lost in the garbled source; -w is a flag, so
  # action='store_true' is assumed -- confirm against upstream.
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help=('the name of the apk under test.'))
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  # NOTE(review): a line was lost here; assumed action='store_true' as in
  # the gtest variant of this flag.
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |args| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  # NOTE(review): most of this argument list was lost in the garbled source;
  # the order below is reconstructed around the surviving lines -- confirm
  # against the InstrumentationOptions definition before relying on it.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data)
def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  # NOTE(review): the 'group.add_argument(' lines and the tail of the
  # --test-jar help string were lost in the garbled source; reconstructed
  # from the surrounding declarations -- confirm against upstream.
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  # NOTE(review): the tail of this expression was lost in the garbled
  # source; '_java.jar' is reconstructed from upstream -- confirm.
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  # NOTE(review): most of this argument list was lost in the garbled source;
  # the order below is reconstructed around the surviving lines -- confirm
  # against the UIAutomatorOptions definition before relying on it.
  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  # NOTE(review): the 'group.add_argument(' lines were lost in the garbled
  # source; reconstructed from the surviving option declarations.
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  # NOTE(review): the 'group.add_argument(' lines were lost in the garbled
  # source; reconstructed from the surviving option declarations.
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s). ')
  # NOTE(review): the option-name line was lost in the garbled source;
  # '--seed' is assumed from the help text -- confirm against upstream.
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  # NOTE(review): the guard line was lost in the garbled source;
  # 'if category:' is assumed so an empty default stays a string.
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  # NOTE(review): this argument list was lost entirely in the garbled source
  # and is reconstructed from upstream memory -- confirm against the
  # MonkeyOptions definition before relying on it.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    # Validates that positional single-step commands and --single-step agree.
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  # NOTE(review): several option-name lines below were lost in the garbled
  # source; '--steps' and '--print-step' are reconstructed from the
  # attributes ProcessPerfTestOptions reads (args.steps, args.print_step) --
  # confirm against upstream.
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C)')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  # NOTE(review): the tail of this help string was lost in the garbled
  # source -- confirm wording against upstream.
  group.add_argument('--min-battery-level', type=int,
                     help='Only starts tests when the battery is charged above '
                          'given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  # NOTE(review): the guard line was lost in the garbled source;
  # 'if args.single_step:' is assumed so the join only happens when
  # --single-step was passed -- confirm against upstream.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.max_battery_temp, args.min_battery_level)
def AddPythonTestOptions(parser):
  """Adds python test options to |parser|."""
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)
def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    # NOTE(review): most of this argument list was lost in the garbled
    # source; the order is reconstructed around the surviving lines --
    # confirm against the GTestOptions definition before relying on it.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        suite_name,
        args.isolate_file_path,
        args.app_data_files,
        args.app_data_file_dir,
        args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    # Only allow exit code escalation; never downgrade a hard error.
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  # NOTE(review): the results= keyword line and the trailing return were
  # lost in the garbled source; reconstructed by analogy with the other
  # _Run* helpers in this file.
  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  # NOTE(review): initialization was lost in the garbled source;
  # exit_code = 0 is assumed since the escalation check below reads it.
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    # NOTE(review): a guard line was lost here; 'if tests:' is assumed so
    # an empty host-driven suite does not dispatch -- confirm upstream.
    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  # NOTE(review): the results= keyword line and the trailing return were
  # lost in the garbled source; reconstructed by analogy with the other
  # _Run* helpers in this file.
  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  # NOTE(review): the results= and test_type= keyword lines and the
  # trailing return were lost in the garbled source; reconstructed by
  # analogy with the other _Run* helpers in this file.
  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  # NOTE(review): the results= and test_type= keyword lines and the
  # trailing return were lost in the garbled source; reconstructed by
  # analogy with the other _Run* helpers in this file.
  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  # NOTE(review): the LogFull keyword lines were lost in the garbled
  # source; reconstructed by analogy with the other _Run* helpers.
  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  # NOTE(review): the try/finally lines were lost in the garbled source;
  # they are assumed because the path prepended above is restored after the
  # early return below -- confirm against upstream.
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]
def _GetAttachedDevices(blacklist_file, test_device):
  """Get all attached devices.

  Args:
    blacklist_file: Path to the device blacklist file.
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.

  Raises:
    device_errors.DeviceUnreachableError: |test_device| was requested but is
      not attached.
    device_errors.NoDevicesError: No healthy devices are attached.
  """
  if not blacklist_file:
    # TODO(jbudorick): Remove this once bots pass the blacklist file.
    blacklist_file = device_blacklist.BLACKLIST_JSON
    logging.warning('Using default device blacklist %s',
                    device_blacklist.BLACKLIST_JSON)

  blacklist = device_blacklist.Blacklist(blacklist_file)
  attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
  # NOTE(review): the guard lines around this branch were lost in the
  # garbled source; reconstructed so a named device is validated and
  # returned, otherwise all healthy devices are used -- confirm upstream.
  if test_device:
    test_device = [d for d in attached_devices if d == test_device]
    if not test_device:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached device. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_device

  if not attached_devices:
    raise device_errors.NoDevicesError()
  return sorted(attached_devices)
def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  # NOTE(review): lines were lost in the garbled source here; local-machine
  # test types are assumed to run with an empty device list -- confirm
  # against upstream.
  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.blacklist_file, args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    # Browser test suites are only supported in platform mode.
    if args.suite_name[0] in gtest_test_instance.BROWSER_TEST_SUITES:
      return RunTestsInPlatformMode(args, parser)
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args, devices)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')
972 _SUPPORTED_IN_PLATFORM_MODE
= [
973 # TODO(jbudorick): Add support for more test types.
def RunTestsInPlatformMode(args, parser):
  """Runs tests in the unified platform-mode path.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object, used only for error reporting.

  Returns:
    0 on success (or when only triggering a remote run), otherwise
    constants.ERROR_EXIT_CODE.
  """
  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        # NOTE(review): the results= keyword line was lost in the garbled
        # source; reconstructed by analogy with the other LogFull calls.
        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])

# NOTE(review): the dict opener, two add_options_func entries, two help
# strings, and the closing brace were lost in the garbled source; they are
# reconstructed from the Add*TestOptions functions defined above -- confirm
# help wording against upstream.
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
  """Signal handler: logs the stack of every live thread."""
  for live_thread in threading.enumerate():
    reraiser_thread.LogThreadStack(live_thread)
# NOTE(review): the 'def main():' header was lost in the garbled source and
# is reconstructed here; the __main__ guard below calls main().
def main():
  """Entry point: builds the subcommand parser and dispatches."""
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  # NOTE(review): part of this call was lost in the garbled source;
  # dest='command' is assumed since RunTestsCommand reads args.command.
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except: # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE
1074 if __name__
== '__main__':