Updating trunk VERSION from 2139.0 to 2140.0
[chromium-blink-merge.git] / build / android / test_runner.py
blob8a4b9fa4c8a791dac67e665c21e666ffb43adf05
1 #!/usr/bin/env python
3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface."""
9 import collections
10 import logging
11 import optparse
12 import os
13 import shutil
14 import signal
15 import sys
16 import threading
18 from pylib import android_commands
19 from pylib import constants
20 from pylib import forwarder
21 from pylib import ports
22 from pylib.base import base_test_result
23 from pylib.base import test_dispatcher
24 from pylib.gtest import gtest_config
25 from pylib.gtest import setup as gtest_setup
26 from pylib.gtest import test_options as gtest_test_options
27 from pylib.linker import setup as linker_setup
28 from pylib.host_driven import setup as host_driven_setup
29 from pylib.instrumentation import setup as instrumentation_setup
30 from pylib.instrumentation import test_options as instrumentation_test_options
31 from pylib.monkey import setup as monkey_setup
32 from pylib.monkey import test_options as monkey_test_options
33 from pylib.perf import setup as perf_setup
34 from pylib.perf import test_options as perf_test_options
35 from pylib.perf import test_runner as perf_test_runner
36 from pylib.uiautomator import setup as uiautomator_setup
37 from pylib.uiautomator import test_options as uiautomator_test_options
38 from pylib.utils import apk_helper
39 from pylib.utils import command_option_parser
40 from pylib.utils import report_results
41 from pylib.utils import reraiser_thread
42 from pylib.utils import run_tests_helper
def AddCommonOptions(option_parser):
  """Registers the options shared by every test type on |option_parser|."""
  group = optparse.OptionGroup(option_parser, 'Common Options')
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  add = group.add_option
  add('--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help='If set, run test suites under out/Debug. '
           'Default is env var BUILDTYPE or Debug.')
  add('--release', action='store_const', const='Release', dest='build_type',
      help='If set, run test suites under out/Release.'
           ' Default is env var BUILDTYPE or Debug.')
  add('-c', dest='cleanup_test_files', action='store_true',
      help='Cleanup test files on the device after run')
  add('--num_retries', dest='num_retries', type='int', default=2,
      help='Number of retries for a test before giving up.')
  add('-v', '--verbose', dest='verbose_count', default=0, action='count',
      help='Verbose level (multiple times for more)')
  add('--tool', dest='tool',
      help='Run the test under a tool (use --tool help to list them)')
  add('--flakiness-dashboard-server', dest='flakiness_dashboard_server',
      help='Address of the server that is hosting the '
           'Chrome for Android flakiness dashboard.')
  add('--skip-deps-push', dest='push_deps', action='store_false', default=True,
      help='Do not push dependencies to the device. Use this at own risk '
           'for speeding up test execution on local machine.')
  add('-d', '--device', dest='test_device',
      help='Target device for the test suite to run on.')
  option_parser.add_option_group(group)
def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  # Raise global logging verbosity according to the repeated -v flags.
  run_tests_helper.SetLogLevel(options.verbose_count)
  # Record Debug/Release so later out/ directory lookups resolve correctly.
  constants.SetBuildType(options.build_type)
def AddGTestOptions(option_parser):
  """Registers the gtest command's options on |option_parser|."""
  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  add = option_parser.add_option
  # TODO(gkanwar): Make this option required
  add('-s', '--suite', dest='suite_name',
      help='Executable name of the test suite to run '
           '(use -s help to list them).')
  add('-f', '--gtest_filter', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  add('--gtest_also_run_disabled_tests', '--gtest-also-run-disabled-tests',
      dest='run_disabled', action='store_true',
      help='Also run disabled tests if applicable.')
  add('-a', '--test-arguments', dest='test_arguments', default='',
      help='Additional arguments to pass to the test.')
  add('-t', dest='timeout', type='int', default=60,
      help='Timeout to wait for each test')
  add('--isolate_file_path', '--isolate-file-path', dest='isolate_file_path',
      help='.isolate file path to override the default path')
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)
def AddLinkerTestOptions(option_parser):
  """Registers the linker-test command's options on |option_parser|."""
  option_parser.usage = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.example = '%prog linker'
  option_parser.add_option(
      '-f', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  AddCommonOptions(option_parser)
def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Normalizes |options.suite_name| to a list, or prints the available suites
  and exits when the sentinel suite name 'help' is given.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    # print(...) with a single argument behaves identically on Python 2 and 3.
    print('Available test suites are:')
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print(test_suite)
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = list(gtest_config.STABLE_TEST_SUITES)
def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""
  add = option_parser.add_option
  add('-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, '
            'will run all matches).'))
  add('-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  add('-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  add('--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  add('--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  add('--official-build', action='store_true',
      help='Run official build tests.')
  # Fixed missing space: the original help text concatenated to the typo
  # 'device datadirectory'.
  add('--test_data', action='append', default=[],
      help=('Each instance defines a directory of test '
            'data that should be copied to the target(s) '
            'before running the tests. The argument '
            'should be of the form <target>:<source>, '
            '<target> is relative to the device data '
            'directory, and <source> is relative to the '
            'chromium build directory.'))
def ProcessJavaTestOptions(options):
  """Processes options/arguments and populates |options| with defaults."""
  # Pick the annotation set: explicit list, none (when filtering), or the
  # default collection of test sizes.
  if options.annotation_str:
    annotations = options.annotation_str.split(',')
  elif options.test_filter:
    annotations = []
  else:
    annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                   'EnormousTest', 'IntegrationTest']
  options.annotations = annotations

  exclude_str = options.exclude_annotation_str
  options.exclude_annotations = exclude_str.split(',') if exclude_str else []
def AddInstrumentationTestOptions(option_parser):
  """Registers the instrumentation-test options on |option_parser|."""
  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromeShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  add = option_parser.add_option
  add('-j', '--java-only', action='store_true', default=False,
      help='Run only the Java tests.')
  add('-p', '--python-only', action='store_true', default=False,
      help='Run only the host-driven tests.')
  add('--host-driven-root',
      help='Root of the host-driven tests.')
  add('-w', '--wait_debugger', dest='wait_for_debugger', action='store_true',
      help='Wait for debugger.')
  add('--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  add('--coverage-dir',
      help='Directory in which to place all generated EMMA coverage files.')
  add('--device-flags', dest='device_flags', default='',
      help='The relative filepath to a file containing '
           'command-line flags to set on the device')
def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populate |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(options)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  # Default to running both suites; the flags below narrow the selection.
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  # Host-driven tests cannot run without a root directory.
  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  out_dir = constants.GetOutDirectory()
  options.test_apk_path = os.path.join(
      out_dir, constants.SDK_BUILD_APKS_DIR, '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      out_dir, constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)
  # The support APK sits beside the test APK, with 'Support' before the
  # extension (Foo.apk -> FooSupport.apk).
  apk_root, apk_ext = os.path.splitext(options.test_apk_path)
  options.test_support_apk_path = '%sSupport%s' % (apk_root, apk_ext)

  options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path)

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path,
      options.test_runner,
      options.test_support_apk_path,
      options.device_flags)
def AddUIAutomatorTestOptions(option_parser):
  """Registers the uiautomator-test options on |option_parser|."""
  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
      ' --package=chrome_shell')
  add = option_parser.add_option
  add('--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  add('--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(options)

  if not options.package:
    error_func('--package is required.')
  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')
  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)

  # Derive the companion info JAR by replacing the '.dex.jar' suffix.
  dex_pos = options.uiautomator_jar.find('.dex.jar')
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:dex_pos] + '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package)
def AddMonkeyTestOptions(option_parser):
  """Registers the monkey-test options on |option_parser|."""
  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog monkey --package=chrome_shell')

  add = option_parser.add_option
  add('--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  add('--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  add('--category', default='',
      help='A list of allowed categories.')
  add('--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default]. ')
  add('--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  add('--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')
  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  # An empty --category is passed through unchanged; a non-empty one becomes
  # a list of category names.
  category = (options.category.split(',') if options.category
              else options.category)

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)
def AddPerfTestOptions(option_parser):
  """Registers the perf-test options on |option_parser|."""
  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  add = option_parser.add_option
  add('--single-step', action='store_true',
      help='Execute the given command with retries, but only print the '
           'result for the "most successful" round.')
  add('--steps',
      help='JSON file containing the list of commands to run.')
  add('--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  add('--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  add('--print-step',
      help='The name of a previously executed perf step to print.')
  add('--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  add('-f', '--test-filter',
      help='Test filter (will match against the names listed in --steps).')
  add('--dry-run', action='store_true',
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)
def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: Positional command-line args (the tail is the --single-step command).
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Only one of steps, print_step or single_step must be provided.
  # Counting with a generator works on both Python 2 and 3, unlike
  # len(filter(...)) which fails under Python 3's lazy filter().
  count = sum(1 for opt in
              (options.steps, options.print_step, options.single_step) if opt)
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  # The command to run in single-step mode is everything after the subcommand.
  single_step = ' '.join(args[2:]) if options.single_step else None
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.output_json_list,
      options.print_step, options.no_timeout, options.test_filter,
      options.dry_run, single_step)
def _RunGTests(options, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    suite_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.run_disabled,
        options.test_arguments,
        options.timeout,
        options.isolate_file_path,
        suite)
    runner_factory, tests = gtest_setup.Setup(suite_options, devices)

    results, suite_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    # Escalate the exit code, but never overwrite a recorded ERROR_EXIT_CODE.
    if suite_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = suite_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite,
        flakiness_server=options.flakiness_dashboard_server)

  # Remove dependencies unpacked from .isolate files, if any.
  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code
def _RunLinkerTests(options, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  return exit_code
def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  # A debugger session cannot span devices, so restrict to a single one.
  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    factory, tests = instrumentation_setup.Setup(instrumentation_options)

    java_results, exit_code = test_dispatcher.RunTests(
        tests, factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)
    results.AddTestRunResults(java_results)

  if options.run_python_tests:
    factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)
    if tests:
      python_results, python_exit_code = test_dispatcher.RunTests(
          tests, factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)
      results.AddTestRunResults(python_results)
      # Only allow exit code escalation
      if python_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = python_exit_code

  # NOTE(review): this rewrites options.device_flags after the
  # InstrumentationOptions tuple was already built and consumed above —
  # confirm a later consumer reads it, otherwise this looks like dead code.
  if options.device_flags:
    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                        options.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  factory, tests = monkey_setup.Setup(monkey_options)

  # shard=False: every device runs the full monkey test list.
  results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code
def _RunPerfTests(options, args, error_func):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A sorted list of attached device serials.

  Raises:
    AssertionError: If |test_device| is requested but not attached, or if no
        device is attached at all.
  """
  # The original code initialized attached_devices to [] and immediately
  # overwrote it; the dead assignment has been removed.
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)
def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
  if command == 'perf':
    # perf with --single-step requires trailing args (the command to run);
    # without it, trailing args are an error.
    if bool(options.single_step) != (len(args) > 2):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  dispatch = {
      'gtest': lambda: _RunGTests(options, devices),
      'linker': lambda: _RunLinkerTests(options, devices),
      'instrumentation': lambda: _RunInstrumentationTests(
          options, option_parser.error, devices),
      'uiautomator': lambda: _RunUIAutomatorTests(
          options, option_parser.error, devices),
      'monkey': lambda: _RunMonkeyTests(options, option_parser.error, devices),
      'perf': lambda: _RunPerfTests(options, args, option_parser.error),
  }
  if command not in dispatch:
    raise Exception('Unknown test type.')
  return dispatch[command]()
def HelpCommand(command, _options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    _options: optparse options dictionary (unused).
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.
  """
  # No command named: show the overall help.
  if len(args) < 3:
    option_parser.print_help()
    return 0
  # Too many args: report the surplus.
  if len(args) > 3:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  command = args[2]
  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for itself.
  if command == 'help':
    option_parser.print_help()
    return 0

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.commands_dict = {}
  option_parser.print_help()

  return 0
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])

# Maps each subcommand name to its option-registration and run functions.
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(AddPerfTestOptions, RunTestsCommand),
    'linker': CommandFunctionTuple(AddLinkerTestOptions, RunTestsCommand),
    # 'help' registers no options of its own.
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand),
}
def DumpThreadStacks(_signal, _frame):
  """Signal handler that logs the stack of every live thread.

  Installed for SIGUSR1 in main() so a running invocation can be inspected
  on demand.
  """
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)
def main():
  """Entry point: installs the stack-dump handler and dispatches a command.

  Returns:
    The integer exit code from the executed command.
  """
  # SIGUSR1 dumps all thread stacks (see DumpThreadStacks).
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)
# Script entry point: propagate the command's exit code to the shell.
if __name__ == '__main__':
  sys.exit(main())