#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
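
# Example invocations, mirroring the 'example' strings each command's parser
# sets below (run 'help <command>' for the full option list):
#   build/android/test_runner.py gtest -s base_unittests
#   build/android/test_runner.py instrumentation \
#       --test-apk=ChromiumTestShellTest
#   build/android/test_runner.py uiautomator \
#       --test-jar=chromium_testshell_uiautomator_tests \
#       --package=chromium_test_shell
#   build/android/test_runner.py monkey --package=chromium_test_shell
#   build/android/test_runner.py perf --steps perf_steps.json
#   build/android/test_runner.py linker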

import collections
import logging
import optparse
import os
import shutil
import signal
import sys
import threading
import traceback

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  group = optparse.OptionGroup(option_parser, 'Common Options')
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  group.add_option('--debug', action='store_const', const='Debug',
                   dest='build_type', default=default_build_type,
                   help=('If set, run test suites under out/Debug. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('--release', action='store_const',
                   const='Release', dest='build_type',
                   help=('If set, run test suites under out/Release. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('-c', dest='cleanup_test_files',
                   help='Cleanup test files on the device after run',
                   action='store_true')
  group.add_option('--num_retries', dest='num_retries', type='int',
                   default=2,
                   help=('Number of retries for a test before '
                         'giving up.'))
  group.add_option('-v',
                   '--verbose',
                   dest='verbose_count',
                   default=0,
                   action='count',
                   help='Verbose level (multiple times for more)')
  group.add_option('--tool',
                   dest='tool',
                   help=('Run the test under a tool '
                         '(use --tool help to list them)'))
  group.add_option('--flakiness-dashboard-server',
                   dest='flakiness_dashboard_server',
                   help=('Address of the server that is hosting the '
                         'Chrome for Android flakiness dashboard.'))
  group.add_option('--skip-deps-push', dest='push_deps',
                   action='store_false', default=True,
                   help=('Do not push dependencies to the device. '
                         'Use this at your own risk to speed up test '
                         'execution on a local machine.'))
  group.add_option('-d', '--device', dest='test_device',
                   help=('Target device for the test suite '
                         'to run on.'))
  option_parser.add_option_group(group)


def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(options.verbose_count)
  constants.SetBuildType(options.build_type)


def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
                           dest='test_filter',
                           help='googletest-style filter string.')
  option_parser.add_option('--gtest_also_run_disabled_tests',
                           '--gtest-also-run-disabled-tests',
                           dest='run_disabled', action='store_true',
                           help='Also run disabled tests if applicable.')
  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
                           default='',
                           help='Additional arguments to pass to the test.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=60)
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)


def AddLinkerTestOptions(option_parser):
  option_parser.usage = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.example = '%prog linker'

  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
                           help='googletest-style filter string.')
  AddCommonOptions(option_parser)


def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print test_suite
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
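

# For example, to list the available suites and then run one of them:
#   build/android/test_runner.py gtest -s help
#   build/android/test_runner.py gtest -s base_unittests --release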


def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test-filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--test_data', action='append', default=[],
                           help=('Each instance defines a directory of test '
                                 'data that should be copied to the target(s) '
                                 'before running the tests. The argument '
                                 'should be of the form <target>:<source>, '
                                 'where <target> is relative to the device '
                                 'data directory, and <source> is relative to '
                                 'the chromium build directory.'))


def ProcessJavaTestOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  else:
    options.exclude_annotations = []


def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromiumTestShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-j', '--java-only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python-only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--host-driven-root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  option_parser.add_option('--coverage-dir',
                           help=('Directory in which to place all generated '
                                 'EMMA coverage files.'))


def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(options, error_func)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  options.test_apk_path = os.path.join(constants.GetOutDirectory(),
                                       constants.SDK_BUILD_APKS_DIR,
                                       '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)
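  # For example, --release --test-apk=ContentShellTest would resolve to
  # out/Release/apks/ContentShellTest.apk plus a matching ContentShellTest.jar
  # under the test javalib dir (the exact directory names come from
  # pylib/constants.py).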

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path)


def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
      ' --package=chromium_test_shell')
  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)


def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(options, error_func)

  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')
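  # For example, --test-jar=chromium_testshell_uiautomator_tests resolves to
  # <out>/<javalib dir>/chromium_testshell_uiautomator_tests.dex.jar, and the
  # info JAR is the same path with '.dex.jar' replaced by '_java.jar'.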

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package)


def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog monkey --package=chromium_test_shell')

  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  option_parser.add_option(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  option_parser.add_option(
      '--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default].')
  option_parser.add_option(
      '--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  option_parser.add_option(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)


def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  category = options.category
  if category:
    category = options.category.split(',')

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)
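

# For example, a (hypothetical) --category=android.intent.category.LAUNCHER
# flag is split on commas above, so MonkeyOptions receives the list
# ['android.intent.category.LAUNCHER'] rather than a raw string.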


def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  option_parser.add_option(
      '--single-step',
      action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  option_parser.add_option(
      '--steps',
      help='JSON file containing the list of commands to run.')
  option_parser.add_option(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  option_parser.add_option(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  option_parser.add_option(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  option_parser.add_option(
      '--dry-run',
      action='store_true',
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)


def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: List of extra args from optparse, used by --single-step.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Exactly one of --steps, --print-step and --single-step must be provided.
  count = len(filter(None,
                     [options.steps, options.print_step, options.single_step]))
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  single_step = None
  if options.single_step:
    single_step = ' '.join(args[2:])
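    # Sketch of the slicing (binary name hypothetical): the positional args
    # are [script, 'perf', <command args...>], so an invocation like
    #   test_runner.py perf --single-step -- out/Release/foo_perftest --flag
    # yields single_step == 'out/Release/foo_perftest --flag'.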
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.print_step,
      options.no_timeout, options.test_filter, options.dry_run,
      single_step)


def _RunGTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite_name in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    gtest_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.run_disabled,
        options.test_arguments,
        options.timeout,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=options.flakiness_dashboard_server)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  return exit_code


def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger can not be sharded; using first available '
                    'device.')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code


def _RunPerfTests(options, args, error_func, devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)
  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests = perf_setup.Setup(perf_options)

  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit codes
  # are returned on the print_step stage.
  return 0
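

# Typical perf flow implied above: a --steps run executes every step and
# always exits 0; each step's output and exit code are retrieved afterwards
# with --print-step <name> (via perf_test_runner.PrintTestOutput).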


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """

  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
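  # For 'perf', extra positional args are the command to benchmark: they are
  # required with --single-step and rejected otherwise, as checked below.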
  if command == 'perf':
    if ((options.single_step and len(args) <= 2) or
        (not options.single_step and len(args) > 2)):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(options, option_parser.error, devices)
  elif command == 'linker':
    return _RunLinkerTests(options, option_parser.error, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(options, option_parser.error, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(options, option_parser.error, devices)
  elif command == 'monkey':
    return _RunMonkeyTests(options, option_parser.error, devices)
  elif command == 'perf':
    return _RunPerfTests(options, args, option_parser.error, devices)
  else:
    raise Exception('Unknown test type.')


def HelpCommand(command, options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating exit code.
  """
  # If we don't have any args, display overall help
  if len(args) < 3:
    option_parser.print_help()
    return 0
  # If we have too many args, print an error
  if len(args) > 3:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  command = args[2]

  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for the help command itself.
  if command == 'help':
    option_parser.print_help()
    return 0

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.commands_dict = {}
  option_parser.print_help()

  return 0


# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple contains two functions: (add options,
# run command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(
        AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(
        AddPerfTestOptions, RunTestsCommand),
    'linker': CommandFunctionTuple(
        AddLinkerTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
    }


def DumpThreadStacks(signal, frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)
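

# DumpThreadStacks is wired to SIGUSR1 in main() below, so a hung run can be
# inspected from another shell (PID hypothetical):
#   kill -USR1 12345
# Each live thread's stack is then logged via reraiser_thread.LogThreadStack.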


def main(argv):
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)


if __name__ == '__main__':
  sys.exit(main(sys.argv))