Allow overlapping sync and async startup requests
[chromium-blink-merge.git] / build / android / test_runner.py
blob2c87df901f8499afdecc314bf046a87cd563ba32
1 #!/usr/bin/env python
3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface.
9 TODO(gkanwar):
10 * Add options to run Monkey tests.
11 """
13 import collections
14 import logging
15 import optparse
16 import os
17 import shutil
18 import sys
20 from pylib import android_commands
21 from pylib import constants
22 from pylib import ports
23 from pylib.base import base_test_result
24 from pylib.base import test_dispatcher
25 from pylib.gtest import gtest_config
26 from pylib.gtest import setup as gtest_setup
27 from pylib.gtest import test_options as gtest_test_options
28 from pylib.host_driven import setup as host_driven_setup
29 from pylib.instrumentation import setup as instrumentation_setup
30 from pylib.instrumentation import test_options as instrumentation_test_options
31 from pylib.monkey import setup as monkey_setup
32 from pylib.monkey import test_options as monkey_test_options
33 from pylib.perf import setup as perf_setup
34 from pylib.perf import test_options as perf_test_options
35 from pylib.perf import test_runner as perf_test_runner
36 from pylib.uiautomator import setup as uiautomator_setup
37 from pylib.uiautomator import test_options as uiautomator_test_options
38 from pylib.utils import command_option_parser
39 from pylib.utils import report_results
40 from pylib.utils import run_tests_helper
# Base output directory ('out') under the source root; build-type
# subdirectories (Debug/Release) are joined onto it when resolving APK/JAR
# paths (see ProcessInstrumentationOptions / ProcessUIAutomatorOptions).
_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
def AddBuildTypeOption(option_parser):
  """Adds the build type option to |option_parser|."""
  # Honor the BUILDTYPE environment variable when neither flag is given.
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  option_parser.add_option(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  option_parser.add_option(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release.'
            ' Default is env var BUILDTYPE or Debug.'))
def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  AddBuildTypeOption(option_parser)

  # Bind the bound method once; every option below goes through it.
  add = option_parser.add_option
  add('-c', dest='cleanup_test_files', action='store_true',
      help='Cleanup test files on the device after run')
  add('--num_retries', dest='num_retries', type='int', default=2,
      help=('Number of retries for a test before '
            'giving up.'))
  add('-v', '--verbose', dest='verbose_count', default=0, action='count',
      help='Verbose level (multiple times for more)')
  add('--tool', dest='tool',
      help=('Run the test under a tool '
            '(use --tool help to list them)'))
  add('--flakiness-dashboard-server', dest='flakiness_dashboard_server',
      help=('Address of the server that is hosting the '
            'Chrome for Android flakiness dashboard.'))
  add('--skip-deps-push', dest='push_deps', action='store_false',
      default=True,
      help=('Do not push dependencies to the device. '
            'Use this at own risk for speeding up test '
            'execution on local machine.'))
  add('-d', '--device', dest='test_device',
      help=('Target device for the test suite '
            'to run on.'))
def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  # Verbosity follows the number of -v flags (action='count' above).
  run_tests_helper.SetLogLevel(options.verbose_count)
  # Record the selected build type (Debug/Release) for later path lookups.
  constants.SetBuildType(options.build_type)
def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.command_list = []
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option(
      '-s', '--suite', dest='suite_name',
      help=('Executable name of the test suite to run '
            '(use -s help to list them).'))
  option_parser.add_option(
      '-f', '--gtest_filter', dest='test_filter',
      help='googletest-style filter string.')
  option_parser.add_option(
      '-a', '--test_arguments', dest='test_arguments',
      help='Additional arguments to pass to the test.')
  option_parser.add_option(
      '-t', dest='timeout', type='int', default=60,
      help='Timeout to wait for each test')
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)
def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Exits the process after printing the suite list when '-s help' was given;
  otherwise normalizes options.suite_name to a list in place.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    # Fixed: print as a function so this file also parses under Python 3;
    # with a single argument the form is equally valid in Python 2.
    print('Available test suites are:')
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print(test_suite)
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = list(gtest_config.STABLE_TEST_SUITES)
def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test_filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--keep_test_server_ports',
                           action='store_true',
                           help=('Indicates the test server ports must be '
                                 'kept. When this is run via a sharder '
                                 'the test server ports should be kept and '
                                 'should not be reset.'))
  # Fixed: the help text previously rendered "device datadirectory" because a
  # space was missing at a string-literal join.
  # NOTE(review): optparse shares the default=[] list object across repeated
  # parse_args calls on the same parser; harmless here since each parser is
  # parsed once, but worth confirming if that ever changes.
  option_parser.add_option('--test_data', action='append', default=[],
                           help=('Each instance defines a directory of test '
                                 'data that should be copied to the target(s) '
                                 'before running the tests. The argument '
                                 'should be of the form <target>:<source>, '
                                 '<target> is relative to the device data '
                                 'directory, and <source> is relative to the '
                                 'chromium build directory.'))
def ProcessJavaTestOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  # An explicit annotation list wins; a bare test filter disables annotation
  # filtering; otherwise fall back to the standard size annotations.
  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest']

  exclude_str = options.exclude_annotation_str
  options.exclude_annotations = exclude_str.split(',') if exclude_str else []

  if not options.keep_test_server_ports:
    if not ports.ResetTestServerPortAllocation():
      raise Exception('Failed to reset test server port.')
def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.command_list = []
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromiumTestShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  add = option_parser.add_option
  add('-j', '--java_only', action='store_true', default=False,
      help='Run only the Java tests.')
  add('-p', '--python_only', action='store_true', default=False,
      help='Run only the host-driven tests.')
  add('--host-driven-root', help='Root of the host-driven tests.')
  add('-w', '--wait_debugger', dest='wait_for_debugger', action='store_true',
      help='Wait for debugger.')
  add('--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest"). '
            'Alternatively, this can be a full path to the apk.'))
  add('--coverage-dir',
      help=('Directory in which to place all generated '
            'EMMA coverage files.'))
def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populate |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """
  ProcessJavaTestOptions(options, error_func)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  # Run both halves by default; each flag disables the other half.
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  # Host-driven tests cannot be discovered without a root directory.
  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  if os.path.exists(options.test_apk):
    # The APK is fully qualified, assume the JAR lives along side.
    options.test_apk_path = options.test_apk
    base, _ = os.path.splitext(options.test_apk_path)
    options.test_apk_jar_path = base + '.jar'
  else:
    # A bare name: resolve both artifacts under the build output tree.
    build_dir = os.path.join(_SDK_OUT_DIR, options.build_type)
    options.test_apk_path = os.path.join(
        build_dir, constants.SDK_BUILD_APKS_DIR, '%s.apk' % options.test_apk)
    options.test_apk_jar_path = os.path.join(
        build_dir, constants.SDK_BUILD_TEST_JAVALIB_DIR,
        '%s.jar' % options.test_apk)

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path)
def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.command_list = []
  option_parser.example = (
      '%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
      ' --package-name=org.chromium.chrome.testshell')

  add = option_parser.add_option
  add('--package-name',
      help='The package name used by the apk containing the application.')
  add('--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """
  ProcessJavaTestOptions(options, error_func)

  if not options.package_name:
    error_func('--package-name must be specified.')
  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)

  # The info JAR sits next to the dexed JAR, named <base>_java.jar.
  dexed_jar = options.uiautomator_jar
  options.uiautomator_info_jar = (
      dexed_jar[:dexed_jar.find('.dex.jar')] + '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package_name)
def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.command_list = []
  option_parser.example = (
      '%prog monkey --package-name=org.chromium.content_shell_apk'
      ' --activity-name=.ContentShellActivity')

  add = option_parser.add_option
  add('--package-name', help='Allowed package.')
  add('--activity-name', help='Name of the activity to start.')
  add('--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  add('--category', default='',
      help='A list of allowed categories.')
  add('--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default]. ')
  add('--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  add('--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package_name:
    error_func('Package name is required.')

  # Split only when a non-empty category string was supplied; the empty
  # default is passed through unchanged.
  category = (
      options.category.split(',') if options.category else options.category)

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package_name,
      options.activity_name,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)
def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.command_list = []
  option_parser.example = '%prog perf --steps perf_steps.json'

  add = option_parser.add_option
  add('--steps',
      help='JSON file containing the list '
           'of perf steps to run.')
  add('--flaky-steps',
      help='A JSON file containing steps that are flaky '
           'and will have its exit code ignored.')
  add('--print-step',
      help='The name of a previously '
           'executed perf step to print.')

  AddCommonOptions(option_parser)
def ProcessPerfTestOptions(options, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # At least one of the two modes (run steps / print a step) is required.
  if not (options.steps or options.print_step):
    error_func('Please specify --steps or --print-step')
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.print_step)
def _RunGTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    suite_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.test_arguments,
        options.timeout,
        suite)
    factory, tests = gtest_setup.Setup(suite_options, devices)

    results, suite_exit_code = test_dispatcher.RunTests(
        tests, factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    # Remember a failing suite's code, but never overwrite ERROR_EXIT_CODE.
    if suite_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = suite_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite,
        flakiness_server=options.flakiness_dashboard_server)

  # Clean up the unpacked isolate dependencies, if any were created.
  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code
def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    # A debugger session can only attach to one device, so drop the rest.
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    # The Java run seeds |exit_code| directly.
    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      # (the host-driven result may replace a failure from the Java run,
      # but never an ERROR_EXIT_CODE).
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  # Report the combined Java + host-driven results in one log entry.
  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  factory, tests = uiautomator_setup.Setup(uiautomator_options)

  # Shard the tests across all attached devices.
  results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  factory, tests = monkey_setup.Setup(monkey_options)

  # Monkey runs are not sharded across devices.
  results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=False, test_timeout=None)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code
def _RunPerfTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, error_func)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  factory, tests = perf_setup.Setup(perf_options)

  results, _ = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=None)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A sorted list of attached device serials (a single-element list when
    |test_device| is given).
  """
  # Fixed: removed a dead `attached_devices = []` that was immediately
  # overwritten by the call below.
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # an explicit raise would be sturdier, but callers may rely on
    # AssertionError, so the behavior is kept.
    assert test_device in attached_devices, (
        'Did not find device %s among attached device. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)
def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  # Check for extra arguments
  if len(args) > 2:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  # Dispatch table: each runner takes (options, error_func, devices).
  runners = {
      'gtest': _RunGTests,
      'instrumentation': _RunInstrumentationTests,
      'uiautomator': _RunUIAutomatorTests,
      'monkey': _RunMonkeyTests,
      'perf': _RunPerfTests,
  }
  if command not in runners:
    raise Exception('Unknown test type.')
  return runners[command](options, option_parser.error, devices)
def HelpCommand(command, options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.
  """
  # With no sub-command argument, show the overall help.
  if len(args) < 3:
    option_parser.print_help()
    return 0
  # With more than one extra argument, report the excess.
  if len(args) > 3:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  requested = args[2]

  if requested not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for itself.
  if requested == 'help':
    option_parser.print_help()
    return 0

  # Populate the parser with the requested command's options before printing.
  VALID_COMMANDS[requested].add_options_func(option_parser)
  option_parser.usage = '%prog ' + requested + ' [options]'
  option_parser.command_list = None
  option_parser.print_help()

  return 0
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
# Maps each CLI sub-command to its option-registration and run functions.
# 'help' registers no options of its own, hence the no-op lambda.
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(
        AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(
        AddPerfTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
}
def main(argv):
  """Builds the command-aware option parser and executes the chosen command."""
  # NOTE(review): |argv| is accepted but unused here; presumably
  # ParseAndExecute reads sys.argv itself — confirm against
  # pylib.utils.command_option_parser.
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)


if __name__ == '__main__':
  sys.exit(main(sys.argv))