# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''A test runner for gtest application tests.'''

import argparse
import json
import logging
import os
import string
import sys
import time

from mopy import gtest
from mopy.config import Config


APPTESTS = os.path.abspath(os.path.join(__file__, '..', 'data', 'apptests'))
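
# Illustrative invocation (the script name, build directory, and filter value
# below are hypothetical examples):
#   python apptest_runner.py out/Debug --verbose \
#       --apptest-filter=mojo:sample_apptests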


def main():
  parser = argparse.ArgumentParser(description='An application test runner.')
  parser.add_argument('build_dir', type=str,
                      help='The build output directory.')
  parser.add_argument('--verbose', default=False, action='store_true',
                      help='Print additional logging information.')
  parser.add_argument('--repeat-count', default=1, metavar='INT',
                      action='store', type=int,
                      help='The number of times to repeat the set of tests.')
  parser.add_argument('--write-full-results-to', metavar='FILENAME',
                      help='The path to write the JSON list of full results.')
  parser.add_argument('--test-list-file', metavar='FILENAME', type=file,
                      default=APPTESTS, help='The file listing tests to run.')
  parser.add_argument('--apptest-filter', default='',
                      help='A comma-separated list of mojo:apptests to run.')
  args, commandline_args = parser.parse_known_args()

  logger = logging.getLogger()
  logging.basicConfig(stream=sys.stdout, format='%(levelname)s:%(message)s')
  logger.setLevel(logging.DEBUG if args.verbose else logging.WARNING)
  logger.debug('Initialized logging: level=%s' % logger.level)

  logger.debug('Test list file: %s', args.test_list_file)
  config = Config(args.build_dir, is_verbose=args.verbose,
                  apk_name='MojoRunnerApptests.apk')
  execution_globals = {'config': config}
  exec args.test_list_file in execution_globals
  test_list = execution_globals['tests']
  logger.debug('Test list: %s' % test_list)
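
  # Each |test_list| entry is expected to be a dict; the loop below reads the
  # keys 'test', 'name', 'type', and 'args'. A hypothetical example entry:
  #   {'test': 'mojo:example_apptests', 'name': 'example apptests',
  #    'type': 'gtest_isolated', 'args': ['--example-flag']}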

  shell = None
  if config.target_os == Config.OS_ANDROID:
    from mopy.android import AndroidShell
    shell = AndroidShell(config)
    result = shell.InitShell()
    if result != 0:
      return result
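
  # On other target platforms no shell is started; |shell| stays None, which
  # gtest.run_apptest below is assumed to handle.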

  tests = []
  failed = []
  failed_suites = 0
  apptest_filter = [a for a in string.split(args.apptest_filter, ',') if a]
  gtest_filter = [a for a in commandline_args if a.startswith('--gtest_filter')]
  for _ in range(args.repeat_count):
    for test_dict in test_list:
      test = test_dict['test']
      test_name = test_dict.get('name', test)
      test_type = test_dict.get('type', 'gtest')
      test_args = test_dict.get('args', []) + commandline_args
      if apptest_filter and not set(apptest_filter) & set([test, test_name]):
        continue

      print 'Running %s...%s' % (test_name, ('\n' if args.verbose else '')),
      sys.stdout.flush()

      assert test_type in ('gtest', 'gtest_isolated')
      if test_type == 'gtest':
        print ('WARNING: tests are forced to gtest_isolated until '
               'http://crbug.com/529487 is fixed')
        test_type = 'gtest_isolated'
      isolate = test_type == 'gtest_isolated'
      (ran, fail) = gtest.run_apptest(config, shell, test_args, test, isolate)
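      # |ran| and |fail| are treated below as the lists of test fixtures that
      # ran and failed, respectively (an assumption based on how they are used).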
      # Ignore empty fixture lists when the commandline has a gtest filter flag.
      if gtest_filter and not ran and not fail:
        print '[ NO TESTS ] ' + (test_name if args.verbose else '')
        continue
      # Use the apptest name if the whole suite failed or no fixtures were run.
      fail = [test_name] if (not ran and (not fail or fail == [test])) else fail
      tests.extend(ran)
      failed.extend(fail)
      result = ran and not fail
      print '[ PASSED ]' if result else '[ FAILED ]',
      print test_name if args.verbose or not result else ''
      # Abort when 3 apptest suites, or a tenth of all, have failed.
      # base::TestLauncher does this for timeouts and unknown results.
      failed_suites += 0 if result else 1
      if failed_suites >= max(3, len(test_list) / 10):
        print 'Too many failing suites (%d), exiting now.' % failed_suites
        failed.append('Test runner aborted for excessive failures.')
        break
    else:
      continue
    break

  print '[==========] %d tests ran.' % len(tests)
  print '[ PASSED ] %d tests.' % (len(tests) - len(failed))
  if failed:
    print '[ FAILED ] %d tests, listed below:' % len(failed)
    for failure in failed:
      print '[ FAILED ] %s' % failure

  if args.write_full_results_to:
    _WriteJSONResults(tests, failed, args.write_full_results_to)

  return 1 if failed else 0


def _WriteJSONResults(tests, failed, write_full_results_to):
  '''Write the apptest results in the Chromium JSON test results format.

  See <http://www.chromium.org/developers/the-json-test-results-format>
  TODO(msw): Use Chromium and TYP testing infrastructure.
  TODO(msw): Use GTest Suite.Fixture names, not the apptest names.
  Adapted from chrome/test/mini_installer/test_installer.py
  '''
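  # An abridged, hypothetical output file might look like:
  #   {"version": 3, "interrupted": false, "path_delimiter": ".",
  #    "num_failures_by_type": {"FAIL": 1, "PASS": 2},
  #    "tests": {"mojo:example_apptests":
  #              {"expected": "PASS", "actual": "FAIL", "is_unexpected": true}}}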
  results = {
    'interrupted': False,
    'path_delimiter': '.',
    'version': 3,
    'seconds_since_epoch': time.time(),
    'num_failures_by_type': {
      'FAIL': len(failed),
      'PASS': len(tests) - len(failed),
    },
    'tests': {},
  }
  for test in tests:
    value = {
      'expected': 'PASS',
      'actual': 'FAIL' if test in failed else 'PASS',
      'is_unexpected': True if test in failed else False,
    }
    _AddPathToTrie(results['tests'], test, value)

  with open(write_full_results_to, 'w') as fp:
    json.dump(results, fp, indent=2)
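

# Builds a trie of nested dicts from dotted test paths; e.g. adding
# 'ExampleSuite.Fixture' (a hypothetical name) with value v yields
# {'ExampleSuite': {'Fixture': v}}.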
def _AddPathToTrie(trie, path, value):
  if '.' not in path:
    trie[path] = value
    return
  directory, rest = path.split('.', 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)


if __name__ == '__main__':
  sys.exit(main())