# Copyright 2011-2020, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Runs unit and integration tests. For usage information run this with '--help'.
"""

import errno
import importlib
import io
import logging
import logging.handlers
import multiprocessing
import os
import queue
import signal
import sys
import threading
import time
import traceback
import unittest

import stem.util.conf
import stem.util.log
import stem.util.system
import stem.util.test_tools

import test
import test.arguments
import test.output
import test.runner
import test.task

from test.output import STATUS, SUCCESS, ERROR, NO_NL, STDERR, println

CONFIG = stem.util.conf.config_dict('test', {
  'integ.test_directory': './test/data',
  'test.unit_tests': '',
  'test.integ_tests': '',
})
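
# Note: config_dict() keeps these values in sync with the 'test' config, so
# the defaults above are replaced once settings.cfg is loaded in main().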

MOCK_UNAVAILABLE_MSG = """\
To run stem's tests you'll need mock...

  https://pypi.org/project/mock/
"""

MOCK_OUT_OF_DATE_MSG = """\
To run stem's tests you'll need mock. You have version %s, but you need
version 0.8.0 or later...

  https://pypi.org/project/mock/
"""

NEW_CAPABILITIES_FOUND = """\
Your version of Tor has capabilities stem currently isn't taking advantage of.
If you're running the latest version of stem then please file a ticket on:

  https://github.com/torproject/stem/issues/
"""


def log_traceback(sig, frame):
  """
  Dump the stacktraces of all threads on stderr.
  """

  # Attempt to get the name of our signal. Unfortunately the signal module
  # doesn't provide a reverse mapping, so we need to get this ourselves
  # from the attributes.

  signal_name = str(sig)

  for attr_name, value in signal.__dict__.items():
    if attr_name.startswith('SIG') and value == sig:
      signal_name = attr_name
      break
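
  # (Python 3.5+ also offers signal.Signals(sig).name for this lookup, but it
  # raises ValueError for values outside the enum, so we scan instead.)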

  lines = [
    '',  # initial NL so we start on our own line
    '=' * 80,
    'Signal %s received by thread %s in process %i' % (signal_name, threading.current_thread().name, os.getpid()),
    '=' * 80,
  ]

  for thread_name, stacktrace in test.output.thread_stacktraces().items():
    lines.append('-' * 80)
    lines.append('%s thread stacktrace' % thread_name)
    lines.append('-' * 80)
    lines.append(stacktrace)

  lines.append('=' * 80)
  println('\n'.join(lines), STDERR)

  # propagate the signal to any multiprocessing children

  for p in multiprocessing.active_children():
    try:
      os.kill(p.pid, sig)
    except OSError as exc:
      if exc.errno == errno.ESRCH:
        pass  # already exited, no such process
      else:
        raise

  if sig == signal.SIGABRT:
    # we need to use os._exit() to abort every thread in the interpreter,
    # rather than raise a SystemExit exception that can be caught

    os._exit(-1)
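
# Sending SIGUSR1 to a running test process (kill -USR1 <pid>) dumps these
# stacktraces without terminating it, whereas SIGABRT dumps them and then
# aborts via os._exit().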


def get_unit_tests(module_prefixes, exclude):
  """
  Provides the classes for our unit tests.

  :param list module_prefixes: only provide the test if the module starts with
    any of these substrings
  :param list exclude: test modules explicitly excluded

  :returns: an **iterator** for our unit tests
  """

  return _get_tests(CONFIG['test.unit_tests'].splitlines(), module_prefixes, exclude)
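
# Note: 'test.unit_tests' in settings.cfg is a newline separated list of test
# classes, along the lines of...
#
#   test.unit.util.conf.TestConf
#   test.unit.version.TestVersion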


def get_integ_tests(module_prefixes, exclude):
  """
  Provides the classes for our integration tests.

  :param list module_prefixes: only provide the test if the module starts with
    any of these substrings
  :param list exclude: test modules explicitly excluded

  :returns: an **iterator** for our integration tests
  """

  return _get_tests(CONFIG['test.integ_tests'].splitlines(), module_prefixes, exclude)


def _get_tests(modules, module_prefixes, exclude):
  for import_name in modules:
    cropped_name = test.arguments.crop_module_name(import_name)
    cropped_name = cropped_name.rsplit('.', 1)[0]  # exclude the class name

    # Check if '--exclude-test' says we should skip this whole module. The
    # argument can also skip individual tests, but that must be handled
    # later (see _run_test).

    skipped = False

    for exclude_prefix in exclude:
      if cropped_name.startswith(exclude_prefix):
        skipped = True
        break

    if skipped:
      continue

    if not module_prefixes:
      yield import_name
    else:
      for prefix in module_prefixes:
        if cropped_name.startswith(prefix):
          yield import_name
          break
        elif prefix.startswith(cropped_name):
          # single test for this module

          test_name = prefix.rsplit('.', 1)[1]
          yield '%s.%s' % (import_name, test_name)
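
# For example, with module_prefixes of ['util.conf.test_parse_enum_csv'] the
# module 'test.unit.util.conf.TestConf' yields the single test
# 'test.unit.util.conf.TestConf.test_parse_enum_csv', since the prefix
# extends past the cropped module name ('util.conf').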


def main():
  start_time = time.time()

  major_version, minor_version = sys.version_info[0:2]

  if major_version < 3 or (major_version == 3 and minor_version < 6):
    println('stem requires python version 3.6 or greater\n')
    sys.exit(1)

  signal.signal(signal.SIGABRT, log_traceback)
  signal.signal(signal.SIGUSR1, log_traceback)

  test_config = stem.util.conf.get_config('test')
  test_config.load(os.path.join(test.STEM_BASE, 'test', 'settings.cfg'))

  if 'STEM_TEST_CONFIG' in os.environ:
    test_config.load(os.environ['STEM_TEST_CONFIG'])
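
  # Note: a STEM_TEST_CONFIG file is loaded after settings.cfg, so its values
  # override those defaults.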

  try:
    args = test.arguments.Arguments.parse(sys.argv[1:])
    test.task.TOR_VERSION.args = (args.tor_path,)
    test.output.SUPPRESS_STDOUT = args.quiet
  except ValueError as exc:
    println(str(exc))
    sys.exit(1)

  if args.print_help:
    println(test.arguments.Arguments.get_help())
    sys.exit()
  elif not args.run_unit and not args.run_integ:
    println('Nothing to run (for usage provide --help)\n')
    sys.exit()

  test.task.run(
    'INITIALISING',
    test.task.STEM_VERSION,
    test.task.TOR_VERSION if args.run_integ else None,
    test.task.PYTHON_VERSION,
    test.task.PLATFORM_VERSION,
    test.task.CRYPTO_VERSION,
    test.task.PYFLAKES_VERSION,
    test.task.PYCODESTYLE_VERSION,
    test.task.MYPY_VERSION,
    test.task.UNUSED_TESTS,
    test.task.IMPORT_TESTS,
    test.task.REMOVE_TOR_DATA_DIR if args.run_integ else None,
    test.task.PYFLAKES_TASK if not args.specific_test else None,
    test.task.PYCODESTYLE_TASK if not args.specific_test else None,
    test.task.MYPY_TASK if not args.specific_test else None,
  )
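
  # (None entries above are skipped, letting us include tasks conditionally.)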

  # Test logging. If '--log-file' is provided we log to that location,
  # otherwise we buffer messages and log them to stdout after each test
  # completes.

  logging_buffer = queue.Queue()

  if args.logging_runlevel:
    if args.logging_path:
      handler = logging.FileHandler(args.logging_path, mode = 'w')
      handler.setLevel(stem.util.log.logging_level(args.logging_runlevel))
      handler.setFormatter(stem.util.log.FORMATTER)
    else:
      handler = logging.handlers.QueueHandler(logging_buffer)
      handler.setLevel(stem.util.log.logging_level(args.logging_runlevel))

    stem.util.log.get_logger().addHandler(handler)
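
  # When buffering, the QueueHandler simply enqueues each LogRecord into
  # logging_buffer, and test.output.print_logging() drains it after each test.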

  # filters for how testing output is displayed

  error_tracker = test.output.ErrorTracker()

  output_filters = (
    error_tracker.get_filter(),
    test.output.runtimes,
    test.output.strip_module,
    test.output.align_results,
    test.output.colorize,
  )
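
  # Each filter is applied in order to the unittest output text by
  # test.output.apply_filters() in _run_test() below.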

  # Number of tests that we have skipped. This is only available with python
  # 2.7 or later because before that test results didn't have a 'skipped'
  # attribute.
  #
  # TODO: handling of earlier python versions is no longer necessary here

  skipped_tests = 0

  # TODO: this invokes all asynchronous tests, even if we have a --test or
  # --exclude-test argument

  default_test_dir = stem.util.system.expand_path(CONFIG['integ.test_directory'], test.STEM_BASE)
  async_args = test.AsyncTestArgs(default_test_dir, args.tor_path)

  for module_str in stem.util.test_tools.ASYNC_TESTS:
    module = importlib.import_module(module_str.rsplit('.', 1)[0])
    test_classes = [v for k, v in module.__dict__.items() if k.startswith('Test')]

    if len(test_classes) != 1:
      print('BUG: Detected multiple tests for %s: %s' % (module_str, ', '.join(map(str, test_classes))))
      sys.exit(1)

    test_classes[0].run_tests(async_args)
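
  # The asynchronous tests above are started in the background. Their results
  # are collected when the unittest suite later reaches each of them.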

  if args.run_unit:
    test.output.print_divider('UNIT TESTS', True)
    error_tracker.set_category('UNIT TEST')

    for test_class in get_unit_tests(args.specific_test, args.exclude_test):
      run_result = _run_test(args, test_class, args.exclude_test, output_filters)
      test.output.print_logging(logging_buffer)
      skipped_tests += len(getattr(run_result, 'skipped', []))

  if args.run_integ:
    test.output.print_divider('INTEGRATION TESTS', True)
    integ_runner = test.runner.get_runner()

    for target in args.run_targets:
      error_tracker.set_category(target)

      try:
        integ_runner.start(target, args.attribute_targets, args.tor_path)

        println('Running tests...\n', STATUS)

        for test_class in get_integ_tests(args.specific_test, args.exclude_test):
          run_result = _run_test(args, test_class, args.exclude_test, output_filters)
          test.output.print_logging(logging_buffer)
          skipped_tests += len(getattr(run_result, 'skipped', []))

        if not integ_runner.assert_tor_is_running():
          # our tor process died

          error_tracker.register_error()
          break
      except KeyboardInterrupt:
        println('  aborted starting tor: keyboard interrupt\n', ERROR)
        break
      except ValueError as exc:
        println(str(exc), ERROR)  # can arise if there's bad settings.cfg data
        error_tracker.register_error()
      finally:
        integ_runner.stop()

  # ensure that we join all our threads

  active_threads = threading.enumerate()

  if len(active_threads) > 1:
    println('Threads lingering after test run:', ERROR)

    for lingering_thread in active_threads:
      println('  %s' % lingering_thread, ERROR)

  static_check_issues = {}

  for task in (test.task.PYFLAKES_TASK, test.task.PYCODESTYLE_TASK, test.task.MYPY_TASK):
    if not task.is_available and task.unavailable_msg:
      println(task.unavailable_msg, ERROR)
    else:
      task.join()  # no-op if these haven't been run

      if task.result:
        for path, issues in task.result.items():
          for issue in issues:
            static_check_issues.setdefault(path, []).append(issue)

  _print_static_issues(static_check_issues)

  if error_tracker.has_errors_occured():
    println('TESTING FAILED (%i seconds)' % (time.time() - start_time), ERROR, STDERR)

    for line in error_tracker:
      println('  %s' % line, ERROR, STDERR)

    error_modules = error_tracker.get_modules()

    if len(error_modules) < 10 and not args.specific_test:
      println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

      for module in error_modules:
        println('  %s --test %s' % (' '.join(sys.argv), test.arguments.crop_module_name(module)), ERROR, STDERR)
  else:
    if skipped_tests > 0:
      println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

    println('TESTING PASSED (%i seconds)\n' % (time.time() - start_time), SUCCESS)

  new_capabilities = test.get_new_capabilities()

  if new_capabilities:
    println(NEW_CAPABILITIES_FOUND, ERROR)

    for capability_type, msg in sorted(new_capabilities, key = lambda x: x[1]):
      println('  [%s] %s' % (capability_type, msg), ERROR)

  sys.exit(1 if error_tracker.has_errors_occured() else 0)


def _print_static_issues(static_check_issues):
  if static_check_issues:
    println('STATIC CHECKS', STATUS)

    for file_path in sorted(static_check_issues):
      println('* %s' % file_path, STATUS)

      # Map line numbers to their issues so we can both sort by line number
      # and drop any duplicate messages.

      line_to_issues = {}

      for issue in static_check_issues[file_path]:
        line_to_issues.setdefault(issue.line_number, set()).add((issue.message, issue.line))

      for line_number in sorted(line_to_issues.keys()):
        for msg, line in line_to_issues[line_number]:
          line_count = '%-4s' % line_number
          content = ' | %s' % line.strip() if line.strip() else ''
          println('  line %s - %-40s%s' % (line_count, msg, content))

      println()
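
# Issues render along the lines of...
#
#   * stem/example.py
#     line 12   - E501 line too long                      | print(some_call())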


def _run_test(args, test_class, exclude, output_filters):
  # When logging to a file we don't have stdout's test delimiters to correlate
  # logs with the test that generated them, so note where each test begins.

  if args.logging_path:
    stem.util.log.notice('Beginning test %s' % test_class)

  start_time = time.time()

  # Test classes look like...
  #
  #   test.unit.util.conf.TestConf.test_parse_enum_csv
  #
  # We want to strip the 'test.unit.' or 'test.integ.' prefix since it's
  # redundant. We also want to drop the test class name. The individual test
  # name at the end is optional (only present if we used the '--test'
  # argument).

  label_comp = test_class.split('.')[2:]
  del label_comp[-1 if label_comp[-1][0].isupper() else -2]
  test_label = '  %-52s' % ('.'.join(label_comp) + '...')
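
  # For instance, 'test.unit.util.conf.TestConf' gives label_comp of
  # ['util', 'conf', 'TestConf']; the last entry is capitalized so the class
  # name is dropped, labeling the test 'util.conf...'. With a '--test' form
  # like 'test.unit.util.conf.TestConf.test_parse_enum_csv' the class is the
  # second-to-last entry, yielding 'util.conf.test_parse_enum_csv...'.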

  if args.verbose:
    test.output.print_divider(test_class)
  else:
    println(test_label, STATUS, NO_NL)

  try:
    suite = unittest.TestLoader().loadTestsFromName(test_class)
  except AttributeError:
    if args.specific_test:
      # should only come up if the user provided '--test' for something that doesn't exist
      println(' no such test', ERROR)
      return None
    else:
      raise
  except Exception:
    println(' failed', ERROR)
    traceback.print_exc()
    return None

  # check if we should skip any individual tests within this module

  cropped_name = test.arguments.crop_module_name(test_class)
  cropped_name = cropped_name.rsplit('.', 1)[0]  # exclude the class name

  for prefix in exclude:
    if prefix.startswith(cropped_name):
      test_name = prefix.split('.')[-1]
      suite._tests = list(filter(lambda t: t.id().split('.')[-1] != test_name, suite._tests))
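
  # Note: TestSuite doesn't expose test removal, so this reaches into its
  # private '_tests' attribute (present in CPython's unittest, but not a
  # stable API).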

  test_results = io.StringIO()
  run_result = stem.util.test_tools.TimedTestRunner(test_results, verbosity = 2).run(suite)

  if args.verbose:
    println(test.output.apply_filters(test_results.getvalue(), *output_filters))
  elif not run_result.failures and not run_result.errors:
    println(' success (%0.2fs)' % (time.time() - start_time), SUCCESS)
  elif args.quiet:
    println(test_label, STATUS, NO_NL, STDERR)
    println(' failed (%0.2fs)' % (time.time() - start_time), ERROR, STDERR)
    println(test.output.apply_filters(test_results.getvalue(), *output_filters), STDERR)
  else:
    println(' failed (%0.2fs)' % (time.time() - start_time), ERROR)
    println(test.output.apply_filters(test_results.getvalue(), *output_filters), NO_NL)

  if args.logging_path:
    stem.util.log.notice('Finished test %s' % test_class)

  return run_result


if __name__ == '__main__':
  main()