"""
Runs unit and integration tests. For usage information run this with '--help'.
"""
import getopt
import os
import StringIO
import sys
import threading
import time
import unittest

import test.check_whitespace
import test.output
import test.runner
import test.unit.connection.authentication
import test.unit.control.controller
import test.unit.descriptor.export
import test.unit.descriptor.reader
import test.unit.descriptor.server_descriptor
import test.unit.descriptor.extrainfo_descriptor
import test.unit.descriptor.router_status_entry
import test.unit.descriptor.networkstatus.directory_authority
import test.unit.descriptor.networkstatus.key_certificate
import test.unit.descriptor.networkstatus.document_v2
import test.unit.descriptor.networkstatus.document_v3
import test.unit.response.control_line
import test.unit.response.control_message
import test.unit.response.events
import test.unit.response.getinfo
import test.unit.response.getconf
import test.unit.response.protocolinfo
import test.unit.response.authchallenge
import test.unit.response.singleline
import test.unit.response.mapaddress
import test.unit.util.conf
import test.unit.util.connection
import test.unit.util.enum
import test.unit.util.proc
import test.unit.util.str_tools
import test.unit.util.system
import test.unit.util.tor_tools
import test.unit.exit_policy.policy
import test.unit.exit_policy.rule
import test.unit.version
import test.unit.tutorial
import test.integ.connection.authentication
import test.integ.connection.connect
import test.integ.control.base_controller
import test.integ.control.controller
import test.integ.socket.control_message
import test.integ.socket.control_socket
import test.integ.descriptor.reader
import test.integ.descriptor.server_descriptor
import test.integ.descriptor.extrainfo_descriptor
import test.integ.descriptor.networkstatus
import test.integ.response.protocolinfo
import test.integ.util.conf
import test.integ.util.proc
import test.integ.util.system
import test.integ.process
import test.integ.version

import stem.prereq
import stem.version
import stem.util.conf
import stem.util.enum
import stem.util.log as log
import stem.util.term as term
import stem.util.system as system
OPT = "uic:t:l:h"
OPT_EXPANDED = ["unit", "integ", "targets=", "test=", "log=", "tor=", "config=", "help"]

CONFIG = stem.util.conf.config_dict("test", {
  "argument.unit": False,
  "argument.integ": False,
  "argument.test": "",
  "argument.log": None,
  "argument.tor": "tor",
  "argument.no_color": False,
  "msg.help": "",
  "target.config": {},
  "target.description": {},
  "target.prereq": {},
  "target.torrc": {},
})

Target = stem.util.enum.UppercaseEnum(
  "ONLINE",
  "RELATIVE",
  "CHROOT",
  "RUN_NONE",
  "RUN_OPEN",
  "RUN_PASSWORD",
  "RUN_COOKIE",
  "RUN_MULTIPLE",
  "RUN_SOCKET",
  "RUN_SCOOKIE",
  "RUN_PTRACE",
  "RUN_ALL",
)

DEFAULT_RUN_TARGET = Target.RUN_OPEN

ERROR_ATTR = (term.Color.RED, term.Attr.BOLD)
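
# These attributes are unpacked into our print helpers, which style their
# message via stem.util.term. Roughly equivalent to (illustrative):
#
#   print term.format("something failed", term.Color.RED, term.Attr.BOLD)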

# Tests are ordered by their dependencies so the lowest level tests come
# first. This is because a problem in, say, controller message parsing will
# cause all higher level tests to fail too. Hence we want the test that most
# narrowly exhibits problems to come first.
UNIT_TESTS = (
  test.unit.util.enum.TestEnum,
  test.unit.util.connection.TestConnection,
  test.unit.util.conf.TestConf,
  test.unit.util.proc.TestProc,
  test.unit.util.str_tools.TestStrTools,
  test.unit.util.system.TestSystem,
  test.unit.util.tor_tools.TestTorTools,
  test.unit.descriptor.export.TestExport,
  test.unit.descriptor.reader.TestDescriptorReader,
  test.unit.descriptor.server_descriptor.TestServerDescriptor,
  test.unit.descriptor.extrainfo_descriptor.TestExtraInfoDescriptor,
  test.unit.descriptor.router_status_entry.TestRouterStatusEntry,
  test.unit.descriptor.networkstatus.directory_authority.TestDirectoryAuthority,
  test.unit.descriptor.networkstatus.key_certificate.TestKeyCertificate,
  test.unit.descriptor.networkstatus.document_v2.TestNetworkStatusDocument,
  test.unit.descriptor.networkstatus.document_v3.TestNetworkStatusDocument,
  test.unit.exit_policy.rule.TestExitPolicyRule,
  test.unit.exit_policy.policy.TestExitPolicy,
  test.unit.version.TestVersion,
  test.unit.tutorial.TestTutorial,
  test.unit.response.control_message.TestControlMessage,
  test.unit.response.control_line.TestControlLine,
  test.unit.response.events.TestEvents,
  test.unit.response.getinfo.TestGetInfoResponse,
  test.unit.response.getconf.TestGetConfResponse,
  test.unit.response.singleline.TestSingleLineResponse,
  test.unit.response.mapaddress.TestMapAddressResponse,
  test.unit.response.protocolinfo.TestProtocolInfoResponse,
  test.unit.response.authchallenge.TestAuthChallengeResponse,
  test.unit.connection.authentication.TestAuthenticate,
  test.unit.control.controller.TestControl,
)

INTEG_TESTS = (
  test.integ.util.conf.TestConf,
  test.integ.util.proc.TestProc,
  test.integ.util.system.TestSystem,
  test.integ.descriptor.reader.TestDescriptorReader,
  test.integ.descriptor.server_descriptor.TestServerDescriptor,
  test.integ.descriptor.extrainfo_descriptor.TestExtraInfoDescriptor,
  test.integ.descriptor.networkstatus.TestNetworkStatus,
  test.integ.version.TestVersion,
  test.integ.response.protocolinfo.TestProtocolInfo,
  test.integ.process.TestProcess,
  test.integ.socket.control_socket.TestControlSocket,
  test.integ.socket.control_message.TestControlMessage,
  test.integ.connection.authentication.TestAuthenticate,
  test.integ.connection.connect.TestConnect,
  test.integ.control.base_controller.TestBaseController,
  test.integ.control.controller.TestController,
)
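
# The '--test' argument filters both tuples by module prefix, e.g. the
# following (illustrative) run would select only the test.unit.util.* classes
# from UNIT_TESTS:
#
#   ./run_tests.py --unit --test test.unit.util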

def load_user_configuration(test_config):
  """
  Parses our commandline arguments, loading our custom test configuration if
  '--config' was provided and then appending arguments to that. This does some
  sanity checking on the input, printing an error and quitting if validation
  fails.
  """
  
  arg_overrides, config_path = {}, None
  
  try:
    opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
  except getopt.GetoptError, exc:
    print "%s (for usage provide --help)" % exc
    sys.exit(1)
  
  # suppress color output if our output is being piped
  if (not sys.stdout.isatty()) or system.is_windows():
    arg_overrides["argument.no_color"] = "true"
  
  for opt, arg in opts:
    if opt in ("-u", "--unit"):
      arg_overrides["argument.unit"] = "true"
    elif opt in ("-i", "--integ"):
      arg_overrides["argument.integ"] = "true"
    elif opt in ("-c", "--config"):
      config_path = os.path.abspath(arg)
    elif opt in ("-t", "--targets"):
      integ_targets = arg.split(",")
      
      # validates the targets
      if not integ_targets:
        print "No targets provided"
        sys.exit(1)
      
      for target in integ_targets:
        if not target in Target:
          print "Invalid integration target: %s" % target
          sys.exit(1)
        
        target_config = test_config.get("target.config", {}).get(target)
        if target_config: arg_overrides[target_config] = "true"
    elif opt in ("--test",):
      # long form only, '-l' belongs to '--log' below
      arg_overrides["argument.test"] = arg
    elif opt in ("-l", "--log"):
      arg_overrides["argument.log"] = arg.upper()
    elif opt in ("--tor",):
      arg_overrides["argument.tor"] = arg
    elif opt in ("-h", "--help"):
      # Prints usage information and quits. This includes a listing of the
      # valid integration targets.
      
      print CONFIG["msg.help"]
      
      # gets the longest target length so we can show the entries in columns
      target_name_length = max(map(len, Target))
      description_format = "  %%-%is - %%s" % target_name_length
      
      for target in Target:
        print description_format % (target, CONFIG["target.description"].get(target, ""))
      
      sys.exit()
  
  # load a testrc if '--config' was given, then apply arguments
  
  if config_path:
    try:
      test_config.load(config_path)
    except IOError, exc:
      print "Unable to load testing configuration at '%s': %s" % (config_path, exc)
      sys.exit(1)
  
  for key, value in arg_overrides.items():
    test_config.set(key, value)
  
  # basic validation on user input
  
  log_config = CONFIG["argument.log"]
  if log_config and not log_config in log.LOG_VALUES:
    print "'%s' isn't a logging runlevel, use one of the following instead:" % log_config
    print "  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR"
    sys.exit(1)
  
  tor_config = CONFIG["argument.tor"]
  if CONFIG["argument.integ"] and not os.path.exists(tor_config) and not stem.util.system.is_available(tor_config):
    print "Unable to start tor, '%s' does not exist." % tor_config
    sys.exit(1)

def _clean_orphaned_pyc():
  test.output.print_noline("  checking for orphaned .pyc files... ", *test.runner.STATUS_ATTR)
  
  orphaned_pyc = []
  
  for base_dir in ('stem', 'test', 'run_tests.py'):
    for pyc_path in test.check_whitespace._get_files_with_suffix(base_dir, ".pyc"):
      # a .pyc file is orphaned when its .py source no longer exists
      if not os.path.exists(pyc_path[:-1]):
        orphaned_pyc.append(pyc_path)
  
  if not orphaned_pyc:
    # no orphaned files, nothing to do
    test.output.print_line("done", *test.runner.STATUS_ATTR)
  else:
    for pyc_file in orphaned_pyc:
      test.output.print_line("    removing %s" % pyc_file, *test.runner.ERROR_ATTR)
      os.remove(pyc_file)
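
# The .pyc -> .py mapping simply drops the trailing 'c'. For instance, if
# stem/util/conf.py were deleted, its stale bytecode would be flagged:
#
#   "stem/util/conf.pyc"[:-1]  # "stem/util/conf.py", which no longer exists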

if __name__ == '__main__':
  try:
    stem.prereq.check_requirements()
  except ImportError, exc:
    print exc
    sys.exit(1)
  
  start_time = time.time()
  
  # override flag to indicate at the end that testing failed somewhere
  testing_failed = False
  
  # count how many tests have been skipped.
  skipped_test_count = 0
  
  # loads and validates our various configurations
  test_config = stem.util.conf.get_config("test")
  
  settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
  test_config.load(settings_path)
  
  load_user_configuration(test_config)
  
  if not CONFIG["argument.unit"] and not CONFIG["argument.integ"]:
    test.output.print_line("Nothing to run (for usage provide --help)\n")
    sys.exit()
  
  # if we have verbose logging then provide the testing config
  our_level = stem.util.log.logging_level(CONFIG["argument.log"])
  info_level = stem.util.log.logging_level(stem.util.log.INFO)
  
  if our_level <= info_level: test.output.print_config(test_config)
  
  error_tracker = test.output.ErrorTracker()
  output_filters = (
    error_tracker.get_filter(),
    test.output.strip_module,
    test.output.align_results,
    test.output.colorize,
  )
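  
  # apply_filters() below threads the raw unittest output through each of
  # these callables in order, collecting failures via the error tracker
  # before the text is aligned and colorized for display.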
  
  stem_logger = log.get_logger()
  logging_buffer = log.LogBuffer(CONFIG["argument.log"])
  stem_logger.addHandler(logging_buffer)
  
  test.output.print_divider("INITIALISING", True)
  test.output.print_line("Performing startup activities...", *test.runner.STATUS_ATTR)
  _clean_orphaned_pyc()
  
  if CONFIG["argument.unit"]:
    test.output.print_divider("UNIT TESTS", True)
    error_tracker.set_category("UNIT TEST")
    
    for test_class in UNIT_TESTS:
      if CONFIG["argument.test"] and \
        not test_class.__module__.startswith(CONFIG["argument.test"]):
        continue
      
      test.output.print_divider(test_class.__module__)
      suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
      test_results = StringIO.StringIO()
      run_result = unittest.TextTestRunner(test_results, verbosity = 2).run(suite)
      
      if stem.prereq.is_python_27():
        skipped_test_count += len(run_result.skipped)
      
      sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
    
    test.output.print_logging(logging_buffer)
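    
    # For a single class the loop above boils down to the stock unittest
    # pattern, e.g. (illustrative):
    #
    #   suite = unittest.TestLoader().loadTestsFromTestCase(test.unit.util.enum.TestEnum)
    #   unittest.TextTestRunner(verbosity = 2).run(suite)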
  
  if CONFIG["argument.integ"]:
    test.output.print_divider("INTEGRATION TESTS", True)
    integ_runner = test.runner.get_runner()
    
    # Queue up all the targets with torrc options we want to run against.
    
    integ_run_targets = []
    all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]
    
    if test_config.get("integ.target.run.all", False):
      # test against everything with torrc options
      integ_run_targets = all_run_targets
    else:
      for target in all_run_targets:
        target_config = CONFIG["target.config"].get(target)
        
        if target_config and test_config.get(target_config, False):
          integ_run_targets.append(target)
    
    # if we didn't specify any targets then use the default
    if not integ_run_targets:
      integ_run_targets.append(DEFAULT_RUN_TARGET)
    
    # Determine the targets we don't meet the prereqs for. Warnings are given
    # about these at the end of the test run so they're more noticeable.
    
    our_version, skip_targets = None, []
    
    for target in integ_run_targets:
      target_prereq = CONFIG["target.prereq"].get(target)
      
      if target_prereq:
        # lazily loaded to skip the system call if we don't have any prereqs
        if not our_version:
          our_version = stem.version.get_system_tor_version(CONFIG["argument.tor"])
        
        if our_version < stem.version.Requirement[target_prereq]:
          skip_targets.append(target)
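    
    # stem.version.Version instances compare numerically, so the prereq check
    # above behaves like (illustrative version string):
    #
    #   stem.version.Version("0.2.2.13") < stem.version.Requirement[target_prereq]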
    
    for target in integ_run_targets:
      if target in skip_targets: continue
      error_tracker.set_category(target)
      
      try:
        # converts the 'target.torrc' csv into a list of test.runner.Torrc enums
        torrc_opts = []
        
        for opt in test_config.get_str_csv("target.torrc", [], sub_key = target):
          if opt in test.runner.Torrc.keys():
            torrc_opts.append(test.runner.Torrc[opt])
          else:
            test.output.print_line("'%s' isn't a test.runner.Torrc enumeration" % opt)
            sys.exit(1)
        
        integ_runner.start(CONFIG["argument.tor"], extra_torrc_opts = torrc_opts)
        
        test.output.print_line("Running tests...", term.Color.BLUE, term.Attr.BOLD)
        
        for test_class in INTEG_TESTS:
          if CONFIG["argument.test"] and \
            not test_class.__module__.startswith(CONFIG["argument.test"]):
            continue
          
          test.output.print_divider(test_class.__module__)
          suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
          test_results = StringIO.StringIO()
          run_result = unittest.TextTestRunner(test_results, verbosity = 2).run(suite)
          
          if stem.prereq.is_python_27():
            skipped_test_count += len(run_result.skipped)
          
          sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
        
        test.output.print_logging(logging_buffer)
        
        # We should have joined on all threads. If not then that indicates a
        # leak that could likely both be a bug and disrupt further targets.
        
        active_threads = threading.enumerate()
        
        if len(active_threads) > 1:
          test.output.print_line("Threads lingering after test run:", *ERROR_ATTR)
          
          for lingering_thread in active_threads:
            test.output.print_line("  %s" % lingering_thread, *ERROR_ATTR)
          
          testing_failed = True
          break
      except KeyboardInterrupt:
        test.output.print_line("  aborted starting tor: keyboard interrupt\n", *ERROR_ATTR)
        break
      finally:
        integ_runner.stop()
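    
    # The 'target.torrc' csv handled above comes from the test configuration,
    # e.g. a (hypothetical) settings.cfg line such as...
    #
    #   target.torrc RUN_COOKIE => PORT, COOKIE
    #
    # ...would map here to [test.runner.Torrc.PORT, test.runner.Torrc.COOKIE].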
    
    for target in skip_targets:
      req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
      test.output.print_line("Unable to run target %s, this requires tor version %s" % (target, req_version), term.Color.RED, term.Attr.BOLD)
  
  # TODO: note unused config options afterward?
  
  base_path = os.path.sep.join(__file__.split(os.path.sep)[:-1])
  whitespace_issues = test.check_whitespace.get_issues(os.path.join(base_path, "stem"))
  whitespace_issues.update(test.check_whitespace.get_issues(os.path.join(base_path, "test")))
  whitespace_issues.update(test.check_whitespace.get_issues(os.path.join(base_path, "run_tests.py")))
  
  if whitespace_issues:
    test.output.print_line("WHITESPACE ISSUES", term.Color.BLUE, term.Attr.BOLD)
    
    for file_path in whitespace_issues:
      test.output.print_line("* %s" % file_path, term.Color.BLUE, term.Attr.BOLD)
      
      for line_number, msg in whitespace_issues[file_path]:
        line_count = "%-4s" % line_number
        test.output.print_line("  line %s - %s" % (line_count, msg))
  
  runtime = time.time() - start_time
  
  if runtime < 1: runtime_label = "(%0.1f seconds)" % runtime
  else: runtime_label = "(%i seconds)" % runtime
  
  if testing_failed or error_tracker.has_error_occured():
    test.output.print_line("TESTING FAILED %s" % runtime_label, *ERROR_ATTR)
    
    for line in error_tracker:
      test.output.print_line("  %s" % line, *ERROR_ATTR)
  elif skipped_test_count > 0:
    test.output.print_line("%i TESTS WERE SKIPPED" % skipped_test_count, term.Color.BLUE, term.Attr.BOLD)
    test.output.print_line("ALL OTHER TESTS PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)
  else:
    test.output.print_line("TESTING PASSED %s" % runtime_label, term.Color.GREEN, term.Attr.BOLD)