3 # Copyright The SCons Foundation
5 """runtest - wrapper script for running SCons tests
7 The SCons test suite consists of:
9 * unit tests - *Tests.py files from the SCons/ directory
10 * end-to-end tests - *.py files in the test/ directory that
11 require the custom SCons framework from
14 This script adds SCons/ and testing/ directories to PYTHONPATH,
15 performs test discovery and processes tests according to options.
18 from __future__
import annotations
28 from abc
import ABC
, abstractmethod
29 from io
import StringIO
30 from pathlib
import Path
, PurePath
, PureWindowsPath
31 from queue
import Queue
32 from typing
import TextIO
35 debug
: str |
None = None
36 scons
: str |
None = None
37 catch_output
: bool = False
38 suppress_output
: bool = False
39 script
= PurePath(sys
.argv
[0]).name
40 usagestr
= f
"{script} [OPTIONS] [TEST ...]"
42 Environment Variables:
43 PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
44 TESTCMD_VERBOSE: turn on verbosity in TestCommand\
47 # this is currently expected to be global, maybe refactor later?
50 parser
= argparse
.ArgumentParser(
54 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
57 # test selection options:
58 testsel
= parser
.add_argument_group(description
='Test selection options:')
59 testsel
.add_argument(metavar
='TEST', nargs
='*', dest
='testlist',
60 help="Select TEST(s) (tests and/or directories) to run")
61 testlisting
= testsel
.add_mutually_exclusive_group()
62 testlisting
.add_argument('-f', '--file', metavar
='FILE', dest
='testlistfile',
63 help="Select only tests in FILE")
64 testlisting
.add_argument('-a', '--all', action
='store_true',
65 help="Select all tests")
66 testlisting
.add_argument('--retry', action
='store_true',
67 help="Rerun the last failed tests in 'failed_tests.log'")
68 testsel
.add_argument('--exclude-list', metavar
="FILE", dest
='excludelistfile',
69 help="""Exclude tests in FILE from current selection""")
70 testtype
= testsel
.add_mutually_exclusive_group()
71 testtype
.add_argument('--e2e-only', action
='store_true',
72 help="Exclude unit tests from selection")
73 testtype
.add_argument('--unit-only', action
='store_true',
74 help="Exclude end-to-end tests from selection")
76 # miscellaneous options
77 parser
.add_argument('-b', '--baseline', metavar
='BASE',
78 help="Run test scripts against baseline BASE.")
79 parser
.add_argument('-d', '--debug', action
='store_true',
80 help="Run test scripts under the Python debugger.")
81 parser
.add_argument('-D', '--devmode', action
='store_true',
82 help="Run tests in Python's development mode (Py3.7+ only).")
83 parser
.add_argument('-e', '--external', action
='store_true',
84 help="Run the script in external mode (for external Tools)")
def posint(arg: str) -> int:
    """Special positive-int type for argparse.

    Converts *arg* to an integer and rejects negative values, so that
    ``-j/--jobs`` only accepts 0 or a positive job count.

    Raises:
        argparse.ArgumentTypeError: if the value is negative.
        ValueError: if *arg* is not an integer at all (argparse reports this
            as an invalid value).
    """
    num = int(arg)
    if num < 0:
        raise argparse.ArgumentTypeError("JOBS value must not be negative")
    return num
93 parser
.add_argument('-j', '--jobs', metavar
='JOBS', default
=1, type=posint
,
94 help="Run tests in JOBS parallel jobs (0 for cpu_count).")
95 parser
.add_argument('-l', '--list', action
='store_true', dest
='list_only',
96 help="List available tests and exit.")
97 parser
.add_argument('-n', '--no-exec', action
='store_false',
99 help="No execute, just print command lines.")
100 parser
.add_argument('--nopipefiles', action
='store_false',
101 dest
='allow_pipe_files',
102 help="""Do not use the "file pipe" workaround for subprocess
103 for starting tests. See source code for warnings.""")
104 parser
.add_argument('-P', '--python', metavar
='PYTHON',
105 help="Use the specified Python interpreter.")
106 parser
.add_argument('--quit-on-failure', action
='store_true',
107 help="Quit on any test failure.")
108 parser
.add_argument('--runner', metavar
='CLASS',
109 help="Test runner class for unit tests.")
110 parser
.add_argument('-X', dest
='scons_exec', action
='store_true',
111 help="Test script is executable, don't feed to Python.")
112 parser
.add_argument('-x', '--exec', metavar
="SCRIPT",
113 help="Test using SCRIPT as path to SCons.")
114 parser
.add_argument('--faillog', dest
='error_log', metavar
="FILE",
115 default
='failed_tests.log',
116 help="Log failed tests to FILE (enabled by default, "
117 "default file 'failed_tests.log')")
118 parser
.add_argument('--no-faillog', dest
='error_log',
119 action
='store_const', const
=None,
120 default
='failed_tests.log',
121 help="Do not log failed tests to a file")
123 parser
.add_argument('--no-ignore-skips', dest
='dont_ignore_skips',
126 help="If any tests are skipped, exit status 2")
128 outctl
= parser
.add_argument_group(description
='Output control options:')
129 outctl
.add_argument('-k', '--no-progress', action
='store_false',
130 dest
='print_progress',
131 help="Suppress count and progress percentage messages.")
132 outctl
.add_argument('--passed', action
='store_true',
133 dest
='print_passed_summary',
134 help="Summarize which tests passed.")
135 outctl
.add_argument('-q', '--quiet', action
='store_false',
137 help="Don't print the test being executed.")
138 outctl
.add_argument('-s', '--short-progress', action
='store_true',
139 help="""Short progress, prints only the command line
140 and a progress percentage, no results.""")
141 outctl
.add_argument('-t', '--time', action
='store_true', dest
='print_times',
142 help="Print test execution time.")
143 outctl
.add_argument('--verbose', metavar
='LEVEL', type=int, choices
=range(1, 4),
144 help="""Set verbose level
145 (1=print executed commands,
146 2=print commands and non-zero output,
147 3=print commands and all output).""")
149 # outctl.add_argument('--version', action='version', version=f'{script} 1.0')
151 logctl
= parser
.add_argument_group(description
='Log control options:')
152 logctl
.add_argument('-o', '--output', metavar
='LOG', help="Save console output to LOG.")
156 help="Save results to XML in SCons XML format (use - for stdout).",
159 # process args and handle a few specific cases:
160 args
: argparse
.Namespace
= parser
.parse_args()
162 # we can't do this check with an argparse exclusive group, since those
163 # only work with optional args, and the cmdline tests (args.testlist)
164 # are not optional args,
165 if args
.testlist
and (args
.testlistfile
or args
.all
or args
.retry
):
167 parser
.format_usage()
168 + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
173 args
.testlistfile
= 'failed_tests.log'
175 if args
.testlistfile
:
176 # args.testlistfile changes from a string to a pathlib Path object
178 ptest
= Path(args
.testlistfile
)
179 args
.testlistfile
= ptest
.resolve(strict
=True)
180 except FileNotFoundError
:
182 parser
.format_usage()
183 + f
'error: -f/--file testlist file "{args.testlistfile}" not found\n'
187 if args
.excludelistfile
:
188 # args.excludelistfile changes from a string to a pathlib Path object
190 pexcl
= Path(args
.excludelistfile
)
191 args
.excludelistfile
= pexcl
.resolve(strict
=True)
192 except FileNotFoundError
:
194 parser
.format_usage()
195 + f
'error: --exclude-list file "{args.excludelistfile}" not found\n'
201 # on Linux, check available rather than physical CPUs
202 args
.jobs
= len(os
.sched_getaffinity(0))
203 except AttributeError:
205 args
.jobs
= os
.cpu_count()
210 parser
.format_usage()
211 + "Unable to detect CPU count, give -j a non-zero value\n"
215 if args
.jobs
> 1 or args
.output
:
216 # 1. don't let tests write stdout/stderr directly if multi-job,
217 # else outputs will interleave and be hard to read.
218 # 2. If we're going to write a logfile, we also need to catch the output.
221 if not args
.printcommand
:
222 suppress_output
= catch_output
= True
225 os
.environ
['TESTCMD_VERBOSE'] = str(args
.verbose
)
227 if args
.short_progress
:
228 args
.print_progress
= True
229 suppress_output
= catch_output
= True
232 # TODO: add a way to pass a specific debugger
238 # --- setup stdout/stderr ---
class Unbuffered:
    """File-object wrapper that flushes after every write.

    Wrapped around sys.stdout/sys.stderr so progress output appears
    immediately rather than sitting in a buffer. All other attribute
    access is delegated to the wrapped file.
    """
    def __init__(self, file):
        self.file = file

    def write(self, arg):
        # write-then-flush is the whole point of this class
        self.file.write(arg)
        self.file.flush()

    def __getattr__(self, attr):
        # delegate everything else (encoding, isatty, close, ...) to the file
        return getattr(self.file, attr)

sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
253 # possible alternative: switch to using print, and:
254 # print = functools.partial(print, flush)
class Tee:
    """Duplicate writes to an open log file and another stream (like tee(1)).

    Used to mirror stdout/stderr into the ``--output`` logfile while still
    showing them on the console.
    """
    def __init__(self, openfile, stream):
        self.file = openfile
        self.stream = stream

    def write(self, data):
        self.file.write(data)
        self.stream.write(data)

    def flush(self, data=None):
        # Bug fix: flush() takes no payload -- calling file.flush(data)
        # raised TypeError. Keep an ignored optional arg for backward
        # compatibility with any caller passing one.
        self.file.flush()
        self.stream.flush()
# Open the requested logfile and mirror both output streams into it.
logfile = open(args.output, 'w')
# this is not ideal: we monkeypatch stdout/stderr a second time
# (already did for Unbuffered), so here we can't easily detect what
# state we're in on closedown. Just hope it's okay...
sys.stdout = Tee(logfile, sys.stdout)
sys.stderr = Tee(logfile, sys.stderr)
277 # --- define helpers ----
if sys.platform == 'win32':
    # On Windows, look up the shell association for .py files so we can
    # tell whether scripts can be launched directly.
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,  # flags
        ctypes.c_ulong,  # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    # ASSOCSTR enumeration values (shlwapi.h)
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        """Return the association-related string for *filetype*.

        Queries the shell association database (AssocQueryStringW) for the
        command template of *filetype* (e.g. '.py'). Retries with a larger
        buffer if the first call reports E_POINTER (buffer too small).
        """
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        cch = ctypes.c_ulong(260)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                if e.winerror != E_POINTER:
                    raise
                # buffer was too small; cch now holds the needed size — retry
                continue
            return buf.value
318 # Without any output suppressed, we let the subprocess
319 # write its stuff freely to stdout/stderr.
def spawn_it(command_args, env):
    """Run a test without capturing output.

    The child writes directly to our stdout/stderr, so the captured
    fields of the result are None; only the exit status is meaningful.
    """
    completed = subprocess.run(command_args, shell=False, env=env)
    result = (completed.stdout, completed.stderr, completed.returncode)
    return result
326 # Else, we catch the output of both pipes...
327 if args
.allow_pipe_files
:
328 # The subprocess.Popen() suffers from a well-known
329 # problem. Data for stdout/stderr is read into a
330 # memory buffer of fixed size, 65K which is not very much.
331 # When it fills up, it simply stops letting the child process
332 # write to it. The child will then sit and patiently wait to
333 # be able to write the rest of its output. Hang!
334 # In order to work around this, we follow a suggestion
335 # by Anders Pearson in
336 # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
337 # and pass temp file objects to Popen() instead of the ubiquitous
def spawn_it(command_args, env):
    """Run a test, capturing output via temporary files.

    Using real temp files instead of PIPEs avoids the classic
    subprocess deadlock when the child fills the pipe buffer.
    Returns (stderr_text, stdout_text, returncode) -- note the
    stderr-first order, matching SystemExecutor's unpacking.
    """
    # Create temporary files
    tmp_stdout = tempfile.TemporaryFile(mode='w+t')
    tmp_stderr = tempfile.TemporaryFile(mode='w+t')
    # Start subprocess, output going to the temp files
    cp = subprocess.run(
        command_args,
        stdout=tmp_stdout,
        stderr=tmp_stderr,
        shell=False,
        env=env,
    )
    # Rewind to start of files
    tmp_stdout.seek(0)
    tmp_stderr.seek(0)
    spawned_stdout = tmp_stdout.read()
    spawned_stderr = tmp_stderr.read()
    # Remove temp files by closing them
    tmp_stdout.close()
    tmp_stderr.close()
    return spawned_stderr, spawned_stdout, cp.returncode
369 # We get here only if the user gave the '--nopipefiles'
370 # option, meaning the "temp file" approach for
371 # subprocess.communicate() above shouldn't be used.
372 # He hopefully knows what he's doing, but again we have a
373 # potential deadlock situation in the following code:
374 # If the subprocess writes a lot of data to its stderr,
375 # the pipe will fill up (nobody's reading it yet) and the
376 # subprocess will wait for someone to read it.
377 # But the parent process is trying to read from stdin
378 # (but the subprocess isn't writing anything there).
# There be dragons here! Better don't use this!
def spawn_it(command_args, env):
    """Run a test, capturing output via subprocess PIPEs (deadlock-prone).

    Only used with --nopipefiles. Output is returned as bytes.

    Bug fix: this variant used to return (stdout, stderr, rc) while the
    temp-file variant returns (stderr, stdout, rc), and the caller
    (SystemExecutor.execute) unpacks stderr-first -- so stdout and stderr
    were swapped in --nopipefiles mode. Return stderr-first to match.
    """
    cp = subprocess.run(
        command_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=False,
        env=env,
    )
    return cp.stderr, cp.stdout, cp.returncode
class RuntestBase(ABC):
    """ Base class for tests.

    Records per-test bookkeeping (path, sequence number, captured output,
    exit status, timing). Subclasses must implement execute().
    """
    _ids = itertools.count(1)  # to generate test number automatically

    def __init__(self, path, spe=None):
        self.path = str(path)
        self.testno = next(self._ids)
        self.stdout = self.stderr = self.status = None
        self.abspath = path.absolute()
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            # search path entries: if the test exists relative to one of
            # them, use that resolved location instead
            for d in spe:
                f = os.path.join(d, path)
                if os.path.isfile(f):
                    self.path = f
                    break

    @abstractmethod
    def execute(self, env):
        pass
class SystemExecutor(RuntestBase):
    """ Test class for tests executed with spawn_it() """
    def execute(self, env):
        # spawn_it returns stderr-first; see its docstring
        self.stderr, self.stdout, s = spawn_it(self.command_args, env)
        self.status = s  # was missing: status must be recorded for the summary
        if s < 0 or s > 2 and s != 5:
            # statuses 0/1/2 are pass/fail/no-result; 5 is "skip" -- anything
            # else is unexpected and worth flagging
            sys.stdout.write("Unexpected exit status %d\n" % s)
class PopenExecutor(RuntestBase):
    """ Test class for tests executed with Popen.

    A bit of a misnomer as the Popen call is now wrapped
    by calling subprocess.run (behind the covers uses Popen).
    Very similar to SystemExecutor, but doesn't allow for not catching
    the output.
    """
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    if args.allow_pipe_files:

        def execute(self, env) -> None:
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess, output going to the temp files
            cp = subprocess.run(
                self.command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )
            self.status = cp.returncode
            # Rewind to start of files
            tmp_stdout.seek(0)
            tmp_stderr.seek(0)
            self.stdout = tmp_stdout.read()
            self.stderr = tmp_stderr.read()
            # Remove temp files by closing them
            tmp_stdout.close()
            tmp_stderr.close()
    else:

        def execute(self, env) -> None:
            # PIPE-based capture; see spawn_it() for deadlock caveats
            cp = subprocess.run(
                self.command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            self.status, self.stdout, self.stderr = (
                cp.returncode, cp.stdout, cp.stderr
            )
478 class XML(PopenExecutor
):
479 """ Test class for tests that will output in scons xml """
482 f
.write(' <results>\n')
486 f
.write(' <file_name>%s</file_name>\n' % self
.path
)
487 f
.write(' <command_line>%s</command_line>\n' % self
.command_str
)
488 f
.write(' <exit_status>%s</exit_status>\n' % self
.status
)
489 f
.write(' <stdout>%s</stdout>\n' % self
.stdout
)
490 f
.write(' <stderr>%s</stderr>\n' % self
.stderr
)
491 f
.write(' <time>%.1f</time>\n' % self
.test_time
)
492 f
.write(' </test>\n')
495 f
.write(' <time>%.1f</time>\n' % self
.total_time
)
496 f
.write(' </results>\n')
501 Test
= SystemExecutor
503 # --- start processing ---
505 if not args
.baseline
or args
.baseline
== '.':
507 elif args
.baseline
== '-':
509 "'baseline' logic used to checkout from svn. It has been removed. "
510 "If you used this, please let us know on devel mailing list, "
511 "IRC, or discord server\n"
515 baseline
= args
.baseline
516 scons_runtest_dir
= baseline
518 if not args
.external
:
519 scons_script_dir
= os
.path
.join(baseline
, 'scripts')
520 scons_tools_dir
= os
.path
.join(baseline
, 'bin')
521 scons_lib_dir
= baseline
523 scons_script_dir
= ''
528 'SCONS_RUNTEST_DIR': scons_runtest_dir
,
529 'SCONS_TOOLS_DIR': scons_tools_dir
,
530 'SCONS_SCRIPT_DIR': scons_script_dir
,
535 # Let the version of SCons that the -x option pointed to find
537 testenv
['SCONS'] = scons
539 # Because SCons is really aggressive about finding its modules,
540 # it sometimes finds SCons modules elsewhere on the system.
541 # This forces SCons to use the modules that are being tested.
542 testenv
['SCONS_LIB_DIR'] = scons_lib_dir
545 testenv
['SCONS_EXEC'] = '1'
548 testenv
['SCONS_EXTERNAL_TEST'] = '1'
550 # Insert scons path and path for testing framework to PYTHONPATH
551 scriptpath
= os
.path
.dirname(os
.path
.realpath(__file__
))
552 frameworkpath
= os
.path
.join(scriptpath
, 'testing', 'framework')
553 testenv
['PYTHONPATH'] = os
.pathsep
.join((scons_lib_dir
, frameworkpath
))
554 pythonpath
= os
.environ
.get('PYTHONPATH')
556 testenv
['PYTHONPATH'] = testenv
['PYTHONPATH'] + os
.pathsep
+ pythonpath
558 if sys
.platform
== 'win32':
559 # Windows doesn't support "shebang" lines directly (the Python launcher
560 # and Windows Store version do, but you have to get them launched first)
561 # so to directly launch a script we depend on an assoc for .py to work.
562 # Some systems may have none, and in some cases IDE programs take over
563 # the assoc. Detect this so the small number of tests affected can skip.
565 python_assoc
= get_template_command('.py')
568 if not python_assoc
or "py" not in python_assoc
:
569 testenv
['SCONS_NO_DIRECT_SCRIPT'] = '1'
571 os
.environ
.update(testenv
)
573 # Clear _JAVA_OPTIONS which java tools output to stderr when run breaking tests
574 if '_JAVA_OPTIONS' in os
.environ
:
575 del os
.environ
['_JAVA_OPTIONS']
578 # ---[ test discovery ]------------------------------------
579 # This section figures out which tests to run.
581 # The initial testlist is made by reading from the testlistfile,
582 # if supplied, or by looking at the test arguments, if supplied,
583 # or by looking for all test files if the "all" argument is supplied.
584 # One of the three is required.
586 # Each test path, whichever of the three sources it comes from,
587 # specifies either a test file or a directory to search for
588 # SCons tests. SCons code layout assumes that any file under the 'SCons'
589 # subdirectory that ends with 'Tests.py' is a unit test, and any Python
590 # script (*.py) under the 'test' subdirectory is an end-to-end test.
591 # We need to track these because they are invoked differently.
592 # find_unit_tests and find_e2e_tests are used for this searching.
594 # Note that there are some tests under 'SCons' that *begin* with
595 # 'test_', but they're packaging and installation tests, not
596 # functional tests, so we don't execute them by default. (They can
597 # still be executed by hand, though).
599 # Test exclusions, if specified, are then applied.
def scanlist(testfile):
    """ Process a testlist file.

    Reads *testfile* (a Path) and returns a list of Path objects, one per
    non-comment, non-blank line.
    """
    listed = StringIO(testfile.read_text())
    entries = [line.strip() for line in listed.readlines()
               if not line.startswith('#')]
    # in order to allow scanned lists to work whether they use forward or
    # backward slashes, first create the object as a PureWindowsPath which
    # accepts either, then use that to make a Path object to use for
    # comparisons like "file in scanned_list".
    if sys.platform == 'win32':
        # Windows Path accepts both slash styles natively
        return [Path(entry) for entry in entries if entry]
    return [Path(PureWindowsPath(entry).as_posix()) for entry in entries if entry]
def find_unit_tests(directory):
    """ Look for unit tests.

    Walks *directory* collecting files named '*Tests.py'; directories
    containing a 'sconstest.skip' marker file are skipped entirely.
    Returns a sorted list of Path objects.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith("Tests.py")
        )
    return sorted(found)
def find_e2e_tests(directory):
    """ Look for end-to-end tests.

    Walks *directory* collecting '*.py' files; directories containing a
    'sconstest.skip' marker are skipped, and files listed in a directory's
    '.exclude_tests' file are omitted. Returns a sorted list of Paths.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue

        # Slurp in any tests in exclude lists
        excludes = []
        if ".exclude_tests" in filenames:
            excludefile = Path(dirpath, ".exclude_tests").resolve()
            excludes = scanlist(excludefile)

        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith(".py") and Path(fname) not in excludes
        )
    return sorted(found)
652 # if we have a testlist file read that, else hunt for tests.
655 if args
.testlistfile
:
656 tests
= scanlist(args
.testlistfile
)
659 if args
.all
: # -a flag
660 testpaths
= [Path('SCons'), Path('test')]
661 elif args
.testlist
: # paths given on cmdline
662 if sys
.platform
== 'win32':
663 testpaths
= [Path(t
) for t
in args
.testlist
]
665 testpaths
= [Path(PureWindowsPath(t
).as_posix()) for t
in args
.testlist
]
667 for path
in testpaths
:
668 # Clean up path removing leading ./ or .\
670 if name
.startswith('.') and name
[1] in (os
.sep
, os
.altsep
):
671 path
= path
.with_name(tn
[2:])
675 if path
.parts
[0] == "SCons" or path
.parts
[0] == "testing":
676 unittests
.extend(find_unit_tests(path
))
677 elif path
.parts
[0] == 'test':
678 endtests
.extend(find_e2e_tests(path
))
679 # else: TODO: what if user pointed to a dir outside scons tree?
681 if path
.match("*Tests.py"):
682 unittests
.append(path
)
683 elif path
.match("*.py"):
684 endtests
.append(path
)
686 tests
= sorted(unittests
+ endtests
)
690 tests
= [t
for t
in tests
if not t
.match("*Tests.py")]
692 tests
= [t
for t
in tests
if t
.match("*Tests.py")]
693 if args
.excludelistfile
:
694 excludetests
= scanlist(args
.excludelistfile
)
695 tests
= [t
for t
in tests
if t
not in excludetests
]
697 # did we end up with any tests?
699 sys
.stderr
.write(parser
.format_usage() + """
700 error: no tests matching the specification were found.
701 See "Test selection options" in the help for details on
702 how to specify and/or exclude tests.
706 # ---[ test processing ]-----------------------------------
707 tests
= [Test(t
) for t
in tests
]
715 if os
.name
== 'java':
716 args
.python
= os
.path
.join(sys
.prefix
, 'jython')
718 args
.python
= sys
.executable
719 os
.environ
["python_executable"] = args
.python
if args.print_times:
    def print_time(fmt, tm):
        """Print a timing message (enabled by -t/--time)."""
        print(fmt % tm)
else:
    def print_time(fmt, tm):
        """Timing output disabled: do nothing."""
        pass
731 time_func
= time
.perf_counter
732 total_start_time
= time_func()
733 total_num_tests
= len(tests
)
def log_result(t, io_lock=None):
    """ log the result of a test.

    "log" in this case means writing to stdout. Since we might be
    called from any of several different threads (multi-job run),
    we need to lock access to the log to avoid interleaving. The same
    would apply if output was a file.

    Args:
        t (Test): (completed) testcase instance
        io_lock (threading.Lock): (optional) lock to use
    """
    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock:
        io_lock.acquire()
    try:
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            if t.stdout:
                print(t.stdout)
            if t.stderr:
                print(t.stderr)
        if args.print_times:
            print_time("Test execution time: %.1f seconds", t.test_time)
    finally:
        if io_lock:
            io_lock.release()
    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        sys.exit(t.status)
def run_test(t, io_lock=None, run_async=True):
    """ Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
    """
    t.headline = ""
    command_args = []
    if debug:
        command_args.extend(['-m', debug])
    if args.devmode:
        command_args.append('-X dev')
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.testno, total_num_tests,
                float(t.testno) * 100.0 / float(total_num_tests),
                '%', t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
    if not suppress_output and not catch_output:
        # defer printing the headline until test is done
        sys.stdout.write(t.headline)

    # fixture dirs: the test's own directory plus the shared fixture dir
    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)
    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)
class RunTest(threading.Thread):
    """ Test Runner class.

    One instance will be created for each job thread in multi-job mode.
    Each worker pulls testcases from the shared queue until it sees the
    None sentinel.
    """
    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        # iter(get, None): keep consuming until the sentinel appears
        for t in iter(self.queue.get, None):
            run_test(t, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
846 print("Running tests using %d jobs" % args
.jobs
)
850 testlock
= threading
.Lock()
851 # Start worker threads to consume the queue
852 threads
= [RunTest(queue
=testq
, io_lock
=testlock
) for _
in range(args
.jobs
)]
856 # wait on the queue rather than the individual threads
860 run_test(t
, io_lock
=None, run_async
=False)
862 # --- all tests are complete by the time we get here ---
864 tests
[0].total_time
= time_func() - total_start_time
865 print_time("Total execution time for all tests: %.1f seconds", tests
[0].total_time
)
867 passed
= [t
for t
in tests
if t
.status
== 0]
868 fail
= [t
for t
in tests
if t
.status
== 1]
869 no_result
= [t
for t
in tests
if t
.status
== 2]
871 # print summaries, but only if multiple tests were run
872 if len(tests
) != 1 and args
.execute_tests
:
873 if passed
and args
.print_passed_summary
:
875 sys
.stdout
.write("\nPassed the following test:\n")
877 sys
.stdout
.write("\nPassed the following %d tests:\n" % len(passed
))
878 paths
= [x
.path
for x
in passed
]
879 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
882 sys
.stdout
.write("\nFailed the following test:\n")
884 sys
.stdout
.write("\nFailed the following %d tests:\n" % len(fail
))
885 paths
= [x
.path
for x
in fail
]
886 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
888 if len(no_result
) == 1:
889 sys
.stdout
.write("\nNO RESULT from the following test:\n")
891 sys
.stdout
.write("\nNO RESULT from the following %d tests:\n" % len(no_result
))
892 paths
= [x
.path
for x
in no_result
]
893 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
895 # save the fails to a file
897 with
open(args
.error_log
, "w") as f
:
899 paths
= [x
.path
for x
in fail
]
902 # if there are no fails, file will be cleared
905 if args
.output
== '-':
908 f
= open(args
.xml
, 'w')
910 #f.write("test_result = [\n")
915 if args
.output
!= '-':
919 if isinstance(sys
.stdout
, Tee
):
920 sys
.stdout
.file.close()
921 if isinstance(sys
.stderr
, Tee
):
922 sys
.stderr
.file.close()
926 elif no_result
and args
.dont_ignore_skips
:
927 # if no fails, but skips were found
934 # indent-tabs-mode:nil
936 # vim: set expandtab tabstop=4 shiftwidth=4: