3 # Copyright The SCons Foundation
5 """runtest - wrapper script for running SCons tests
7 The SCons test suite consists of:
9 * unit tests - *Tests.py files from the SCons/ directory
10 * end-to-end tests - *.py files in the test/ directory that
11 require the custom SCons framework from
14 This script adds SCons/ and testing/ directories to PYTHONPATH,
15 performs test discovery and processes tests according to options.
26 from abc
import ABC
, abstractmethod
27 from io
import StringIO
28 from pathlib
import Path
, PurePath
, PureWindowsPath
29 from queue
import Queue
30 from typing
import List
, TextIO
, Optional
33 debug
: Optional
[str] = None
34 scons
: Optional
[str] = None
35 catch_output
: bool = False
36 suppress_output
: bool = False
37 script
= PurePath(sys
.argv
[0]).name
38 usagestr
= f
"{script} [OPTIONS] [TEST ...]"
40 Environment Variables:
41 PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
42 TESTCMD_VERBOSE: turn on verbosity in TestCommand\
45 # this is currently expected to be global, maybe refactor later?
48 parser
= argparse
.ArgumentParser(
52 formatter_class
=argparse
.RawDescriptionHelpFormatter
,
55 # test selection options:
56 testsel
= parser
.add_argument_group(description
='Test selection options:')
57 testsel
.add_argument(metavar
='TEST', nargs
='*', dest
='testlist',
58 help="Select TEST(s) (tests and/or directories) to run")
59 testlisting
= testsel
.add_mutually_exclusive_group()
60 testlisting
.add_argument('-f', '--file', metavar
='FILE', dest
='testlistfile',
61 help="Select only tests in FILE")
62 testlisting
.add_argument('-a', '--all', action
='store_true',
63 help="Select all tests")
64 testlisting
.add_argument('--retry', action
='store_true',
65 help="Rerun the last failed tests in 'failed_tests.log'")
66 testsel
.add_argument('--exclude-list', metavar
="FILE", dest
='excludelistfile',
67 help="""Exclude tests in FILE from current selection""")
68 testtype
= testsel
.add_mutually_exclusive_group()
69 testtype
.add_argument('--e2e-only', action
='store_true',
70 help="Exclude unit tests from selection")
71 testtype
.add_argument('--unit-only', action
='store_true',
72 help="Exclude end-to-end tests from selection")
74 # miscellaneous options
75 parser
.add_argument('-b', '--baseline', metavar
='BASE',
76 help="Run test scripts against baseline BASE.")
77 parser
.add_argument('-d', '--debug', action
='store_true',
78 help="Run test scripts under the Python debugger.")
79 parser
.add_argument('-D', '--devmode', action
='store_true',
80 help="Run tests in Python's development mode (Py3.7+ only).")
81 parser
.add_argument('-e', '--external', action
='store_true',
82 help="Run the script in external mode (for external Tools)")
def posint(arg: str) -> int:
    """Special positive-int type for argparse.

    Accepts any non-negative integer string (0 means "use cpu_count",
    handled by the caller).

    Raises:
        argparse.ArgumentTypeError: if the value is negative.
    """
    value = int(arg)
    if value < 0:
        raise argparse.ArgumentTypeError("JOBS value must not be negative")
    return value
91 parser
.add_argument('-j', '--jobs', metavar
='JOBS', default
=1, type=posint
,
92 help="Run tests in JOBS parallel jobs (0 for cpu_count).")
93 parser
.add_argument('-l', '--list', action
='store_true', dest
='list_only',
94 help="List available tests and exit.")
95 parser
.add_argument('-n', '--no-exec', action
='store_false',
97 help="No execute, just print command lines.")
98 parser
.add_argument('--nopipefiles', action
='store_false',
99 dest
='allow_pipe_files',
100 help="""Do not use the "file pipe" workaround for subprocess
101 for starting tests. See source code for warnings.""")
102 parser
.add_argument('-P', '--python', metavar
='PYTHON',
103 help="Use the specified Python interpreter.")
104 parser
.add_argument('--quit-on-failure', action
='store_true',
105 help="Quit on any test failure.")
106 parser
.add_argument('--runner', metavar
='CLASS',
107 help="Test runner class for unit tests.")
108 parser
.add_argument('-X', dest
='scons_exec', action
='store_true',
109 help="Test script is executable, don't feed to Python.")
110 parser
.add_argument('-x', '--exec', metavar
="SCRIPT",
111 help="Test using SCRIPT as path to SCons.")
112 parser
.add_argument('--faillog', dest
='error_log', metavar
="FILE",
113 default
='failed_tests.log',
114 help="Log failed tests to FILE (enabled by default, "
115 "default file 'failed_tests.log')")
116 parser
.add_argument('--no-faillog', dest
='error_log',
117 action
='store_const', const
=None,
118 default
='failed_tests.log',
119 help="Do not log failed tests to a file")
121 parser
.add_argument('--no-ignore-skips', dest
='dont_ignore_skips',
124 help="If any tests are skipped, exit status 2")
126 outctl
= parser
.add_argument_group(description
='Output control options:')
127 outctl
.add_argument('-k', '--no-progress', action
='store_false',
128 dest
='print_progress',
129 help="Suppress count and progress percentage messages.")
130 outctl
.add_argument('--passed', action
='store_true',
131 dest
='print_passed_summary',
132 help="Summarize which tests passed.")
133 outctl
.add_argument('-q', '--quiet', action
='store_false',
135 help="Don't print the test being executed.")
136 outctl
.add_argument('-s', '--short-progress', action
='store_true',
137 help="""Short progress, prints only the command line
138 and a progress percentage, no results.""")
139 outctl
.add_argument('-t', '--time', action
='store_true', dest
='print_times',
140 help="Print test execution time.")
141 outctl
.add_argument('--verbose', metavar
='LEVEL', type=int, choices
=range(1, 4),
142 help="""Set verbose level
143 (1=print executed commands,
144 2=print commands and non-zero output,
145 3=print commands and all output).""")
147 # outctl.add_argument('--version', action='version', version=f'{script} 1.0')
149 logctl
= parser
.add_argument_group(description
='Log control options:')
150 logctl
.add_argument('-o', '--output', metavar
='LOG', help="Save console output to LOG.")
154 help="Save results to XML in SCons XML format (use - for stdout).",
157 # process args and handle a few specific cases:
158 args
: argparse
.Namespace
= parser
.parse_args()
160 # we can't do this check with an argparse exclusive group, since those
161 # only work with optional args, and the cmdline tests (args.testlist)
162 # are not optional args,
163 if args
.testlist
and (args
.testlistfile
or args
.all
or args
.retry
):
165 parser
.format_usage()
166 + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
171 args
.testlistfile
= 'failed_tests.log'
173 if args
.testlistfile
:
174 # args.testlistfile changes from a string to a pathlib Path object
176 ptest
= Path(args
.testlistfile
)
177 args
.testlistfile
= ptest
.resolve(strict
=True)
178 except FileNotFoundError
:
180 parser
.format_usage()
181 + f
'error: -f/--file testlist file "{args.testlistfile}" not found\n'
185 if args
.excludelistfile
:
186 # args.excludelistfile changes from a string to a pathlib Path object
188 pexcl
= Path(args
.excludelistfile
)
189 args
.excludelistfile
= pexcl
.resolve(strict
=True)
190 except FileNotFoundError
:
192 parser
.format_usage()
193 + f
'error: --exclude-list file "{args.excludelistfile}" not found\n'
199 # on Linux, check available rather than physical CPUs
200 args
.jobs
= len(os
.sched_getaffinity(0))
201 except AttributeError:
203 args
.jobs
= os
.cpu_count()
208 parser
.format_usage()
209 + "Unable to detect CPU count, give -j a non-zero value\n"
213 if args
.jobs
> 1 or args
.output
:
214 # 1. don't let tests write stdout/stderr directly if multi-job,
215 # else outputs will interleave and be hard to read.
216 # 2. If we're going to write a logfile, we also need to catch the output.
219 if not args
.printcommand
:
220 suppress_output
= catch_output
= True
223 os
.environ
['TESTCMD_VERBOSE'] = str(args
.verbose
)
225 if args
.short_progress
:
226 args
.print_progress
= True
227 suppress_output
= catch_output
= True
230 # TODO: add a way to pass a specific debugger
236 # --- setup stdout/stderr ---
def __init__(self, file):
    """Wrap *file* so that every write is followed by a flush.

    Args:
        file: the underlying stream (e.g. the real sys.stdout);
            stored for use by write() and attribute delegation.
    """
    self.file = file
def write(self, arg):
    """Write *arg* to the wrapped stream and flush immediately.

    Flushing on every write keeps progress output timely even when
    stdout is block-buffered (e.g. redirected to a file or pipe).
    """
    self.file.write(arg)
    self.file.flush()
def __getattr__(self, attr):
    # Delegate every other attribute (fileno, isatty, encoding, ...) to
    # the wrapped stream, making this wrapper a drop-in replacement.
    # NOTE: __getattr__ is only invoked for attributes NOT found on the
    # wrapper itself, so write() above is never shadowed by this.
    return getattr(self.file, attr)
248 sys
.stdout
= Unbuffered(sys
.stdout
)
249 sys
.stderr
= Unbuffered(sys
.stderr
)
251 # possible alternative: switch to using print, and:
252 # print = functools.partial(print, flush)
def __init__(self, openfile, stream):
    """Duplicate writes to a logfile and a live stream.

    Args:
        openfile: the already-opened log file object.
        stream: the original stream (sys.stdout or sys.stderr).
    """
    self.file = openfile
    self.stream = stream
def write(self, data):
    """Write *data* to both destinations: logfile first, then stream."""
    for target in (self.file, self.stream):
        target.write(data)
def flush(self, data=None):
    """Flush both underlying streams.

    Bug fix: the original signature required a *data* argument and
    forwarded it to flush(), but io streams' flush() takes no
    arguments and callers invoke flush() with none — so either path
    raised TypeError. *data* is retained (and ignored) only for
    backward compatibility with any caller that passed it.
    """
    self.file.flush()
    self.stream.flush()
268 logfile
= open(args
.output
, 'w')
269 # this is not ideal: we monkeypatch stdout/stderr a second time
270 # (already did for Unbuffered), so here we can't easily detect what
271 # state we're in on closedown. Just hope it's okay...
272 sys
.stdout
= Tee(logfile
, sys
.stdout
)
273 sys
.stderr
= Tee(logfile
, sys
.stderr
)
275 # --- define helpers ----
276 if sys
.platform
== 'win32':
277 # thanks to Eryk Sun for this recipe
280 shlwapi
= ctypes
.OleDLL('shlwapi')
281 shlwapi
.AssocQueryStringW
.argtypes
= (
282 ctypes
.c_ulong
, # flags
283 ctypes
.c_ulong
, # str
284 ctypes
.c_wchar_p
, # pszAssoc
285 ctypes
.c_wchar_p
, # pszExtra
286 ctypes
.c_wchar_p
, # pszOut
287 ctypes
.POINTER(ctypes
.c_ulong
), # pcchOut
290 ASSOCF_NOTRUNCATE
= 0x00000020
291 ASSOCF_INIT_IGNOREUNKNOWN
= 0x00000400
293 ASSOCSTR_EXECUTABLE
= 2
294 E_POINTER
= ctypes
.c_long(0x80004003).value
296 def get_template_command(filetype
, verb
=None):
297 """Return the association-related string for *filetype*"""
298 flags
= ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
299 assoc_str
= ASSOCSTR_COMMAND
300 cch
= ctypes
.c_ulong(260)
302 buf
= (ctypes
.c_wchar
* cch
.value
)()
304 shlwapi
.AssocQueryStringW(
305 flags
, assoc_str
, filetype
, verb
, buf
, ctypes
.byref(cch
)
308 if e
.winerror
!= E_POINTER
:
316 # Without any output suppressed, we let the subprocess
317 # write its stuff freely to stdout/stderr.
def spawn_it(command_args, env):
    """Run a test subprocess with its output flowing straight through.

    Nothing is captured, so the returned stdout/stderr are None;
    only the returncode carries information.
    """
    completed = subprocess.run(command_args, shell=False, env=env)
    return completed.stdout, completed.stderr, completed.returncode
324 # Else, we catch the output of both pipes...
325 if args
.allow_pipe_files
:
326 # The subprocess.Popen() suffers from a well-known
327 # problem. Data for stdout/stderr is read into a
328 # memory buffer of fixed size, 65K which is not very much.
329 # When it fills up, it simply stops letting the child process
330 # write to it. The child will then sit and patiently wait to
331 # be able to write the rest of its output. Hang!
332 # In order to work around this, we follow a suggestion
333 # by Anders Pearson in
334 # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
335 # and pass temp file objects to Popen() instead of the ubiquitous
338 def spawn_it(command_args
, env
):
339 # Create temporary files
340 tmp_stdout
= tempfile
.TemporaryFile(mode
='w+t')
341 tmp_stderr
= tempfile
.TemporaryFile(mode
='w+t')
342 # Start subprocess...
352 # Rewind to start of files
356 spawned_stdout
= tmp_stdout
.read()
357 spawned_stderr
= tmp_stderr
.read()
359 # Remove temp files by closing them
364 return spawned_stderr
, spawned_stdout
, cp
.returncode
367 # We get here only if the user gave the '--nopipefiles'
368 # option, meaning the "temp file" approach for
369 # subprocess.communicate() above shouldn't be used.
370 # He hopefully knows what he's doing, but again we have a
371 # potential deadlock situation in the following code:
372 # If the subprocess writes a lot of data to its stderr,
373 # the pipe will fill up (nobody's reading it yet) and the
374 # subprocess will wait for someone to read it.
375 # But the parent process is trying to read from stdin
376 # (but the subprocess isn't writing anything there).
378 # There be dragons here! Better not to use this!
380 def spawn_it(command_args
, env
):
383 stdout
=subprocess
.PIPE
,
384 stderr
=subprocess
.PIPE
,
388 return cp
.stdout
, cp
.stderr
, cp
.returncode
class RuntestBase(ABC):
    """Base class for a single test case.

    Records the test path plus bookkeeping (sequence number, captured
    output, exit status, timing); concrete subclasses supply execute().
    """
    _ids = itertools.count(1)  # to generate test numbers automatically

    def __init__(self, path, spe=None):
        """
        Args:
            path: test file path (a pathlib.Path).
            spe: optional iterable of directories to search for *path*;
                the first match replaces abspath — TODO confirm callers'
                exact use, the spe branch was reconstructed from fragments.
        """
        self.path = str(path)
        self.testno = next(self._ids)
        self.stdout = self.stderr = self.status = None
        self.abspath = path.absolute()
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            for d in spe:
                f = os.path.join(d, path)
                if os.path.isfile(f):
                    self.abspath = f
                    break

    @abstractmethod
    def execute(self, env):
        """Run the test; implementations set status/stdout/stderr."""
        pass
class SystemExecutor(RuntestBase):
    """Test case executed with spawn_it() (global output handling)."""

    def execute(self, env):
        """Run the test and record stderr, stdout and exit status."""
        self.stderr, self.stdout, s = spawn_it(self.command_args, env)
        self.status = s
        # Statuses 0/1/2 (pass/fail/no-result) and 5 appear to be the
        # expected framework exit codes — anything else gets flagged.
        if s < 0 or s > 2 and s != 5:
            sys.stdout.write("Unexpected exit status %d\n" % s)
424 class PopenExecutor(RuntestBase
):
425 """ Test class for tests executed with Popen
427 A bit of a misnomer as the Popen call is now wrapped
428 by calling subprocess.run (behind the covers uses Popen.
429 Very similar to SystemExecutor, but doesn't allow for not catching
432 # For an explanation of the following 'if ... else'
433 # and the 'allow_pipe_files' option, please check out the
434 # definition of spawn_it() above.
435 if args
.allow_pipe_files
:
437 def execute(self
, env
) -> None:
438 # Create temporary files
439 tmp_stdout
= tempfile
.TemporaryFile(mode
='w+t')
440 tmp_stderr
= tempfile
.TemporaryFile(mode
='w+t')
441 # Start subprocess...
450 self
.status
= cp
.returncode
453 # Rewind to start of files
457 self
.stdout
= tmp_stdout
.read()
458 self
.stderr
= tmp_stderr
.read()
460 # Remove temp files by closing them
465 def execute(self
, env
) -> None:
468 stdout
=subprocess
.PIPE
,
469 stderr
=subprocess
.PIPE
,
474 self
.status
, self
.stdout
, self
.stderr
= cp
.returncode
, cp
.stdout
, cp
.stderr
476 class XML(PopenExecutor
):
477 """ Test class for tests that will output in scons xml """
480 f
.write(' <results>\n')
484 f
.write(' <file_name>%s</file_name>\n' % self
.path
)
485 f
.write(' <command_line>%s</command_line>\n' % self
.command_str
)
486 f
.write(' <exit_status>%s</exit_status>\n' % self
.status
)
487 f
.write(' <stdout>%s</stdout>\n' % self
.stdout
)
488 f
.write(' <stderr>%s</stderr>\n' % self
.stderr
)
489 f
.write(' <time>%.1f</time>\n' % self
.test_time
)
490 f
.write(' </test>\n')
493 f
.write(' <time>%.1f</time>\n' % self
.total_time
)
494 f
.write(' </results>\n')
499 Test
= SystemExecutor
501 # --- start processing ---
503 if not args
.baseline
or args
.baseline
== '.':
505 elif args
.baseline
== '-':
507 "'baseline' logic used to checkout from svn. It has been removed. "
508 "If you used this, please let us know on devel mailing list, "
509 "IRC, or discord server\n"
513 baseline
= args
.baseline
514 scons_runtest_dir
= baseline
516 if not args
.external
:
517 scons_script_dir
= os
.path
.join(baseline
, 'scripts')
518 scons_tools_dir
= os
.path
.join(baseline
, 'bin')
519 scons_lib_dir
= baseline
521 scons_script_dir
= ''
526 'SCONS_RUNTEST_DIR': scons_runtest_dir
,
527 'SCONS_TOOLS_DIR': scons_tools_dir
,
528 'SCONS_SCRIPT_DIR': scons_script_dir
,
533 # Let the version of SCons that the -x option pointed to find
535 testenv
['SCONS'] = scons
537 # Because SCons is really aggressive about finding its modules,
538 # it sometimes finds SCons modules elsewhere on the system.
539 # This forces SCons to use the modules that are being tested.
540 testenv
['SCONS_LIB_DIR'] = scons_lib_dir
543 testenv
['SCONS_EXEC'] = '1'
546 testenv
['SCONS_EXTERNAL_TEST'] = '1'
548 # Insert scons path and path for testing framework to PYTHONPATH
549 scriptpath
= os
.path
.dirname(os
.path
.realpath(__file__
))
550 frameworkpath
= os
.path
.join(scriptpath
, 'testing', 'framework')
551 testenv
['PYTHONPATH'] = os
.pathsep
.join((scons_lib_dir
, frameworkpath
))
552 pythonpath
= os
.environ
.get('PYTHONPATH')
554 testenv
['PYTHONPATH'] = testenv
['PYTHONPATH'] + os
.pathsep
+ pythonpath
556 if sys
.platform
== 'win32':
557 # Windows doesn't support "shebang" lines directly (the Python launcher
558 # and Windows Store version do, but you have to get them launched first)
559 # so to directly launch a script we depend on an assoc for .py to work.
560 # Some systems may have none, and in some cases IDE programs take over
561 # the assoc. Detect this so the small number of tests affected can skip.
563 python_assoc
= get_template_command('.py')
566 if not python_assoc
or "py" not in python_assoc
:
567 testenv
['SCONS_NO_DIRECT_SCRIPT'] = '1'
569 os
.environ
.update(testenv
)
571 # Clear _JAVA_OPTIONS which java tools output to stderr when run breaking tests
572 if '_JAVA_OPTIONS' in os
.environ
:
573 del os
.environ
['_JAVA_OPTIONS']
576 # ---[ test discovery ]------------------------------------
577 # This section figures out which tests to run.
579 # The initial testlist is made by reading from the testlistfile,
580 # if supplied, or by looking at the test arguments, if supplied,
581 # or by looking for all test files if the "all" argument is supplied.
582 # One of the three is required.
584 # Each test path, whichever of the three sources it comes from,
585 # specifies either a test file or a directory to search for
586 # SCons tests. SCons code layout assumes that any file under the 'SCons'
587 # subdirectory that ends with 'Tests.py' is a unit test, and any Python
588 # script (*.py) under the 'test' subdirectory is an end-to-end test.
589 # We need to track these because they are invoked differently.
590 # find_unit_tests and find_e2e_tests are used for this searching.
592 # Note that there are some tests under 'SCons' that *begin* with
593 # 'test_', but they're packaging and installation tests, not
594 # functional tests, so we don't execute them by default. (They can
595 # still be executed by hand, though).
597 # Test exclusions, if specified, are then applied.
def scanlist(testfile):
    """Process a testlist file.

    Args:
        testfile: pathlib.Path of the list file; one test path per line,
            blank lines and comment lines ('#') are ignored.

    Returns:
        list of Path objects for the listed tests.

    Fix: comment detection now looks at the *stripped* line, so an
    indented "# comment" is ignored instead of being returned as a
    bogus test path. Also drops the redundant StringIO wrapper.
    """
    stripped = [line.strip() for line in testfile.read_text().splitlines()]
    tests = [t for t in stripped if t and not t.startswith('#')]
    # in order to allow scanned lists to work whether they use forward or
    # backward slashes, first create the object as a PureWindowsPath which
    # accepts either, then use that to make a Path object to use for
    # comparisons like "file in scanned_list".
    if sys.platform == 'win32':
        return [Path(t) for t in tests]
    return [Path(PureWindowsPath(t).as_posix()) for t in tests]
def find_unit_tests(directory):
    """Look for unit tests below *directory*.

    A unit test is any file whose name ends in "Tests.py". A directory
    containing a "sconstest.skip" marker file contributes no tests
    itself (note: its *sub*directories are still walked).

    Returns:
        sorted list of Path objects.
    """
    result = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        for fname in filenames:
            if fname.endswith("Tests.py"):
                result.append(Path(dirpath, fname))
    return sorted(result)
def find_e2e_tests(directory):
    """Look for end-to-end tests below *directory*.

    An e2e test is any "*.py" file. A directory with a "sconstest.skip"
    marker contributes no tests itself; a ".exclude_tests" file in a
    directory lists names (scanlist format) to leave out there.

    Returns:
        sorted list of Path objects.
    """
    result = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        # Slurp in any tests in exclude lists
        excludes = []
        if ".exclude_tests" in filenames:
            excludefile = Path(dirpath, ".exclude_tests").resolve()
            excludes = scanlist(excludefile)
        for fname in filenames:
            if fname.endswith(".py") and Path(fname) not in excludes:
                result.append(Path(dirpath, fname))
    return sorted(result)
650 # if we have a testlist file read that, else hunt for tests.
653 if args
.testlistfile
:
654 tests
= scanlist(args
.testlistfile
)
657 if args
.all
: # -a flag
658 testpaths
= [Path('SCons'), Path('test')]
659 elif args
.testlist
: # paths given on cmdline
660 if sys
.platform
== 'win32':
661 testpaths
= [Path(t
) for t
in args
.testlist
]
663 testpaths
= [Path(PureWindowsPath(t
).as_posix()) for t
in args
.testlist
]
665 for path
in testpaths
:
666 # Clean up path removing leading ./ or .\
668 if name
.startswith('.') and name
[1] in (os
.sep
, os
.altsep
):
669 path
= path
.with_name(tn
[2:])
673 if path
.parts
[0] == "SCons" or path
.parts
[0] == "testing":
674 unittests
.extend(find_unit_tests(path
))
675 elif path
.parts
[0] == 'test':
676 endtests
.extend(find_e2e_tests(path
))
677 # else: TODO: what if user pointed to a dir outside scons tree?
679 if path
.match("*Tests.py"):
680 unittests
.append(path
)
681 elif path
.match("*.py"):
682 endtests
.append(path
)
684 tests
= sorted(unittests
+ endtests
)
688 tests
= [t
for t
in tests
if not t
.match("*Tests.py")]
690 tests
= [t
for t
in tests
if t
.match("*Tests.py")]
691 if args
.excludelistfile
:
692 excludetests
= scanlist(args
.excludelistfile
)
693 tests
= [t
for t
in tests
if t
not in excludetests
]
695 # did we end up with any tests?
697 sys
.stderr
.write(parser
.format_usage() + """
698 error: no tests matching the specification were found.
699 See "Test selection options" in the help for details on
700 how to specify and/or exclude tests.
704 # ---[ test processing ]-----------------------------------
705 tests
= [Test(t
) for t
in tests
]
713 if os
.name
== 'java':
714 args
.python
= os
.path
.join(sys
.prefix
, 'jython')
716 args
.python
= sys
.executable
717 os
.environ
["python_executable"] = args
.python
721 def print_time(fmt
, tm
):
726 def print_time(fmt
, tm
):
729 time_func
= time
.perf_counter
730 total_start_time
= time_func()
731 total_num_tests
= len(tests
)
734 def log_result(t
, io_lock
=None):
735 """ log the result of a test.
737 "log" in this case means writing to stdout. Since we might be
738 called from any of several different threads (multi-job run),
739 we need to lock access to the log to avoid interleaving. The same
740 would apply if output was a file.
743 t (Test): (completed) testcase instance
744 io_lock (threading.lock): (optional) lock to use
747 # there is no lock in single-job run, which includes
748 # running test/runtest tests from multi-job run, so check.
752 if suppress_output
or catch_output
:
753 sys
.stdout
.write(t
.headline
)
754 if not suppress_output
:
759 print_time("Test execution time: %.1f seconds", t
.test_time
)
764 if args
.quit_on_failure
and t
.status
== 1:
765 print("Exiting due to error")
770 def run_test(t
, io_lock
=None, run_async
=True):
773 Builds the command line to give to execute().
774 Also the best place to record some information that will be
775 used in output, which in some conditions is printed here.
778 t (Test): testcase instance
779 io_lock (threading.Lock): (optional) lock to use
780 run_async (bool): whether to run asynchronously
786 command_args
.extend(['-m', debug
])
787 if args
.devmode
and sys
.version_info
>= (3, 7, 0):
788 command_args
.append('-X dev')
789 command_args
.append(t
.path
)
790 if args
.runner
and t
.path
in unittests
:
791 # For example --runner TestUnit.TAPTestRunner
792 command_args
.append('--runner ' + args
.runner
)
793 t
.command_args
= [args
.python
] + command_args
794 t
.command_str
= " ".join(t
.command_args
)
795 if args
.printcommand
:
796 if args
.print_progress
:
797 t
.headline
+= "%d/%d (%.2f%s) %s\n" % (
798 t
.testno
, total_num_tests
,
799 float(t
.testno
) * 100.0 / float(total_num_tests
),
804 t
.headline
+= t
.command_str
+ "\n"
805 if not suppress_output
and not catch_output
:
806 # defer printing the headline until test is done
807 sys
.stdout
.write(t
.headline
)
808 head
, _
= os
.path
.split(t
.abspath
)
811 fixture_dirs
.append(head
)
812 fixture_dirs
.append(os
.path
.join(scriptpath
, 'test', 'fixture'))
814 # Set the list of fixture dirs directly in the environment. Just putting
815 # it in os.environ and spawning the process is racy. Make it reliable by
816 # overriding the environment passed to execute().
817 env
= dict(os
.environ
)
818 env
['FIXTURE_DIRS'] = os
.pathsep
.join(fixture_dirs
)
820 test_start_time
= time_func()
821 if args
.execute_tests
:
824 t
.test_time
= time_func() - test_start_time
825 log_result(t
, io_lock
=io_lock
)
class RunTest(threading.Thread):
    """Test runner thread.

    One instance will be created for each job thread in multi-job mode;
    each pulls tests off the shared queue until it sees a None sentinel.
    """

    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        """
        Args:
            queue: shared Queue of Test instances; a None entry stops the thread.
            io_lock: lock serializing writes to the shared log output.
            group, target, name: passed through to threading.Thread.
        """
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        # iter(get, None): keep fetching until the None sentinel appears.
        for t in iter(self.queue.get, None):
            run_test(t, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
844 print("Running tests using %d jobs" % args
.jobs
)
848 testlock
= threading
.Lock()
849 # Start worker threads to consume the queue
850 threads
= [RunTest(queue
=testq
, io_lock
=testlock
) for _
in range(args
.jobs
)]
854 # wait on the queue rather than the individual threads
858 run_test(t
, io_lock
=None, run_async
=False)
860 # --- all tests are complete by the time we get here ---
862 tests
[0].total_time
= time_func() - total_start_time
863 print_time("Total execution time for all tests: %.1f seconds", tests
[0].total_time
)
865 passed
= [t
for t
in tests
if t
.status
== 0]
866 fail
= [t
for t
in tests
if t
.status
== 1]
867 no_result
= [t
for t
in tests
if t
.status
== 2]
869 # print summaries, but only if multiple tests were run
870 if len(tests
) != 1 and args
.execute_tests
:
871 if passed
and args
.print_passed_summary
:
873 sys
.stdout
.write("\nPassed the following test:\n")
875 sys
.stdout
.write("\nPassed the following %d tests:\n" % len(passed
))
876 paths
= [x
.path
for x
in passed
]
877 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
880 sys
.stdout
.write("\nFailed the following test:\n")
882 sys
.stdout
.write("\nFailed the following %d tests:\n" % len(fail
))
883 paths
= [x
.path
for x
in fail
]
884 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
886 if len(no_result
) == 1:
887 sys
.stdout
.write("\nNO RESULT from the following test:\n")
889 sys
.stdout
.write("\nNO RESULT from the following %d tests:\n" % len(no_result
))
890 paths
= [x
.path
for x
in no_result
]
891 sys
.stdout
.write("\t" + "\n\t".join(paths
) + "\n")
893 # save the fails to a file
895 with
open(args
.error_log
, "w") as f
:
897 paths
= [x
.path
for x
in fail
]
900 # if there are no fails, file will be cleared
903 if args
.output
== '-':
906 f
= open(args
.xml
, 'w')
908 #f.write("test_result = [\n")
913 if args
.output
!= '-':
917 if isinstance(sys
.stdout
, Tee
):
918 sys
.stdout
.file.close()
919 if isinstance(sys
.stderr
, Tee
):
920 sys
.stderr
.file.close()
924 elif no_result
and args
.dont_ignore_skips
:
925 # if no fails, but skips were found
932 # indent-tabs-mode:nil
934 # vim: set expandtab tabstop=4 shiftwidth=4: