# Copyright The SCons Foundation

"""runtest - wrapper script for running SCons tests

The SCons test suite consists of:

* unit tests - *Tests.py files from the SCons/ dir
* end-to-end tests - *.py files in the test/ directory that
  require the custom SCons framework from testing/

This script adds SCons/ and testing/ directories to PYTHONPATH,
performs test discovery and processes tests according to options.
"""
# TODO: normalize requested and testlist/exclude paths for easier comparison.
# e.g.: "runtest foo/bar" on windows will produce paths like foo/bar\test.py
# this is hard to match with excludelists, and makes those both os.sep-specific
# and command-line-typing specific.
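# A minimal sketch of what such a normalizer could look like (hypothetical
# helper, not wired into this script): collapse the path and force forward
# slashes so exclude-list comparisons stop being os.sep-specific.
#
#   def _normalize_testpath(p: str) -> str:
#       return os.path.normpath(p).replace(os.sep, '/')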
import argparse
import glob
import os
import subprocess
import sys
import tempfile
import threading
import time
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
catch_output = False
suppress_output = False

script = os.path.basename(sys.argv[0])
usagestr = """\
%(script)s [OPTIONS] [TEST ...]
""" % locals()
epilogstr = """\
Environment Variables:
  PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
  TESTCMD_VERBOSE: turn on verbosity in TestCommand\
"""
parser = argparse.ArgumentParser(
    usage=usagestr,
    epilog=epilogstr,
    allow_abbrev=False,
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
# test selection options:
testsel = parser.add_argument_group(description='Test selection options:')
testsel.add_argument(metavar='TEST', nargs='*', dest='testlist',
                     help="Select TEST(s) (tests and/or directories) to run")
testlisting = testsel.add_mutually_exclusive_group()
testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                         help="Select only tests in FILE")
testlisting.add_argument('-a', '--all', action='store_true',
                         help="Select all tests")
testlisting.add_argument('--retry', action='store_true',
                         help="Rerun the last failed tests in 'failed_tests.log'")
testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                     help="""Exclude tests in FILE from current selection""")
testtype = testsel.add_mutually_exclusive_group()
testtype.add_argument('--e2e-only', action='store_true',
                      help="Exclude unit tests from selection")
testtype.add_argument('--unit-only', action='store_true',
                      help="Exclude end-to-end tests from selection")
# miscellaneous options
parser.add_argument('-b', '--baseline', metavar='BASE',
                    help="Run test scripts against baseline BASE.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Run test scripts under the Python debugger.")
parser.add_argument('-D', '--devmode', action='store_true',
                    help="Run tests in Python's development mode (Py3.7+ only).")
parser.add_argument('-e', '--external', action='store_true',
                    help="Run the script in external mode (for external Tools)")
parser.add_argument('-j', '--jobs', metavar='JOBS', default=1, type=int,
                    help="Run tests in JOBS parallel jobs (0 for cpu_count).")
parser.add_argument('-l', '--list', action='store_true', dest='list_only',
                    help="List available tests and exit.")
parser.add_argument('-n', '--no-exec', action='store_false',
                    dest='execute_tests',
                    help="No execute, just print command lines.")
parser.add_argument('--nopipefiles', action='store_false',
                    dest='allow_pipe_files',
                    help="""Do not use the "file pipe" workaround for subprocess
                         for starting tests. See source code for warnings.""")
parser.add_argument('-P', '--python', metavar='PYTHON',
                    help="Use the specified Python interpreter.")
parser.add_argument('--quit-on-failure', action='store_true',
                    help="Quit on any test failure.")
parser.add_argument('--runner', metavar='CLASS',
                    help="Test runner class for unit tests.")
parser.add_argument('-X', dest='scons_exec', action='store_true',
                    help="Test script is executable, don't feed to Python.")
parser.add_argument('-x', '--exec', metavar="SCRIPT",
                    help="Test using SCRIPT as path to SCons.")
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (enabled by default, "
                         "default file 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    default='failed_tests.log',
                    help="Do not log failed tests to a file")

parser.add_argument('--no-ignore-skips', dest='dont_ignore_skips',
                    action='store_true',
                    help="If any tests are skipped, exit status 2")
outctl = parser.add_argument_group(description='Output control options:')
outctl.add_argument('-k', '--no-progress', action='store_false',
                    dest='print_progress',
                    help="Suppress count and progress percentage messages.")
outctl.add_argument('--passed', action='store_true',
                    dest='print_passed_summary',
                    help="Summarize which tests passed.")
outctl.add_argument('-q', '--quiet', action='store_false',
                    dest='printcommand',
                    help="Don't print the test being executed.")
outctl.add_argument('-s', '--short-progress', action='store_true',
                    help="""Short progress, prints only the command line
                         and a progress percentage.""")
outctl.add_argument('-t', '--time', action='store_true', dest='print_times',
                    help="Print test execution time.")
outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                    help="""Set verbose level
                         (1=print executed commands,
                         2=print commands and non-zero output,
                         3=print commands and all output).""")
# outctl.add_argument('--version', action='version', version='%s 1.0' % script)
logctl = parser.add_argument_group(description='Log control options:')
logctl.add_argument('-o', '--output', metavar='LOG', help="Save console output to LOG.")
logctl.add_argument('--xml', metavar='XML', help="Save results to XML in SCons XML format.")
# process args and handle a few specific cases:
args = parser.parse_args()
# we can't do this check with an argparse exclusive group, since those
# only work with optional args, and the cmdline tests (args.testlist)
# are not optional args.
if args.testlist and (args.testlistfile or args.all or args.retry):
    sys.stderr.write(
        parser.format_usage()
        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
    )
    sys.exit(1)

if args.retry:
    args.testlistfile = 'failed_tests.log'
if args.testlistfile:
    # args.testlistfile changes from a string to a pathlib Path object
    try:
        p = Path(args.testlistfile)
        # TODO simplify when Py3.5 dropped
        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            args.testlistfile = p.resolve()
        else:
            args.testlistfile = p.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + 'error: -f/--file testlist file "%s" not found\n' % p
        )
        sys.exit(1)
if args.excludelistfile:
    # args.excludelistfile changes from a string to a pathlib Path object
    try:
        p = Path(args.excludelistfile)
        # TODO simplify when Py3.5 dropped
        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            args.excludelistfile = p.resolve()
        else:
            args.excludelistfile = p.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + 'error: --exclude-list file "%s" not found\n' % p
        )
        sys.exit(1)
if args.jobs == 0:
    try:
        # on Linux, check available rather than physical CPUs
        args.jobs = len(os.sched_getaffinity(0))
    except AttributeError:
        # sched_getaffinity is not available everywhere
        args.jobs = os.cpu_count()
if not args.jobs:
    sys.stderr.write(
        parser.format_usage()
        + "Unable to detect CPU count, give -j a non-zero value\n"
    )
    sys.exit(1)
if args.jobs > 1 or args.output:
    # 1. don't let tests write stdout/stderr directly if multi-job,
    # else outputs will interleave and be hard to read.
    # 2. If we're going to write a logfile, we also need to catch the output.
    catch_output = True

if not args.printcommand:
    suppress_output = catch_output = True
if args.verbose:
    os.environ['TESTCMD_VERBOSE'] = str(args.verbose)

if args.short_progress:
    args.print_progress = True
    suppress_output = catch_output = True
# TODO: add a way to pass a specific debugger
debug = 'pdb' if args.debug else ''
# --- setup stdout/stderr ---
class Unbuffered:
    def __init__(self, file):
        self.file = file

    def write(self, arg):
        self.file.write(arg)
        self.file.flush()

    def __getattr__(self, attr):
        return getattr(self.file, attr)

sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

# possible alternative: switch to using print, and:
# print = functools.partial(print, flush=True)  # needs "import functools"
if args.output:
    class Tee:
        def __init__(self, openfile, stream):
            self.file = openfile
            self.stream = stream

        def write(self, data):
            self.file.write(data)
            self.stream.write(data)

        def flush(self):
            self.file.flush()
            self.stream.flush()

    logfile = open(args.output, 'w')
    # this is not ideal: we monkeypatch stdout/stderr a second time
    # (already did for Unbuffered), so here we can't easily detect what
    # state we're in on closedown. Just hope it's okay...
    sys.stdout = Tee(logfile, sys.stdout)
    sys.stderr = Tee(logfile, sys.stderr)
# --- define helpers ----
if sys.platform == 'win32':
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,  # flags
        ctypes.c_ulong,  # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        cch = ctypes.c_ulong(260)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                if e.winerror != E_POINTER:
                    raise
                # buffer too small: cch was updated with the needed size, retry
                continue
            return buf.value
if not suppress_output and not catch_output:
    # Without any output suppressed, we let the subprocess
    # write its stuff freely to stdout/stderr.
    def spawn_it(command_args, env):
        cp = subprocess.run(command_args, shell=False, env=env)
        return cp.stdout, cp.stderr, cp.returncode
else:
    # Else, we catch the output of both pipes...
    if args.allow_pipe_files:
        # The subprocess.Popen() suffers from a well-known
        # problem. Data for stdout/stderr is read into a
        # memory buffer of fixed size, 65K, which is not very much.
        # When it fills up, it simply stops letting the child process
        # write to it. The child will then sit and patiently wait to
        # be able to write the rest of its output. Hang!
        # In order to work around this, we follow a suggestion
        # by Anders Pearson in
        # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
        # and pass temp file objects to Popen() instead of the ubiquitous
        # subprocess.PIPE.
        def spawn_it(command_args, env):
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(command_args,
                                stdout=tmp_stdout,
                                stderr=tmp_stderr,
                                shell=False,
                                env=env)

            # Rewind to start of files
            tmp_stdout.seek(0)
            tmp_stderr.seek(0)

            # Read output
            spawned_stdout = tmp_stdout.read()
            spawned_stderr = tmp_stderr.read()

            # Remove temp files by closing them
            tmp_stdout.close()
            tmp_stderr.close()

            # Return values
            return spawned_stderr, spawned_stdout, cp.returncode
    else:
        # We get here only if the user gave the '--nopipefiles'
        # option, meaning the "temp file" approach for
        # subprocess.communicate() above shouldn't be used.
        # They hopefully know what they're doing, but again we have a
        # potential deadlock situation in the following code:
        # If the subprocess writes a lot of data to its stderr,
        # the pipe will fill up (nobody's reading it yet) and the
        # subprocess will wait for someone to read it.
        # But the parent process is trying to read from stdin
        # (but the subprocess isn't writing anything there).
        # Hence a deadlock.
        # There be dragons here! Better not to use this!
        def spawn_it(command_args, env):
            cp = subprocess.run(command_args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=False,
                                env=env)
            return cp.stdout, cp.stderr, cp.returncode
class RuntestBase(ABC):
    """ Base class for tests """
    def __init__(self, path, num, spe=None):
        self.path = path
        self.num = num
        self.headline = ""
        self.stdout = self.stderr = self.status = None
        self.abspath = os.path.abspath(path)
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            for d in spe:
                f = os.path.join(d, path)
                if os.path.isfile(f):
                    self.abspath = f
                    break

    @abstractmethod
    def execute(self, env):
        pass
class SystemExecutor(RuntestBase):
    """ Test class for tests executed with spawn_it() """
    def execute(self, env):
        self.stderr, self.stdout, s = spawn_it(self.command_args, env)
        self.status = s
        if s < 0 or s > 2:
            sys.stdout.write("Unexpected exit status %d\n" % s)
class PopenExecutor(RuntestBase):
    """ Test class for tests executed with Popen

    A bit of a misnomer, as the Popen call is now wrapped
    by calling subprocess.run (which uses Popen behind the covers).
    Very similar to SystemExecutor, but doesn't allow for not
    catching the output.
    """
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    if args.allow_pipe_files:
        def execute(self, env):
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(self.command_args,
                                stdout=tmp_stdout,
                                stderr=tmp_stderr,
                                shell=False,
                                env=env)
            self.status = cp.returncode

            # Rewind to start of files
            tmp_stdout.seek(0)
            tmp_stderr.seek(0)

            # Read output
            self.stdout = tmp_stdout.read()
            self.stderr = tmp_stderr.read()

            # Remove temp files by closing them
            tmp_stdout.close()
            tmp_stderr.close()
    else:
        def execute(self, env):
            cp = subprocess.run(self.command_args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=False,
                                env=env)
            self.status, self.stdout, self.stderr = cp.returncode, cp.stdout, cp.stderr
class XML(PopenExecutor):
    """ Test class for tests that will output in scons xml """
    @staticmethod
    def header(f):
        f.write('  <results>\n')

    def write_xml(self, f):
        f.write('  <test>\n')
        f.write('    <file_name>%s</file_name>\n' % self.path)
        f.write('    <command_line>%s</command_line>\n' % self.command_str)
        f.write('    <exit_status>%s</exit_status>\n' % self.status)
        f.write('    <stdout>%s</stdout>\n' % self.stdout)
        f.write('    <stderr>%s</stderr>\n' % self.stderr)
        f.write('    <time>%.1f</time>\n' % self.test_time)
        f.write('  </test>\n')

    def footer(self, f):
        f.write('  <time>%.1f</time>\n' % self.total_time)
        f.write('  </results>\n')
if args.xml:
    Test = XML
else:
    Test = SystemExecutor
# --- start processing ---

if not args.baseline or args.baseline == '.':
    baseline = os.getcwd()
elif args.baseline == '-':
    print("This logic used to check out from svn. It has been removed. "
          "If you used this, please let us know on the devel mailing list, "
          "IRC, or the Discord server.")
    sys.exit(1)
else:
    baseline = args.baseline
scons_runtest_dir = baseline

if not args.external:
    scons_script_dir = os.path.join(baseline, 'scripts')
    scons_tools_dir = os.path.join(baseline, 'bin')
    scons_lib_dir = baseline
else:
    scons_script_dir = ''
    scons_tools_dir = ''
    scons_lib_dir = ''
testenv = {
    'SCONS_RUNTEST_DIR': scons_runtest_dir,
    'SCONS_TOOLS_DIR': scons_tools_dir,
    'SCONS_SCRIPT_DIR': scons_script_dir,
}
scons = args.exec
if scons:
    # Let the version of SCons that the -x option pointed to find
    # its own modules.
    testenv['SCONS'] = scons
else:
    # Because SCons is really aggressive about finding its modules,
    # it sometimes finds SCons modules elsewhere on the system.
    # This forces SCons to use the modules that are being tested.
    testenv['SCONS_LIB_DIR'] = scons_lib_dir

if args.scons_exec:
    testenv['SCONS_EXEC'] = '1'

if args.external:
    testenv['SCONS_EXTERNAL_TEST'] = '1'
# Insert scons path and path for testing framework to PYTHONPATH
scriptpath = os.path.dirname(os.path.realpath(__file__))
frameworkpath = os.path.join(scriptpath, 'testing', 'framework')
testenv['PYTHONPATH'] = os.pathsep.join((scons_lib_dir, frameworkpath))
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
    testenv['PYTHONPATH'] = testenv['PYTHONPATH'] + os.pathsep + pythonpath
if sys.platform == 'win32':
    # Windows doesn't support "shebang" lines directly (the Python launcher
    # and Windows Store version do, but you have to get them launched first)
    # so to directly launch a script we depend on an assoc for .py to work.
    # Some systems may have none, and in some cases IDE programs take over
    # the assoc. Detect this so the small number of tests affected can skip.
    try:
        python_assoc = get_template_command('.py')
    except OSError:
        python_assoc = None
    if not python_assoc or "py" not in python_assoc:
        testenv['SCONS_NO_DIRECT_SCRIPT'] = '1'

os.environ.update(testenv)

# Clear _JAVA_OPTIONS, which java tools write to stderr when run, breaking tests
if '_JAVA_OPTIONS' in os.environ:
    del os.environ['_JAVA_OPTIONS']
# ---[ test discovery ]------------------------------------
# This section figures out which tests to run.
#
# The initial testlist is made by reading from the testlistfile,
# if supplied, or by looking at the test arguments, if supplied,
# or by looking for all test files if the "all" argument is supplied.
# One of the three is required.
#
# Each test path, whichever of the three sources it comes from,
# specifies either a test file or a directory to search for
# SCons tests. SCons code layout assumes that any file under the 'SCons'
# subdirectory that ends with 'Tests.py' is a unit test, and any Python
# script (*.py) under the 'test' subdirectory is an end-to-end test.
# We need to track these because they are invoked differently.
# find_unit_tests and find_e2e_tests are used for this searching.
#
# Note that there are some tests under 'SCons' that *begin* with
# 'test_', but they're packaging and installation tests, not
# functional tests, so we don't execute them by default. (They can
# still be executed by hand, though.)
#
# Test exclusions, if specified, are then applied.
def scanlist(testlist):
    """ Process a testlist file """
    tests = [t.strip() for t in testlist if not t.startswith('#')]
    return [t for t in tests if t]
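# For reference, scanlist turns a testlist file like the following
# (comment lines and blank lines dropped, whitespace stripped):
#
#     # tests that failed last release
#     test/option-j.py
#
#     test/option-v.py
#
# into the list ['test/option-j.py', 'test/option-v.py'].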
def find_unit_tests(directory):
    """ Look for unit tests """
    result = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        for fname in filenames:
            if fname.endswith("Tests.py"):
                result.append(os.path.join(dirpath, fname))
    return sorted(result)
def find_e2e_tests(directory):
    """ Look for end-to-end tests """
    result = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue

        # Slurp in any tests in exclude lists
        excludes = []
        if ".exclude_tests" in filenames:
            p = Path(dirpath).joinpath(".exclude_tests")
            # TODO simplify when Py3.5 dropped
            if sys.version_info.major == 3 and sys.version_info.minor < 6:
                excludefile = p.resolve()
            else:
                excludefile = p.resolve(strict=True)
            with excludefile.open() as f:
                excludes = scanlist(f)

        for fname in filenames:
            if fname.endswith(".py") and fname not in excludes:
                result.append(os.path.join(dirpath, fname))
    return sorted(result)
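# For reference, given a hypothetical layout like:
#
#     test/D/.exclude_tests   (a file containing the line "broken.py")
#     test/D/broken.py
#     test/D/good.py
#     test/E/sconstest.skip
#     test/E/anything.py
#
# find_e2e_tests('test') returns only ['test/D/good.py']: broken.py is
# dropped by the .exclude_tests list, and test/E is skipped entirely.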
if args.testlistfile:
    with args.testlistfile.open() as f:
        tests = scanlist(f)
else:
    testpaths = []
    if args.all:
        testpaths = ['SCons', 'test']
    elif args.testlist:
        testpaths = args.testlist
    unittests = []
    endtests = []
    for tp in testpaths:
        # Clean up path so it can match startswith's below
        # remove leading ./ or .\
        if tp.startswith('.') and tp[1] in (os.sep, os.altsep):
            tp = tp[2:]

        for path in glob.glob(tp):
            if os.path.isdir(path):
                if path.startswith(('SCons', 'testing')):
                    unittests.extend(find_unit_tests(path))
                elif path.startswith('test'):
                    endtests.extend(find_e2e_tests(path))
            else:
                if path.endswith("Tests.py"):
                    unittests.append(path)
                elif path.endswith(".py"):
                    endtests.append(path)
    tests = sorted(unittests + endtests)
if args.e2e_only:
    tests = [t for t in tests if not t.endswith("Tests.py")]
if args.unit_only:
    tests = [t for t in tests if t.endswith("Tests.py")]
if args.excludelistfile:
    with args.excludelistfile.open() as f:
        excludetests = scanlist(f)
    tests = [t for t in tests if t not in excludetests]
if not tests:
    sys.stderr.write(parser.format_usage() + """
error: no tests were found.
Tests can be specified on the command line, read from a file with
the -f/--file option, or discovered with -a/--all to run all tests.
""")
    sys.exit(1)
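# For reference, the three selection sources map to invocations like:
#     python runtest.py test/option-j.py     # tests named on the command line
#     python runtest.py -f failed_tests.log  # tests read from a file
#     python runtest.py -a                   # discover and run all tests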
# ---[ test processing ]-----------------------------------
tests = [Test(t, n + 1) for n, t in enumerate(tests)]

if args.list_only:
    for t in tests:
        sys.stdout.write(t.path + "\n")
    sys.exit(0)
if not args.python:
    if os.name == 'java':
        args.python = os.path.join(sys.prefix, 'jython')
    else:
        args.python = sys.executable
os.environ["python_executable"] = args.python
if args.print_times:
    def print_time(fmt, tm):
        sys.stdout.write(fmt % tm)
else:
    def print_time(fmt, tm):
        pass

time_func = time.perf_counter
total_start_time = time_func()
total_num_tests = len(tests)
def log_result(t, io_lock=None):
    """ log the result of a test.

    "log" in this case means writing to stdout. Since we might be
    called from any of several different threads (multi-job run),
    we need to lock access to the log to avoid interleaving. The same
    would apply if output was a file.

    Args:
        t (Test): (completed) testcase instance
        io_lock (threading.lock): (optional) lock to use
    """
    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock:
        io_lock.acquire()
    try:
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            if t.stdout:
                print(t.stdout)
            if t.stderr:
                print(t.stderr)
        print_time("Test execution time: %.1f seconds\n", t.test_time)
    finally:
        if io_lock:
            io_lock.release()

    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        sys.exit(1)
def run_test(t, io_lock=None, run_async=True):
    """ Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
    """
    command_args = []
    if debug:
        command_args.extend(['-m', debug])
    if args.devmode and sys.version_info >= (3, 7, 0):
        command_args.extend(['-X', 'dev'])
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.num, total_num_tests,
                float(t.num) * 100.0 / float(total_num_tests),
                '%',
                t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
    if not suppress_output and not catch_output:
        # defer printing the headline until test is done
        sys.stdout.write(t.headline)
    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)

    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)
class RunTest(threading.Thread):
    """ Test Runner class.

    One instance will be created for each job thread in multi-job mode
    """
    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        for t in iter(self.queue.get, None):
            run_test(t, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
if args.jobs > 1:
    print("Running tests using %d jobs" % args.jobs)
    testq = Queue()
    for t in tests:
        testq.put(t)
    testlock = threading.Lock()
    # Start worker threads to consume the queue
    threads = [RunTest(queue=testq, io_lock=testlock) for _ in range(args.jobs)]
    for thread in threads:
        thread.daemon = True
        thread.start()
    # wait on the queue rather than the individual threads
    testq.join()
else:
    for t in tests:
        run_test(t, io_lock=None, run_async=False)
# --- all tests are complete by the time we get here ---
if tests:
    tests[0].total_time = time_func() - total_start_time
    print_time("Total execution time for all tests: %.1f seconds\n", tests[0].total_time)
passed = [t for t in tests if t.status == 0]
fail = [t for t in tests if t.status == 1]
no_result = [t for t in tests if t.status == 2]
# print summaries, but only if multiple tests were run
if len(tests) != 1 and args.execute_tests:
    if passed and args.print_passed_summary:
        if len(passed) == 1:
            sys.stdout.write("\nPassed the following test:\n")
        else:
            sys.stdout.write("\nPassed the following %d tests:\n" % len(passed))
        paths = [x.path for x in passed]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if fail:
        if len(fail) == 1:
            sys.stdout.write("\nFailed the following test:\n")
        else:
            sys.stdout.write("\nFailed the following %d tests:\n" % len(fail))
        paths = [x.path for x in fail]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if no_result:
        if len(no_result) == 1:
            sys.stdout.write("\nNO RESULT from the following test:\n")
        else:
            sys.stdout.write("\nNO RESULT from the following %d tests:\n" % len(no_result))
        paths = [x.path for x in no_result]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
# save the fails to a file
if args.error_log:
    with open(args.error_log, "w") as f:
        if fail:
            paths = [x.path for x in fail]
            for test in paths:
                print(test, file=f)
        # if there are no fails, file will be cleared
if args.xml:
    if args.output == '-':
        f = sys.stdout
    else:
        f = open(args.xml, 'w')
    Test.header(f)
    # f.write("test_result = [\n")
    for t in tests:
        t.write_xml(f)
    tests[0].footer(f)
    if args.output != '-':
        f.close()
if args.output:
    if isinstance(sys.stdout, Tee):
        sys.stdout.file.close()
    if isinstance(sys.stderr, Tee):
        sys.stderr.file.close()
if fail:
    sys.exit(1)
elif no_result and args.dont_ignore_skips:
    # if no fails, but skips were found
    sys.exit(2)
else:
    sys.exit(0)
915 # indent-tabs-mode:nil
917 # vim: set expandtab tabstop=4 shiftwidth=4: