Merge pull request #4553 from mwichmann/clone-variables
[scons.git] / runtest.py
blob1922ccff79d0c1a72143f36323367ced9362d70d
1 #!/usr/bin/env python
3 # Copyright The SCons Foundation
5 """runtest - wrapper script for running SCons tests
7 The SCons test suite consists of:
9 * unit tests - *Tests.py files from the SCons/ directory
10 * end-to-end tests - *.py files in the test/ directory that
11 require the custom SCons framework from
12 testing/framework.
14 This script adds SCons/ and testing/ directories to PYTHONPATH,
15 performs test discovery and processes tests according to options.
16 """
18 import argparse
19 import itertools
20 import os
21 import subprocess
22 import sys
23 import tempfile
24 import threading
25 import time
26 from abc import ABC, abstractmethod
27 from io import StringIO
28 from pathlib import Path, PurePath, PureWindowsPath
29 from queue import Queue
30 from typing import List, TextIO, Optional
cwd = os.getcwd()

# Module-level state, filled in after option parsing; several helper
# functions below read these directly rather than taking parameters.
debug: Optional[str] = None        # debugger module name ("pdb") when -d given
scons: Optional[str] = None        # path to SCons script when -x given
catch_output: bool = False         # capture test stdout/stderr instead of streaming
suppress_output: bool = False      # captured output is not echoed at all

script = PurePath(sys.argv[0]).name
usagestr = f"{script} [OPTIONS] [TEST ...]"
epilogstr = """\
Environment Variables:
    PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
    TESTCMD_VERBOSE: turn on verbosity in TestCommand\
"""

# this is currently expected to be global, maybe refactor later?
unittests: List[str]
# Command-line definition. RawDescriptionHelpFormatter keeps the
# hand-formatted epilog intact; allow_abbrev=False avoids surprising
# prefix matches of long option names.
parser = argparse.ArgumentParser(
    usage=usagestr,
    epilog=epilogstr,
    allow_abbrev=False,
    formatter_class=argparse.RawDescriptionHelpFormatter,
)

# test selection options:
testsel = parser.add_argument_group(description='Test selection options:')
testsel.add_argument(metavar='TEST', nargs='*', dest='testlist',
                     help="Select TEST(s) (tests and/or directories) to run")
testlisting = testsel.add_mutually_exclusive_group()
testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                         help="Select only tests in FILE")
testlisting.add_argument('-a', '--all', action='store_true',
                         help="Select all tests")
testlisting.add_argument('--retry', action='store_true',
                         help="Rerun the last failed tests in 'failed_tests.log'")
testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                     help="""Exclude tests in FILE from current selection""")
testtype = testsel.add_mutually_exclusive_group()
testtype.add_argument('--e2e-only', action='store_true',
                      help="Exclude unit tests from selection")
testtype.add_argument('--unit-only', action='store_true',
                      help="Exclude end-to-end tests from selection")

# miscellaneous options
parser.add_argument('-b', '--baseline', metavar='BASE',
                    help="Run test scripts against baseline BASE.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Run test scripts under the Python debugger.")
parser.add_argument('-D', '--devmode', action='store_true',
                    help="Run tests in Python's development mode (Py3.7+ only).")
parser.add_argument('-e', '--external', action='store_true',
                    help="Run the script in external mode (for external Tools)")
def posint(arg: str) -> int:
    """Argparse ``type`` callable accepting a non-negative integer.

    Raises argparse.ArgumentTypeError for negative values so argparse
    reports a clean usage error. Zero is allowed: for -j/--jobs it
    means "use the detected CPU count".
    """
    value = int(arg)
    if value < 0:
        raise argparse.ArgumentTypeError("JOBS value must not be negative")
    return value
parser.add_argument('-j', '--jobs', metavar='JOBS', default=1, type=posint,
                    help="Run tests in JOBS parallel jobs (0 for cpu_count).")
parser.add_argument('-l', '--list', action='store_true', dest='list_only',
                    help="List available tests and exit.")
parser.add_argument('-n', '--no-exec', action='store_false',
                    dest='execute_tests',
                    help="No execute, just print command lines.")
parser.add_argument('--nopipefiles', action='store_false',
                    dest='allow_pipe_files',
                    help="""Do not use the "file pipe" workaround for subprocess
                    for starting tests. See source code for warnings.""")
parser.add_argument('-P', '--python', metavar='PYTHON',
                    help="Use the specified Python interpreter.")
parser.add_argument('--quit-on-failure', action='store_true',
                    help="Quit on any test failure.")
parser.add_argument('--runner', metavar='CLASS',
                    help="Test runner class for unit tests.")
parser.add_argument('-X', dest='scons_exec', action='store_true',
                    help="Test script is executable, don't feed to Python.")
parser.add_argument('-x', '--exec', metavar="SCRIPT",
                    help="Test using SCRIPT as path to SCons.")
# --faillog and --no-faillog share the 'error_log' dest: the default is
# to log failures; --no-faillog overrides it back to None.
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (enabled by default, "
                         "default file 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    default='failed_tests.log',
                    help="Do not log failed tests to a file")

parser.add_argument('--no-ignore-skips', dest='dont_ignore_skips',
                    action='store_true',
                    default=False,
                    help="If any tests are skipped, exit status 2")

outctl = parser.add_argument_group(description='Output control options:')
outctl.add_argument('-k', '--no-progress', action='store_false',
                    dest='print_progress',
                    help="Suppress count and progress percentage messages.")
outctl.add_argument('--passed', action='store_true',
                    dest='print_passed_summary',
                    help="Summarize which tests passed.")
outctl.add_argument('-q', '--quiet', action='store_false',
                    dest='printcommand',
                    help="Don't print the test being executed.")
outctl.add_argument('-s', '--short-progress', action='store_true',
                    help="""Short progress, prints only the command line
                    and a progress percentage, no results.""")
outctl.add_argument('-t', '--time', action='store_true', dest='print_times',
                    help="Print test execution time.")
outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                    help="""Set verbose level
                    (1=print executed commands,
                    2=print commands and non-zero output,
                    3=print commands and all output).""")
# maybe add?
# outctl.add_argument('--version', action='version', version=f'{script} 1.0')

logctl = parser.add_argument_group(description='Log control options:')
logctl.add_argument('-o', '--output', metavar='LOG', help="Save console output to LOG.")
logctl.add_argument(
    '--xml',
    metavar='XML',
    help="Save results to XML in SCons XML format (use - for stdout).",
)
# process args and handle a few specific cases:
args: argparse.Namespace = parser.parse_args()

# we can't do this check with an argparse exclusive group, since those
# only work with optional args, and the cmdline tests (args.testlist)
# are not optional args,
if args.testlist and (args.testlistfile or args.all or args.retry):
    sys.stderr.write(
        parser.format_usage()
        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
    )
    sys.exit(1)

# --retry is just a shorthand for -f with the default failure log
if args.retry:
    args.testlistfile = 'failed_tests.log'

if args.testlistfile:
    # args.testlistfile changes from a string to a pathlib Path object
    try:
        ptest = Path(args.testlistfile)
        args.testlistfile = ptest.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + f'error: -f/--file testlist file "{args.testlistfile}" not found\n'
        )
        sys.exit(1)

if args.excludelistfile:
    # args.excludelistfile changes from a string to a pathlib Path object
    try:
        pexcl = Path(args.excludelistfile)
        args.excludelistfile = pexcl.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + f'error: --exclude-list file "{args.excludelistfile}" not found\n'
        )
        sys.exit(1)

if args.jobs == 0:
    try:
        # on Linux, check available rather than physical CPUs
        args.jobs = len(os.sched_getaffinity(0))
    except AttributeError:
        # Windows
        args.jobs = os.cpu_count()

# sanity check: cpu detection above may itself have produced 0/None
if args.jobs == 0:
    sys.stderr.write(
        parser.format_usage()
        + "Unable to detect CPU count, give -j a non-zero value\n"
    )
    sys.exit(1)

if args.jobs > 1 or args.output:
    # 1. don't let tests write stdout/stderr directly if multi-job,
    # else outputs will interleave and be hard to read.
    # 2. If we're going to write a logfile, we also need to catch the output.
    catch_output = True

if not args.printcommand:
    suppress_output = catch_output = True

if args.verbose:
    os.environ['TESTCMD_VERBOSE'] = str(args.verbose)

if args.short_progress:
    args.print_progress = True
    suppress_output = catch_output = True

if args.debug:
    # TODO: add a way to pass a specific debugger
    debug = "pdb"

if args.exec:
    scons = args.exec
# --- setup stdout/stderr ---
class Unbuffered:
    """Stream proxy that flushes after every write.

    Progress output should appear immediately even when the underlying
    stream is block-buffered (e.g. redirected to a file); all other
    attributes are delegated to the wrapped stream.
    """
    def __init__(self, file):
        self.file = file

    def write(self, arg):
        self.file.write(arg)
        self.file.flush()

    def __getattr__(self, attr):
        # delegate everything else (fileno, isatty, encoding, ...)
        return getattr(self.file, attr)

sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

# possible alternative: switch to using print, and:
# print = functools.partial(print, flush)
if args.output:
    class Tee:
        """Stream proxy that duplicates writes to a logfile and a console stream."""
        def __init__(self, openfile, stream):
            self.file = openfile
            self.stream = stream

        def write(self, data):
            self.file.write(data)
            self.stream.write(data)

        def flush(self):
            # bugfix: flush() was declared as flush(self, data) and passed
            # the argument through to file.flush(data) -- file flush takes
            # no argument, so any caller of sys.stdout.flush() would have
            # raised TypeError.
            self.file.flush()
            self.stream.flush()

    logfile = open(args.output, 'w')
    # this is not ideal: we monkeypatch stdout/stderr a second time
    # (already did for Unbuffered), so here we can't easily detect what
    # state we're in on closedown. Just hope it's okay...
    sys.stdout = Tee(logfile, sys.stdout)
    sys.stderr = Tee(logfile, sys.stderr)
# --- define helpers ----
if sys.platform == 'win32':
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,  # flags
        ctypes.c_ulong,  # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        """Return the association-related string for *filetype*"""
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        # start with a 260-char (MAX_PATH-sized) buffer and grow on demand
        cch = ctypes.c_ulong(260)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                # E_POINTER signals "buffer too small"; cch has been
                # updated with the required size, so retry with it
                if e.winerror != E_POINTER:
                    raise
                continue
            break
        return buf.value
# Define spawn_it() according to the output-handling mode selected.
# Contract (all variants): returns (stderr, stdout, returncode), which is
# the order SystemExecutor.execute() unpacks.
if not catch_output:
    # Without any output suppressed, we let the subprocess
    # write its stuff freely to stdout/stderr.

    def spawn_it(command_args, env):
        """Run the test; output streams straight through (not captured)."""
        cp = subprocess.run(command_args, shell=False, env=env)
        # nothing was captured, so both output slots are None
        return cp.stderr, cp.stdout, cp.returncode

else:
    # Else, we catch the output of both pipes...
    if args.allow_pipe_files:
        # The subprocess.Popen() suffers from a well-known
        # problem. Data for stdout/stderr is read into a
        # memory buffer of fixed size, 65K which is not very much.
        # When it fills up, it simply stops letting the child process
        # write to it. The child will then sit and patiently wait to
        # be able to write the rest of its output. Hang!
        # In order to work around this, we follow a suggestion
        # by Anders Pearson in
        # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
        # and pass temp file objects to Popen() instead of the ubiquitous
        # subprocess.PIPE.

        def spawn_it(command_args, env):
            """Run the test, capturing output via temp files (deadlock-safe)."""
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )
            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                spawned_stdout = tmp_stdout.read()
                spawned_stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()

            return spawned_stderr, spawned_stdout, cp.returncode

    else:
        # We get here only if the user gave the '--nopipefiles'
        # option, meaning the "temp file" approach for
        # subprocess.communicate() above shouldn't be used.
        # He hopefully knows what he's doing, but again we have a
        # potential deadlock situation in the following code:
        # If the subprocess writes a lot of data to its stderr,
        # the pipe will fill up (nobody's reading it yet) and the
        # subprocess will wait for someone to read it.
        # But the parent process is trying to read from stdin
        # (but the subprocess isn't writing anything there).
        # Hence a deadlock.
        # Be dragons here! Better don't use this!

        def spawn_it(command_args, env):
            """Run the test, capturing output via PIPE (may deadlock)."""
            cp = subprocess.run(
                command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            # bugfix: previously returned (stdout, stderr, rc), which the
            # caller unpacked as (stderr, stdout, rc) -- the two captured
            # streams ended up swapped whenever --nopipefiles was used.
            return cp.stderr, cp.stdout, cp.returncode
class RuntestBase(ABC):
    """Base class for a single testcase.

    Holds the bookkeeping shared by all executor flavors: a sequential
    test number, the command line to run, captured output/status, and
    timing. Concrete subclasses provide :meth:`execute`.
    """
    # class-wide counter handing out test numbers automatically
    _ids = itertools.count(1)

    def __init__(self, path, spe=None):
        self.path = str(path)
        self.testno = next(self._ids)
        self.stdout = self.stderr = self.status = None
        self.abspath = path.absolute()
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            # optional search path: the first directory that actually
            # contains the file overrides the default absolute path
            for searchdir in spe:
                candidate = os.path.join(searchdir, path)
                if os.path.isfile(candidate):
                    self.abspath = candidate
                    break

    @abstractmethod
    def execute(self, env):
        pass
class SystemExecutor(RuntestBase):
    """Testcase executor that delegates to the module-level spawn_it()."""

    def execute(self, env):
        # spawn_it yields (stderr, stdout, exit-status); the output slots
        # are None when output isn't being captured.
        self.stderr, self.stdout, exitcode = spawn_it(self.command_args, env)
        self.status = exitcode
        # runtest convention: 0 = pass, 1 = fail, 2 = no result.
        # Anything outside that range deserves a loud note.
        if not 0 <= exitcode <= 2:
            sys.stdout.write("Unexpected exit status %d\n" % exitcode)
class PopenExecutor(RuntestBase):
    """ Test class for tests executed with Popen

    A bit of a misnomer as the Popen call is now wrapped
    by calling subprocess.run (behind the covers uses Popen.
    Very similar to SystemExecutor, but doesn't allow for not catching
    the output).
    """
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    if args.allow_pipe_files:

        def execute(self, env) -> None:
            # Captures output via temp files, dodging the fixed-size
            # pipe-buffer deadlock described at spawn_it() above.
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                self.command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
                check=False,
            )
            self.status = cp.returncode

            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                self.stdout = tmp_stdout.read()
                self.stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()
    else:

        def execute(self, env) -> None:
            # NOTE(review): with PIPE and no text mode, cp.stdout/cp.stderr
            # are bytes here, while the temp-file branch (mode='w+t')
            # yields str -- confirm downstream consumers handle both.
            cp = subprocess.run(
                self.command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
                check=False,
            )
            self.status, self.stdout, self.stderr = cp.returncode, cp.stdout, cp.stderr
class XML(PopenExecutor):
    """ Test class for tests that will output in scons xml """
    @staticmethod
    def header(f):
        # open the enclosing <results> container
        f.write('  <results>\n')

    def write(self, f):
        # Emit this testcase as one <test> element.
        # NOTE(review): stdout/stderr are written without XML escaping --
        # test output containing '<', '&' etc. produces invalid XML; verify
        # whether consumers tolerate this.
        f.write('    <test>\n')
        f.write('      <file_name>%s</file_name>\n' % self.path)
        f.write('      <command_line>%s</command_line>\n' % self.command_str)
        f.write('      <exit_status>%s</exit_status>\n' % self.status)
        f.write('      <stdout>%s</stdout>\n' % self.stdout)
        f.write('      <stderr>%s</stderr>\n' % self.stderr)
        f.write('      <time>%.1f</time>\n' % self.test_time)
        f.write('    </test>\n')

    def footer(self, f):
        # close the container, recording the whole run's wall time
        f.write('  <time>%.1f</time>\n' % self.total_time)
        f.write('  </results>\n')
# Pick the executor class: XML when an XML report was requested,
# otherwise the plain spawn_it()-based executor.
if args.xml:
    Test = XML
else:
    Test = SystemExecutor

# --- start processing ---
if not args.baseline or args.baseline == '.':
    baseline = cwd
elif args.baseline == '-':
    sys.stderr.write(
        "'baseline' logic used to checkout from svn. It has been removed. "
        "If you used this, please let us know on devel mailing list, "
        "IRC, or discord server\n"
    )
    sys.exit(-1)
else:
    baseline = args.baseline
scons_runtest_dir = baseline

# in external mode the paths are left empty: the external tool supplies
# its own SCons layout
if not args.external:
    scons_script_dir = os.path.join(baseline, 'scripts')
    scons_tools_dir = os.path.join(baseline, 'bin')
    scons_lib_dir = baseline
else:
    scons_script_dir = ''
    scons_tools_dir = ''
    scons_lib_dir = ''

# environment passed down to every test process
testenv = {
    'SCONS_RUNTEST_DIR': scons_runtest_dir,
    'SCONS_TOOLS_DIR': scons_tools_dir,
    'SCONS_SCRIPT_DIR': scons_script_dir,
    'SCONS_CWD': cwd,
}

if scons:
    # Let the version of SCons that the -x option pointed to find
    # its own modules.
    testenv['SCONS'] = scons
elif scons_lib_dir:
    # Because SCons is really aggressive about finding its modules,
    # it sometimes finds SCons modules elsewhere on the system.
    # This forces SCons to use the modules that are being tested.
    testenv['SCONS_LIB_DIR'] = scons_lib_dir

if args.scons_exec:
    testenv['SCONS_EXEC'] = '1'

if args.external:
    testenv['SCONS_EXTERNAL_TEST'] = '1'

# Insert scons path and path for testing framework to PYTHONPATH
scriptpath = os.path.dirname(os.path.realpath(__file__))
frameworkpath = os.path.join(scriptpath, 'testing', 'framework')
testenv['PYTHONPATH'] = os.pathsep.join((scons_lib_dir, frameworkpath))
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
    testenv['PYTHONPATH'] = testenv['PYTHONPATH'] + os.pathsep + pythonpath

if sys.platform == 'win32':
    # Windows doesn't support "shebang" lines directly (the Python launcher
    # and Windows Store version do, but you have to get them launched first)
    # so to directly launch a script we depend on an assoc for .py to work.
    # Some systems may have none, and in some cases IDE programs take over
    # the assoc. Detect this so the small number of tests affected can skip.
    try:
        python_assoc = get_template_command('.py')
    except OSError:
        python_assoc = None
    if not python_assoc or "py" not in python_assoc:
        testenv['SCONS_NO_DIRECT_SCRIPT'] = '1'

os.environ.update(testenv)

# Clear _JAVA_OPTIONS which java tools output to stderr when run breaking tests
if '_JAVA_OPTIONS' in os.environ:
    del os.environ['_JAVA_OPTIONS']


# ---[ test discovery ]------------------------------------
# This section figures out which tests to run.
#
# The initial testlist is made by reading from the testlistfile,
# if supplied, or by looking at the test arguments, if supplied,
# or by looking for all test files if the "all" argument is supplied.
# One of the three is required.
#
# Each test path, whichever of the three sources it comes from,
# specifies either a test file or a directory to search for
# SCons tests. SCons code layout assumes that any file under the 'SCons'
# subdirectory that ends with 'Tests.py' is a unit test, and any Python
# script (*.py) under the 'test' subdirectory is an end-to-end test.
# We need to track these because they are invoked differently.
# find_unit_tests and find_e2e_tests are used for this searching.
#
# Note that there are some tests under 'SCons' that *begin* with
# 'test_', but they're packaging and installation tests, not
# functional tests, so we don't execute them by default. (They can
# still be executed by hand, though).
#
# Test exclusions, if specified, are then applied.
def scanlist(testfile):
    """Read a testlist file, returning its entries as Path objects.

    Lines whose first character is '#' are comments and are skipped;
    the rest are stripped, with empty results dropped. Entries may be
    written with either forward or backward slashes: on non-Windows
    platforms each entry is funneled through PureWindowsPath (which
    accepts both separators) so that membership tests like
    ``file in scanned_list`` work regardless of the separator used.
    """
    raw = testfile.read_text().splitlines()
    entries = [line.strip() for line in raw if not line.startswith('#')]
    if sys.platform == 'win32':
        return [Path(entry) for entry in entries if entry]
    return [Path(PureWindowsPath(entry).as_posix()) for entry in entries if entry]
def find_unit_tests(directory):
    """Recursively collect unit-test files (*Tests.py) under *directory*.

    A directory containing a 'sconstest.skip' marker contributes no
    files (though the walk still descends into its subdirectories).
    Returns a sorted list of Path objects.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # honor the per-directory skip marker
        if 'sconstest.skip' in filenames:
            continue
        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith("Tests.py")
        )
    return sorted(found)
def find_e2e_tests(directory):
    """Recursively collect end-to-end test scripts (*.py) under *directory*.

    Honors two per-directory control files: 'sconstest.skip' suppresses
    the whole directory's files, and '.exclude_tests' lists individual
    files (parsed by scanlist) to leave out. Returns a sorted list of
    Path objects.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # honor the per-directory skip marker
        if 'sconstest.skip' in filenames:
            continue

        # pull in any per-directory exclusion list
        excludes = []
        if ".exclude_tests" in filenames:
            excludes = scanlist(Path(dirpath, ".exclude_tests").resolve())

        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith(".py") and Path(fname) not in excludes
        )
    return sorted(found)
# initial selection:
# if we have a testlist file read that, else hunt for tests.
unittests = []
endtests = []
if args.testlistfile:
    tests = scanlist(args.testlistfile)
else:
    testpaths = []
    if args.all:  # -a flag
        testpaths = [Path('SCons'), Path('test')]
    elif args.testlist:  # paths given on cmdline
        if sys.platform == 'win32':
            testpaths = [Path(t) for t in args.testlist]
        else:
            testpaths = [Path(PureWindowsPath(t).as_posix()) for t in args.testlist]

    for path in testpaths:
        # Clean up path removing leading ./ or .\
        name = str(path)
        if name.startswith('.') and name[1] in (os.sep, os.altsep):
            # bugfix: was "path.with_name(tn[2:])" -- 'tn' was an undefined
            # name (NameError), and with_name() would reject any multi-part
            # remainder anyway; rebuild the Path from the trimmed string.
            path = Path(name[2:])

        if path.exists():
            if path.is_dir():
                # directories are classified by their top path component
                if path.parts[0] == "SCons" or path.parts[0] == "testing":
                    unittests.extend(find_unit_tests(path))
                elif path.parts[0] == 'test':
                    endtests.extend(find_e2e_tests(path))
                # else: TODO: what if user pointed to a dir outside scons tree?
            else:
                if path.match("*Tests.py"):
                    unittests.append(path)
                elif path.match("*.py"):
                    endtests.append(path)

    tests = sorted(unittests + endtests)

# Remove exclusions:
if args.e2e_only:
    tests = [t for t in tests if not t.match("*Tests.py")]
if args.unit_only:
    tests = [t for t in tests if t.match("*Tests.py")]
if args.excludelistfile:
    excludetests = scanlist(args.excludelistfile)
    tests = [t for t in tests if t not in excludetests]

# did we end up with any tests?
if not tests:
    sys.stderr.write(parser.format_usage() + """
error: no tests matching the specification were found.
See "Test selection options" in the help for details on
how to specify and/or exclude tests.
""")
    sys.exit(1)
# ---[ test processing ]-----------------------------------
# wrap each selected path in the executor class chosen above
tests = [Test(t) for t in tests]

if args.list_only:
    for t in tests:
        print(t.path)
    sys.exit(0)

# pick the interpreter used to run each test script
if not args.python:
    if os.name == 'java':
        args.python = os.path.join(sys.prefix, 'jython')
    else:
        args.python = sys.executable
os.environ["python_executable"] = args.python

# print_time is a no-op unless -t/--time was given, so callers can
# invoke it unconditionally
if args.print_times:

    def print_time(fmt, tm):
        print(fmt % tm)

else:

    def print_time(fmt, tm):
        pass

time_func = time.perf_counter
total_start_time = time_func()
total_num_tests = len(tests)
def log_result(t, io_lock=None):
    """Report one completed testcase on stdout.

    "log" in this case means writing to stdout. This may be called from
    any of several worker threads in a multi-job run, so an optional
    lock serializes the writes; single-job runs pass no lock. When
    --quit-on-failure is active, a failing test ends the run here.

    Args:
        t (Test): (completed) testcase instance
        io_lock (threading.Lock): (optional) lock to use
    """
    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock is not None:
        io_lock.acquire()
    try:
        # the headline was deferred while output was being captured
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            if t.stdout:
                print(t.stdout)
            if t.stderr:
                print(t.stderr)
        print_time("Test execution time: %.1f seconds", t.test_time)
    finally:
        if io_lock is not None:
            io_lock.release()

    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        print(t.status)
        sys.exit(1)
def run_test(t, io_lock=None, run_async=True):
    """ Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
    """
    t.headline = ""
    command_args = []
    if debug:
        # run the test under the debugger module chosen earlier (-d)
        command_args.extend(['-m', debug])
    if args.devmode and sys.version_info >= (3, 7, 0):
        command_args.append('-X dev')
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            # "N/TOTAL (PCT%) command" headline
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.testno, total_num_tests,
                float(t.testno) * 100.0 / float(total_num_tests),
                "%",
                t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
    if not suppress_output and not catch_output:
        # defer printing the headline until test is done
        # NOTE(review): this branch actually writes the headline *now*;
        # deferral (via log_result) happens in the captured-output case --
        # the comment appears inverted, confirm intent.
        sys.stdout.write(t.headline)
    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)

    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)
class RunTest(threading.Thread):
    """Worker thread for multi-job mode.

    Each instance pulls testcases off the shared queue and runs them,
    one at a time, until the queue produces the sentinel value None.
    """

    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        # two-argument iter(): keep fetching until the sentinel None appears
        for testcase in iter(self.queue.get, None):
            run_test(testcase, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
# Dispatch: parallel via worker threads and a queue, or a simple
# sequential loop for the single-job case.
if args.jobs > 1:
    print("Running tests using %d jobs" % args.jobs)
    testq = Queue()
    for t in tests:
        testq.put(t)
    testlock = threading.Lock()
    # Start worker threads to consume the queue
    threads = [RunTest(queue=testq, io_lock=testlock) for _ in range(args.jobs)]
    for t in threads:
        # daemon threads: don't keep the process alive if we bail out early
        t.daemon = True
        t.start()
    # wait on the queue rather than the individual threads
    testq.join()
else:
    for t in tests:
        run_test(t, io_lock=None, run_async=False)
# --- all tests are complete by the time we get here ---
if tests:
    # total runtime is stashed on the first testcase (used by XML.footer)
    tests[0].total_time = time_func() - total_start_time
    print_time("Total execution time for all tests: %.1f seconds", tests[0].total_time)

# bucket results by the runtest status convention (0/1/2)
passed = [t for t in tests if t.status == 0]
fail = [t for t in tests if t.status == 1]
no_result = [t for t in tests if t.status == 2]

# print summaries, but only if multiple tests were run
if len(tests) != 1 and args.execute_tests:
    if passed and args.print_passed_summary:
        if len(passed) == 1:
            sys.stdout.write("\nPassed the following test:\n")
        else:
            sys.stdout.write("\nPassed the following %d tests:\n" % len(passed))
        paths = [x.path for x in passed]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if fail:
        if len(fail) == 1:
            sys.stdout.write("\nFailed the following test:\n")
        else:
            sys.stdout.write("\nFailed the following %d tests:\n" % len(fail))
        paths = [x.path for x in fail]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if no_result:
        if len(no_result) == 1:
            sys.stdout.write("\nNO RESULT from the following test:\n")
        else:
            sys.stdout.write("\nNO RESULT from the following %d tests:\n" % len(no_result))
        paths = [x.path for x in no_result]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")

# save the fails to a file
if args.error_log:
    with open(args.error_log, "w") as f:
        if fail:
            paths = [x.path for x in fail]
            for test in paths:
                print(test, file=f)
        # if there are no fails, file will be cleared
# Emit the XML report, close any Tee logfile, and exit with the
# conventional status: 1 = failures, 2 = skips-only (when requested), 0 = ok.
if args.xml:
    # bugfix: the "-" stdout sentinel is documented on --xml ("use - for
    # stdout") but was previously tested against args.output, so
    # "--xml -" opened a file literally named "-" unless -o was also "-".
    if args.xml == '-':
        f = sys.stdout
    else:
        f = open(args.xml, 'w')
    tests[0].header(f)
    for t in tests:
        t.write(f)
    tests[0].footer(f)
    if f is not sys.stdout:
        f.close()

if args.output:
    # close only the logfile half of the Tee; the console streams stay open
    if isinstance(sys.stdout, Tee):
        sys.stdout.file.close()
    if isinstance(sys.stderr, Tee):
        sys.stderr.file.close()

if fail:
    sys.exit(1)
elif no_result and args.dont_ignore_skips:
    # if no fails, but skips were found
    sys.exit(2)
else:
    sys.exit(0)
930 # Local Variables:
931 # tab-width:4
932 # indent-tabs-mode:nil
933 # End:
934 # vim: set expandtab tabstop=4 shiftwidth=4: