# Merge pull request #4674 from bdbaddog/fix_2281_Aliases_ignore_pre_post_add_actions
# [scons.git] / runtest.py
# blob 262576b903b9696459556adc87530a1dbd0642b4
#!/usr/bin/env python
#
# Copyright The SCons Foundation

"""runtest - wrapper script for running SCons tests

The SCons test suite consists of:

 * unit tests - *Tests.py files from the SCons/ directory
 * end-to-end tests - *.py files in the test/ directory that
                      require the custom SCons framework from
                      testing/framework.

This script adds SCons/ and testing/ directories to PYTHONPATH,
performs test discovery and processes tests according to options.
"""
from __future__ import annotations

import argparse
import itertools
import os
import subprocess
import sys
import tempfile
import threading
import time
from abc import ABC, abstractmethod
from io import StringIO
from pathlib import Path, PurePath, PureWindowsPath
from queue import Queue
from typing import TextIO
# ---[ module globals and command-line definition ]---------------------
cwd = os.getcwd()
debug: str | None = None          # debugger module name; -d sets "pdb"
scons: str | None = None          # path to scons under test; set from -x
catch_output: bool = False        # capture test output instead of passing through
suppress_output: bool = False     # drop captured test output entirely

script = PurePath(sys.argv[0]).name
usagestr = f"{script} [OPTIONS] [TEST ...]"
epilogstr = """\
Environment Variables:
  PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
  TESTCMD_VERBOSE: turn on verbosity in TestCommand\
"""

# this is currently expected to be global, maybe refactor later?
unittests: list[str]

parser = argparse.ArgumentParser(
    usage=usagestr,
    epilog=epilogstr,
    allow_abbrev=False,
    formatter_class=argparse.RawDescriptionHelpFormatter,
)

# Test selection options:
testsel = parser.add_argument_group(description='Test selection options:')
testsel.add_argument(metavar='TEST', nargs='*', dest='testlist',
                     help="Select TEST(s) (tests and/or directories) to run")
testlisting = testsel.add_mutually_exclusive_group()
testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                         help="Select only tests in FILE")
testlisting.add_argument('-a', '--all', action='store_true',
                         help="Select all tests")
testlisting.add_argument('--retry', action='store_true',
                         help="Rerun the last failed tests in 'failed_tests.log'")
testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                     help="""Exclude tests in FILE from current selection""")
testtype = testsel.add_mutually_exclusive_group()
testtype.add_argument('--e2e-only', action='store_true',
                      help="Exclude unit tests from selection")
testtype.add_argument('--unit-only', action='store_true',
                      help="Exclude end-to-end tests from selection")

# Miscellaneous options:
parser.add_argument('-b', '--baseline', metavar='BASE',
                    help="Run test scripts against baseline BASE.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Run test scripts under the Python debugger.")
parser.add_argument('-D', '--devmode', action='store_true',
                    help="Run tests in Python's development mode (Py3.7+ only).")
parser.add_argument('-e', '--external', action='store_true',
                    help="Run the script in external mode (for external Tools)")


def posint(arg: str) -> int:
    """Special positive-int type for argparse.

    Rejects negative values; 0 is allowed (it means "use cpu_count").
    """
    num = int(arg)
    if num < 0:
        raise argparse.ArgumentTypeError("JOBS value must not be negative")
    return num


parser.add_argument('-j', '--jobs', metavar='JOBS', default=1, type=posint,
                    help="Run tests in JOBS parallel jobs (0 for cpu_count).")
parser.add_argument('-l', '--list', action='store_true', dest='list_only',
                    help="List available tests and exit.")
parser.add_argument('-n', '--no-exec', action='store_false',
                    dest='execute_tests',
                    help="No execute, just print command lines.")
parser.add_argument('--nopipefiles', action='store_false',
                    dest='allow_pipe_files',
                    help="""Do not use the "file pipe" workaround for subprocess
                         for starting tests. See source code for warnings.""")
parser.add_argument('-P', '--python', metavar='PYTHON',
                    help="Use the specified Python interpreter.")
parser.add_argument('--quit-on-failure', action='store_true',
                    help="Quit on any test failure.")
parser.add_argument('--runner', metavar='CLASS',
                    help="Test runner class for unit tests.")
parser.add_argument('-X', dest='scons_exec', action='store_true',
                    help="Test script is executable, don't feed to Python.")
parser.add_argument('-x', '--exec', metavar="SCRIPT",
                    help="Test using SCRIPT as path to SCons.")
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (enabled by default, "
                         "default file 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    default='failed_tests.log',
                    help="Do not log failed tests to a file")
parser.add_argument('--no-ignore-skips', dest='dont_ignore_skips',
                    action='store_true',
                    default=False,
                    help="If any tests are skipped, exit status 2")

# Output control options:
outctl = parser.add_argument_group(description='Output control options:')
outctl.add_argument('-k', '--no-progress', action='store_false',
                    dest='print_progress',
                    help="Suppress count and progress percentage messages.")
outctl.add_argument('--passed', action='store_true',
                    dest='print_passed_summary',
                    help="Summarize which tests passed.")
outctl.add_argument('-q', '--quiet', action='store_false',
                    dest='printcommand',
                    help="Don't print the test being executed.")
outctl.add_argument('-s', '--short-progress', action='store_true',
                    help="""Short progress, prints only the command line
                         and a progress percentage, no results.""")
outctl.add_argument('-t', '--time', action='store_true', dest='print_times',
                    help="Print test execution time.")
outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                    help="""Set verbose level
                         (1=print executed commands,
                         2=print commands and non-zero output,
                         3=print commands and all output).""")
# maybe add?
# outctl.add_argument('--version', action='version', version=f'{script} 1.0')

# Log control options:
logctl = parser.add_argument_group(description='Log control options:')
logctl.add_argument('-o', '--output', metavar='LOG', help="Save console output to LOG.")
logctl.add_argument(
    '--xml',
    metavar='XML',
    help="Save results to XML in SCons XML format (use - for stdout).",
)
# ---[ process args and handle a few specific cases ]-------------------
args: argparse.Namespace = parser.parse_args()

# we can't do this check with an argparse exclusive group, since those
# only work with optional args, and the cmdline tests (args.testlist)
# are not optional args,
if args.testlist and (args.testlistfile or args.all or args.retry):
    sys.stderr.write(
        parser.format_usage()
        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
    )
    sys.exit(1)

if args.retry:
    args.testlistfile = 'failed_tests.log'

if args.testlistfile:
    # args.testlistfile changes from a string to a pathlib Path object
    try:
        ptest = Path(args.testlistfile)
        args.testlistfile = ptest.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + f'error: -f/--file testlist file "{args.testlistfile}" not found\n'
        )
        sys.exit(1)

if args.excludelistfile:
    # args.excludelistfile changes from a string to a pathlib Path object
    try:
        pexcl = Path(args.excludelistfile)
        args.excludelistfile = pexcl.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + f'error: --exclude-list file "{args.excludelistfile}" not found\n'
        )
        sys.exit(1)

if args.jobs == 0:
    try:
        # on Linux, check available rather than physical CPUs
        args.jobs = len(os.sched_getaffinity(0))
    except AttributeError:
        # Windows has no sched_getaffinity
        args.jobs = os.cpu_count()

# sanity check. BUGFIX: os.cpu_count() may return None, which the old
# "== 0" comparison missed, leading to a TypeError at "args.jobs > 1"
# below; a falsy check catches both None and 0.
if not args.jobs:
    sys.stderr.write(
        parser.format_usage()
        + "Unable to detect CPU count, give -j a non-zero value\n"
    )
    sys.exit(1)

if args.jobs > 1 or args.output:
    # 1. don't let tests write stdout/stderr directly if multi-job,
    #    else outputs will interleave and be hard to read.
    # 2. If we're going to write a logfile, we also need to catch the output.
    catch_output = True

if not args.printcommand:
    suppress_output = catch_output = True

if args.verbose:
    os.environ['TESTCMD_VERBOSE'] = str(args.verbose)

if args.short_progress:
    args.print_progress = True
    suppress_output = catch_output = True

if args.debug:
    # TODO: add a way to pass a specific debugger
    debug = "pdb"

if args.exec:
    scons = args.exec
# --- setup stdout/stderr ---
class Unbuffered:
    """File-object proxy that flushes after every write."""

    def __init__(self, file):
        self.file = file

    def write(self, arg):
        self.file.write(arg)
        self.file.flush()

    def __getattr__(self, attr):
        # anything other than write is delegated to the wrapped file
        return getattr(self.file, attr)


sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

# possible alternative: switch to using print, and:
# print = functools.partial(print, flush)
if args.output:
    class Tee:
        """Stream duplicator: writes go to both the log file and the
        original stream (like the Unix tee utility)."""

        def __init__(self, openfile, stream):
            self.file = openfile
            self.stream = stream

        def write(self, data):
            self.file.write(data)
            self.stream.write(data)

        def flush(self):
            # BUGFIX: flush() takes no data argument -- the old signature
            # flush(self, data) meant every sys.stdout.flush() raised
            # TypeError, and file.flush() itself accepts no argument.
            self.file.flush()
            self.stream.flush()

    logfile = open(args.output, 'w')
    # this is not ideal: we monkeypatch stdout/stderr a second time
    # (already did for Unbuffered), so here we can't easily detect what
    # state we're in on closedown. Just hope it's okay...
    sys.stdout = Tee(logfile, sys.stdout)
    sys.stderr = Tee(logfile, sys.stderr)
# --- define helpers ----
if sys.platform == 'win32':
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,    # flags
        ctypes.c_ulong,    # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        """Return the association-related string for *filetype*."""
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        cch = ctypes.c_ulong(260)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                if e.winerror != E_POINTER:
                    raise
                # E_POINTER: buffer too small; cch was updated with the
                # required size, so loop and retry with a bigger buffer.
                continue
            break
        return buf.value
if not catch_output:
    # Without any output suppressed, we let the subprocess
    # write its stuff freely to stdout/stderr.

    def spawn_it(command_args, env):
        """Run one test, output passed straight through.

        Returns (stderr, stdout, status); since nothing is captured,
        both output slots are None.
        """
        cp = subprocess.run(command_args, shell=False, env=env)
        return cp.stderr, cp.stdout, cp.returncode

else:
    # Else, we catch the output of both pipes...
    if args.allow_pipe_files:
        # The subprocess.Popen() suffers from a well-known
        # problem. Data for stdout/stderr is read into a
        # memory buffer of fixed size, 65K which is not very much.
        # When it fills up, it simply stops letting the child process
        # write to it. The child will then sit and patiently wait to
        # be able to write the rest of its output. Hang!
        # In order to work around this, we follow a suggestion
        # by Anders Pearson in
        # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
        # and pass temp file objects to Popen() instead of the ubiquitous
        # subprocess.PIPE.

        def spawn_it(command_args, env):
            """Run one test, output buffered through temp files.

            Returns (stderr, stdout, status) -- note the order; it is
            what the caller (SystemExecutor.execute) unpacks.
            """
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            cp = subprocess.run(
                command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )
            try:
                # Rewind and read back the captured output
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                spawned_stdout = tmp_stdout.read()
                spawned_stderr = tmp_stderr.read()
            finally:
                # Closing the temp files also removes them
                tmp_stdout.close()
                tmp_stderr.close()
            return spawned_stderr, spawned_stdout, cp.returncode

    else:
        # We get here only if the user gave the '--nopipefiles'
        # option, meaning the "temp file" approach for
        # subprocess.communicate() above shouldn't be used.
        # He hopefully knows what he's doing, but again we have a
        # potential deadlock situation in the following code:
        # If the subprocess writes a lot of data to its stderr,
        # the pipe will fill up (nobody's reading it yet) and the
        # subprocess will wait for someone to read it.
        # But the parent process is trying to read from stdin
        # (but the subprocess isn't writing anything there).
        # Hence a deadlock.
        # Be dragons here! Better don't use this!

        def spawn_it(command_args, env):
            """Run one test, output captured via pipes (may deadlock).

            Returns (stderr, stdout, status).
            """
            cp = subprocess.run(
                command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            # BUGFIX: previously returned (stdout, stderr, status) while
            # the caller unpacks (stderr, stdout, status), so the two
            # captured streams were swapped under --nopipefiles. Now
            # consistent with the temp-file variant above.
            # NOTE(review): this variant yields bytes while the temp-file
            # variant yields str -- pre-existing inconsistency, left as is.
            return cp.stderr, cp.stdout, cp.returncode
class RuntestBase(ABC):
    """Base class for a single testcase.

    Records the test's path, a sequential test number, and slots for the
    command to run and its results; subclasses supply execute().
    """
    _ids = itertools.count(1)  # generates the test number automatically

    def __init__(self, path, spe=None):
        self.path = str(path)
        self.testno = next(self._ids)
        self.stdout = self.stderr = self.status = None
        self.abspath = path.absolute()
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            # search-path list: the first directory actually containing
            # the file provides the absolute path
            for directory in spe:
                candidate = os.path.join(directory, path)
                if os.path.isfile(candidate):
                    self.abspath = candidate
                    break

    @abstractmethod
    def execute(self, env):
        """Run the test (implemented by subclasses)."""
class SystemExecutor(RuntestBase):
    """Testcase run via spawn_it()."""

    def execute(self, env):
        # spawn_it returns (stderr, stdout, status), in that order
        self.stderr, self.stdout, s = spawn_it(self.command_args, env)
        self.status = s
        # 0/1/2 are pass/fail/no-result; 5 also appears to be expected
        # (NOTE(review): looks like a skip-related status -- confirm)
        if s < 0 or (s > 2 and s != 5):
            sys.stdout.write("Unexpected exit status %d\n" % s)
class PopenExecutor(RuntestBase):
    """Testcase that always captures output.

    A bit of a misnomer as the Popen call is now wrapped
    by calling subprocess.run (behind the covers uses Popen).
    Very similar to SystemExecutor, but doesn't allow for not catching
    the output.
    """
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    if args.allow_pipe_files:

        def execute(self, env) -> None:
            # temp files avoid the 65K pipe-buffer deadlock; the
            # with-block closes (and thereby removes) them
            with tempfile.TemporaryFile(mode='w+t') as tmp_stdout, \
                 tempfile.TemporaryFile(mode='w+t') as tmp_stderr:
                cp = subprocess.run(
                    self.command_args,
                    stdout=tmp_stdout,
                    stderr=tmp_stderr,
                    shell=False,
                    env=env,
                    check=False,
                )
                self.status = cp.returncode
                # rewind and read back the captured output
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                self.stdout = tmp_stdout.read()
                self.stderr = tmp_stderr.read()
    else:

        def execute(self, env) -> None:
            cp = subprocess.run(
                self.command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
                check=False,
            )
            self.status, self.stdout, self.stderr = cp.returncode, cp.stdout, cp.stderr
478 class XML(PopenExecutor):
479 """ Test class for tests that will output in scons xml """
480 @staticmethod
481 def header(f):
482 f.write(' <results>\n')
484 def write(self, f):
485 f.write(' <test>\n')
486 f.write(' <file_name>%s</file_name>\n' % self.path)
487 f.write(' <command_line>%s</command_line>\n' % self.command_str)
488 f.write(' <exit_status>%s</exit_status>\n' % self.status)
489 f.write(' <stdout>%s</stdout>\n' % self.stdout)
490 f.write(' <stderr>%s</stderr>\n' % self.stderr)
491 f.write(' <time>%.1f</time>\n' % self.test_time)
492 f.write(' </test>\n')
494 def footer(self, f):
495 f.write(' <time>%.1f</time>\n' % self.total_time)
496 f.write(' </results>\n')
if args.xml:
    Test = XML
else:
    Test = SystemExecutor

# --- start processing ---
if not args.baseline or args.baseline == '.':
    baseline = cwd
elif args.baseline == '-':
    sys.stderr.write(
        "'baseline' logic used to checkout from svn. It has been removed. "
        "If you used this, please let us know on devel mailing list, "
        "IRC, or discord server\n"
    )
    sys.exit(-1)
else:
    baseline = args.baseline
scons_runtest_dir = baseline

if not args.external:
    scons_script_dir = os.path.join(baseline, 'scripts')
    scons_tools_dir = os.path.join(baseline, 'bin')
    scons_lib_dir = baseline
else:
    # external mode: rely on an installed SCons
    scons_script_dir = ''
    scons_tools_dir = ''
    scons_lib_dir = ''

testenv = {
    'SCONS_RUNTEST_DIR': scons_runtest_dir,
    'SCONS_TOOLS_DIR': scons_tools_dir,
    'SCONS_SCRIPT_DIR': scons_script_dir,
    'SCONS_CWD': cwd,
}

if scons:
    # Let the version of SCons that the -x option pointed to find
    # its own modules.
    testenv['SCONS'] = scons
elif scons_lib_dir:
    # Because SCons is really aggressive about finding its modules,
    # it sometimes finds SCons modules elsewhere on the system.
    # This forces SCons to use the modules that are being tested.
    testenv['SCONS_LIB_DIR'] = scons_lib_dir

if args.scons_exec:
    testenv['SCONS_EXEC'] = '1'

if args.external:
    testenv['SCONS_EXTERNAL_TEST'] = '1'

# Insert scons path and path for testing framework to PYTHONPATH
scriptpath = os.path.dirname(os.path.realpath(__file__))
frameworkpath = os.path.join(scriptpath, 'testing', 'framework')
testenv['PYTHONPATH'] = os.pathsep.join((scons_lib_dir, frameworkpath))
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
    # keep whatever the caller already had on PYTHONPATH, after ours
    testenv['PYTHONPATH'] = testenv['PYTHONPATH'] + os.pathsep + pythonpath

if sys.platform == 'win32':
    # Windows doesn't support "shebang" lines directly (the Python launcher
    # and Windows Store version do, but you have to get them launched first)
    # so to directly launch a script we depend on an assoc for .py to work.
    # Some systems may have none, and in some cases IDE programs take over
    # the assoc. Detect this so the small number of tests affected can skip.
    try:
        python_assoc = get_template_command('.py')
    except OSError:
        python_assoc = None
    if not python_assoc or "py" not in python_assoc:
        testenv['SCONS_NO_DIRECT_SCRIPT'] = '1'

os.environ.update(testenv)

# Clear _JAVA_OPTIONS which java tools output to stderr when run breaking tests
if '_JAVA_OPTIONS' in os.environ:
    del os.environ['_JAVA_OPTIONS']
# ---[ test discovery ]------------------------------------
# This section figures out which tests to run.
#
# The initial testlist is made by reading from the testlistfile,
# if supplied, or by looking at the test arguments, if supplied,
# or by looking for all test files if the "all" argument is supplied.
# One of the three is required.
#
# Each test path, whichever of the three sources it comes from,
# specifies either a test file or a directory to search for
# SCons tests. SCons code layout assumes that any file under the 'SCons'
# subdirectory that ends with 'Tests.py' is a unit test, and any Python
# script (*.py) under the 'test' subdirectory is an end-to-end test.
# We need to track these because they are invoked differently.
# find_unit_tests and find_e2e_tests are used for this searching.
#
# Note that there are some tests under 'SCons' that *begin* with
# 'test_', but they're packaging and installation tests, not
# functional tests, so we don't execute them by default. (They can
# still be executed by hand, though.)
#
# Test exclusions, if specified, are then applied.
def scanlist(testfile):
    """Process a testlist file.

    Args:
        testfile: Path object naming the list file; one test path per
            line. Blank lines and comment lines (leading '#', which may
            be indented) are ignored.

    Returns:
        list of Path objects, one per selected test.
    """
    lines = [line.strip() for line in testfile.read_text().splitlines()]
    # BUGFIX: filter comments *after* stripping, so an indented
    # "  # comment" line no longer slips into the test list (the old
    # code checked startswith('#') on the unstripped line).
    tests = [line for line in lines if line and not line.startswith('#')]
    # in order to allow scanned lists to work whether they use forward or
    # backward slashes, first create the object as a PureWindowsPath which
    # accepts either, then use that to make a Path object to use for
    # comparisons like "file in scanned_list".
    if sys.platform == 'win32':
        return [Path(t) for t in tests]
    return [Path(PureWindowsPath(t).as_posix()) for t in tests]
def find_unit_tests(directory):
    """Return a sorted list of unit-test files under *directory*.

    A unit test is any file named '*Tests.py'. The files of a directory
    containing a 'sconstest.skip' marker file are skipped.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        if 'sconstest.skip' in filenames:
            continue
        found.extend(
            Path(dirpath, name) for name in filenames if name.endswith("Tests.py")
        )
    return sorted(found)
def find_e2e_tests(directory):
    """Return a sorted list of end-to-end test files under *directory*.

    An e2e test is any '*.py' file. The files of a directory containing
    a 'sconstest.skip' marker are skipped, and a '.exclude_tests' file
    (scanlist format) names tests to leave out of its directory.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        if 'sconstest.skip' in filenames:
            continue

        # slurp in any tests named in an exclude list
        excludes = []
        if ".exclude_tests" in filenames:
            excludes = scanlist(Path(dirpath, ".exclude_tests").resolve())

        found.extend(
            Path(dirpath, name)
            for name in filenames
            if name.endswith(".py") and Path(name) not in excludes
        )
    return sorted(found)
# initial selection:
# if we have a testlist file read that, else hunt for tests.
unittests = []
endtests = []
if args.testlistfile:
    tests = scanlist(args.testlistfile)
else:
    testpaths = []
    if args.all:  # -a flag
        testpaths = [Path('SCons'), Path('test')]
    elif args.testlist:  # paths given on cmdline
        if sys.platform == 'win32':
            testpaths = [Path(t) for t in args.testlist]
        else:
            testpaths = [Path(PureWindowsPath(t).as_posix()) for t in args.testlist]

    for path in testpaths:
        # Clean up path removing leading ./ or .\
        name = str(path)
        if name.startswith('.') and name[1] in (os.sep, os.altsep):
            # BUGFIX: this used an undefined variable 'tn', raising
            # NameError for any './'-prefixed argument that survives
            # pathlib's normalization; 'name' is the intended variable.
            path = path.with_name(name[2:])

        if path.exists():
            if path.is_dir():
                if path.parts[0] == "SCons" or path.parts[0] == "testing":
                    unittests.extend(find_unit_tests(path))
                elif path.parts[0] == 'test':
                    endtests.extend(find_e2e_tests(path))
                # else: TODO: what if user pointed to a dir outside scons tree?
            else:
                if path.match("*Tests.py"):
                    unittests.append(path)
                elif path.match("*.py"):
                    endtests.append(path)

    tests = sorted(unittests + endtests)

# Remove exclusions:
if args.e2e_only:
    tests = [t for t in tests if not t.match("*Tests.py")]
if args.unit_only:
    tests = [t for t in tests if t.match("*Tests.py")]
if args.excludelistfile:
    excludetests = scanlist(args.excludelistfile)
    tests = [t for t in tests if t not in excludetests]

# did we end up with any tests?
if not tests:
    sys.stderr.write(parser.format_usage() + """
error: no tests matching the specification were found.
See "Test selection options" in the help for details on
how to specify and/or exclude tests.
""")
    sys.exit(1)
# ---[ test processing ]-----------------------------------
# wrap each selected path in a testcase object of the chosen class
tests = [Test(t) for t in tests]

if args.list_only:
    for t in tests:
        print(t.path)
    sys.exit(0)

if not args.python:
    if os.name == 'java':
        args.python = os.path.join(sys.prefix, 'jython')
    else:
        args.python = sys.executable
os.environ["python_executable"] = args.python

if args.print_times:

    def print_time(fmt, tm):
        """Print a timing message (timing display enabled)."""
        print(fmt % tm)

else:

    def print_time(fmt, tm):
        """Timing display disabled: swallow the message."""

time_func = time.perf_counter
total_start_time = time_func()
total_num_tests = len(tests)
def log_result(t, io_lock=None):
    """Log the result of a test.

    "log" in this case means writing to stdout. Since we might be
    called from any of several different threads (multi-job run),
    we need to lock access to the log to avoid interleaving. The same
    would apply if output was a file.

    Args:
        t (Test): (completed) testcase instance
        io_lock (threading.Lock): (optional) lock to use
    """
    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock:
        io_lock.acquire()
    try:
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            if t.stdout:
                print(t.stdout)
            if t.stderr:
                print(t.stderr)
        print_time("Test execution time: %.1f seconds", t.test_time)
    finally:
        if io_lock:
            io_lock.release()

    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        print(t.status)
        sys.exit(1)
def run_test(t, io_lock=None, run_async=True):
    """Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
    """
    t.headline = ""
    command_args = []
    if debug:
        command_args.extend(['-m', debug])
    if args.devmode:
        command_args.append('-X dev')
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.testno,
                total_num_tests,
                float(t.testno) * 100.0 / float(total_num_tests),
                "%",
                t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
        if not suppress_output and not catch_output:
            # defer printing the headline until test is done
            sys.stdout.write(t.headline)

    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)

    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)
class RunTest(threading.Thread):
    """Test runner thread.

    One instance is created for each job thread in multi-job mode;
    each consumes testcases from the shared queue until it reads a None.
    """

    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        """Fetch and run testcases until a None sentinel is queued."""
        for t in iter(self.queue.get, None):
            run_test(t, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
if args.jobs > 1:
    print("Running tests using %d jobs" % args.jobs)
    testq = Queue()
    for t in tests:
        testq.put(t)
    testlock = threading.Lock()
    # Start worker threads to consume the queue
    threads = [RunTest(queue=testq, io_lock=testlock) for _ in range(args.jobs)]
    for t in threads:
        t.daemon = True
        t.start()
    # wait on the queue rather than the individual threads
    testq.join()
else:
    # single-job: run everything inline, no locking needed
    for t in tests:
        run_test(t, io_lock=None, run_async=False)
# --- all tests are complete by the time we get here ---
if tests:
    tests[0].total_time = time_func() - total_start_time
    print_time("Total execution time for all tests: %.1f seconds", tests[0].total_time)

# bucket results by exit status: 0=pass, 1=fail, 2=no result
passed = [t for t in tests if t.status == 0]
fail = [t for t in tests if t.status == 1]
no_result = [t for t in tests if t.status == 2]

# print summaries, but only if multiple tests were run
if len(tests) != 1 and args.execute_tests:
    if passed and args.print_passed_summary:
        if len(passed) == 1:
            sys.stdout.write("\nPassed the following test:\n")
        else:
            sys.stdout.write("\nPassed the following %d tests:\n" % len(passed))
        paths = [x.path for x in passed]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if fail:
        if len(fail) == 1:
            sys.stdout.write("\nFailed the following test:\n")
        else:
            sys.stdout.write("\nFailed the following %d tests:\n" % len(fail))
        paths = [x.path for x in fail]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if no_result:
        if len(no_result) == 1:
            sys.stdout.write("\nNO RESULT from the following test:\n")
        else:
            sys.stdout.write("\nNO RESULT from the following %d tests:\n" % len(no_result))
        paths = [x.path for x in no_result]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
# save the fails to a file
if args.error_log:
    with open(args.error_log, "w") as f:
        # if there are no fails, the file is simply truncated (cleared)
        for failed_path in (x.path for x in fail):
            print(failed_path, file=f)
if args.xml:
    # BUGFIX: the "use - for stdout" contract belongs to the --xml value
    # (args.xml); the old code tested args.output, which is the separate
    # -o/--output console-log option, so "--xml -" opened a file
    # literally named "-" instead of writing to stdout.
    if args.xml == '-':
        f = sys.stdout
    else:
        f = open(args.xml, 'w')
    tests[0].header(f)
    # f.write("test_result = [\n")
    for t in tests:
        t.write(f)
    tests[0].footer(f)
    # f.write("];\n")
    if args.xml != '-':
        f.close()
if args.output:
    # close the Tee log file(s) hung off stdout/stderr
    if isinstance(sys.stdout, Tee):
        sys.stdout.file.close()
    if isinstance(sys.stderr, Tee):
        sys.stderr.file.close()

if fail:
    sys.exit(1)
elif no_result and args.dont_ignore_skips:
    # no fails, but skips were found and --no-ignore-skips was given
    sys.exit(2)
else:
    sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: