Updates to PR 4374 from mwichmann to correct config file hash changes
[scons.git] / runtest.py
blob07d9f332dffe63d57d9a659348677ba0b86692b0
1 #!/usr/bin/env python
3 # Copyright The SCons Foundation
5 """runtest - wrapper script for running SCons tests
7 The SCons test suite consists of:
9 * unit tests - *Tests.py files from the SCons/ dir
10 * end-to-end tests - *.py files in the test/ directory that
11 require the custom SCons framework from testing/
13 This script adds SCons/ and testing/ directories to PYTHONPATH,
14 performs test discovery and processes tests according to options.
15 """
17 import argparse
18 import itertools
19 import os
20 import subprocess
21 import sys
22 import tempfile
23 import threading
24 import time
25 from abc import ABC, abstractmethod
26 from io import StringIO
27 from pathlib import Path, PurePath, PureWindowsPath
28 from queue import Queue
# Directory runtest was invoked from; tests and results are relative to it.
cwd = os.getcwd()

# Module-level state shared by the helpers defined below.
debug = None             # name of debugger module to run tests under (e.g. "pdb")
scons = None             # path to SCons script under test (-x/--exec option)
catch_output = False     # capture test stdout/stderr instead of passing through
suppress_output = False  # drop captured test output entirely

# usage/epilog text for the argument parser below
script = PurePath(sys.argv[0]).name
usagestr = """\
%(script)s [OPTIONS] [TEST ...]
""" % locals()
epilogstr = """\
Environment Variables:
  PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
  TESTCMD_VERBOSE: turn on verbosity in TestCommand\
"""
# Command-line definition. Options are grouped: test selection,
# miscellaneous, output control, and log control.
parser = argparse.ArgumentParser(
    usage=usagestr, epilog=epilogstr, allow_abbrev=False,
    formatter_class=argparse.RawDescriptionHelpFormatter,
)

# test selection options:
testsel = parser.add_argument_group(description='Test selection options:')
testsel.add_argument(metavar='TEST', nargs='*', dest='testlist',
                     help="Select TEST(s) (tests and/or directories) to run")
testlisting = testsel.add_mutually_exclusive_group()
testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                         help="Select only tests in FILE")
testlisting.add_argument('-a', '--all', action='store_true',
                         help="Select all tests")
testlisting.add_argument('--retry', action='store_true',
                         help="Rerun the last failed tests in 'failed_tests.log'")
testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                     help="""Exclude tests in FILE from current selection""")
testtype = testsel.add_mutually_exclusive_group()
testtype.add_argument('--e2e-only', action='store_true',
                      help="Exclude unit tests from selection")
testtype.add_argument('--unit-only', action='store_true',
                      help="Exclude end-to-end tests from selection")

# miscellaneous options
parser.add_argument('-b', '--baseline', metavar='BASE',
                    help="Run test scripts against baseline BASE.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Run test scripts under the Python debugger.")
parser.add_argument('-D', '--devmode', action='store_true',
                    help="Run tests in Python's development mode (Py3.7+ only).")
parser.add_argument('-e', '--external', action='store_true',
                    help="Run the script in external mode (for external Tools)")
parser.add_argument('-j', '--jobs', metavar='JOBS', default=1, type=int,
                    help="Run tests in JOBS parallel jobs (0 for cpu_count).")
parser.add_argument('-l', '--list', action='store_true', dest='list_only',
                    help="List available tests and exit.")
parser.add_argument('-n', '--no-exec', action='store_false',
                    dest='execute_tests',
                    help="No execute, just print command lines.")
parser.add_argument('--nopipefiles', action='store_false',
                    dest='allow_pipe_files',
                    help="""Do not use the "file pipe" workaround for subprocess
                    for starting tests. See source code for warnings.""")
parser.add_argument('-P', '--python', metavar='PYTHON',
                    help="Use the specified Python interpreter.")
parser.add_argument('--quit-on-failure', action='store_true',
                    help="Quit on any test failure.")
parser.add_argument('--runner', metavar='CLASS',
                    help="Test runner class for unit tests.")
parser.add_argument('-X', dest='scons_exec', action='store_true',
                    help="Test script is executable, don't feed to Python.")
parser.add_argument('-x', '--exec', metavar="SCRIPT",
                    help="Test using SCRIPT as path to SCons.")
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (enabled by default, "
                         "default file 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    default='failed_tests.log',
                    help="Do not log failed tests to a file")

parser.add_argument('--no-ignore-skips', dest='dont_ignore_skips',
                    action='store_true',
                    default=False,
                    help="If any tests are skipped, exit status 2")

outctl = parser.add_argument_group(description='Output control options:')
outctl.add_argument('-k', '--no-progress', action='store_false',
                    dest='print_progress',
                    help="Suppress count and progress percentage messages.")
outctl.add_argument('--passed', action='store_true',
                    dest='print_passed_summary',
                    help="Summarize which tests passed.")
outctl.add_argument('-q', '--quiet', action='store_false',
                    dest='printcommand',
                    help="Don't print the test being executed.")
outctl.add_argument('-s', '--short-progress', action='store_true',
                    help="""Short progress, prints only the command line
                    and a progress percentage.""")
outctl.add_argument('-t', '--time', action='store_true', dest='print_times',
                    help="Print test execution time.")
outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                    help="""Set verbose level
                    (1=print executed commands,
                    2=print commands and non-zero output,
                    3=print commands and all output).""")
# maybe add?
# outctl.add_argument('--version', action='version', version='%s 1.0' % script)

logctl = parser.add_argument_group(description='Log control options:')
logctl.add_argument('-o', '--output', metavar='LOG', help="Save console output to LOG.")
logctl.add_argument('--xml', metavar='XML', help="Save results to XML in SCons XML format.")
# process args and handle a few specific cases:
args = parser.parse_args()

# we can't do this check with an argparse exclusive group, since those
# only work with optional args, and the cmdline tests (args.testlist)
# are not optional args,
if args.testlist and (args.testlistfile or args.all or args.retry):
    sys.stderr.write(
        parser.format_usage()
        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
    )
    sys.exit(1)

if args.retry:
    args.testlistfile = 'failed_tests.log'


def _resolve_listfile(name, errprefix):
    """Resolve a test-list file option to an existing pathlib.Path.

    Shared by the -f/--file and --exclude-list handling below, which
    previously duplicated this logic verbatim.

    Args:
        name (str): file name as given on the command line
        errprefix (str): option description used in the error message

    Returns:
        Path: the resolved path.  Exits with usage + error if missing.
    """
    p = Path(name)
    try:
        # TODO simplify when Py3.5 dropped
        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            return p.resolve()
        return p.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + 'error: %s file "%s" not found\n' % (errprefix, p)
        )
        sys.exit(1)


if args.testlistfile:
    # args.testlistfile changes from a string to a pathlib Path object
    args.testlistfile = _resolve_listfile(args.testlistfile, '-f/--file testlist')

if args.excludelistfile:
    # args.excludelistfile changes from a string to a pathlib Path object
    args.excludelistfile = _resolve_listfile(args.excludelistfile, '--exclude-list')

if args.jobs == 0:
    try:
        # on Linux, check available rather than physical CPUs
        args.jobs = len(os.sched_getaffinity(0))
    except AttributeError:
        # Windows
        args.jobs = os.cpu_count()

# sanity check
if args.jobs == 0:
    sys.stderr.write(
        parser.format_usage()
        + "Unable to detect CPU count, give -j a non-zero value\n"
    )
    sys.exit(1)

if args.jobs > 1 or args.output:
    # 1. don't let tests write stdout/stderr directly if multi-job,
    #    else outputs will interleave and be hard to read.
    # 2. If we're going to write a logfile, we also need to catch the output.
    catch_output = True

if not args.printcommand:
    suppress_output = catch_output = True

if args.verbose:
    os.environ['TESTCMD_VERBOSE'] = str(args.verbose)

if args.short_progress:
    args.print_progress = True
    suppress_output = catch_output = True

if args.debug:
    # TODO: add a way to pass a specific debugger
    debug = "pdb"

if args.exec:
    scons = args.exec
# --- setup stdout/stderr ---
class Unbuffered:
    """Wrap a stream so that every write is immediately flushed."""

    def __init__(self, file):
        self.file = file

    def write(self, data):
        self.file.write(data)
        self.file.flush()

    def __getattr__(self, attr):
        # Delegate everything else (fileno, isatty, close, ...) to the
        # underlying stream.
        return getattr(self.file, attr)


sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

# possible alternative: switch to using print, and:
# print = functools.partial(print, flush)
if args.output:
    class Tee:
        """Duplicate writes to a log file and the original stream."""

        def __init__(self, openfile, stream):
            self.file = openfile
            self.stream = stream

        def write(self, data):
            self.file.write(data)
            self.stream.write(data)

        def flush(self):
            # Bugfix: flush() takes no data argument.  The old signature
            # flush(self, data) both required a bogus argument and passed
            # it to file.flush(), so any tee.flush() call raised TypeError.
            self.file.flush()
            self.stream.flush()

    logfile = open(args.output, 'w')
    # this is not ideal: we monkeypatch stdout/stderr a second time
    # (already did for Unbuffered), so here we can't easily detect what
    # state we're in on closedown. Just hope it's okay...
    sys.stdout = Tee(logfile, sys.stdout)
    sys.stderr = Tee(logfile, sys.stderr)
# --- define helpers ----
if sys.platform == 'win32':
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,  # flags
        ctypes.c_ulong,  # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        """Return the Windows shell command template associated with *filetype*.

        Queries the file association database via AssocQueryStringW.
        If the call fails with E_POINTER the output buffer was too small;
        pcchOut (cch) has been updated with the required size, so loop
        and retry with a bigger buffer.  Any other OSError propagates.
        """
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        cch = ctypes.c_ulong(260)  # initial buffer size (MAX_PATH-ish)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                if e.winerror != E_POINTER:
                    raise
                # buffer too small: cch now holds the needed length
                continue
            break
        return buf.value
if not catch_output:
    # Without any output suppressed, we let the subprocess
    # write its stuff freely to stdout/stderr.

    def spawn_it(command_args, env):
        """Run a test; output passes straight through to the console.

        Returns (stderr, stdout, returncode).  Both streams are None
        here since nothing was captured; order matches the capturing
        variants below.
        """
        cp = subprocess.run(command_args, shell=False, env=env)
        return cp.stderr, cp.stdout, cp.returncode

else:
    # Else, we catch the output of both pipes...
    if args.allow_pipe_files:
        # The subprocess.Popen() suffers from a well-known
        # problem. Data for stdout/stderr is read into a
        # memory buffer of fixed size, 65K which is not very much.
        # When it fills up, it simply stops letting the child process
        # write to it. The child will then sit and patiently wait to
        # be able to write the rest of its output. Hang!
        # In order to work around this, we follow a suggestion
        # by Anders Pearson in
        # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
        # and pass temp file objects to Popen() instead of the ubiquitous
        # subprocess.PIPE.

        def spawn_it(command_args, env):
            """Run a test, capturing output via temporary files.

            Returns (stderr, stdout, returncode).
            """
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )

            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                spawned_stdout = tmp_stdout.read()
                spawned_stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()

            # Return values
            return spawned_stderr, spawned_stdout, cp.returncode

    else:
        # We get here only if the user gave the '--nopipefiles'
        # option, meaning the "temp file" approach for
        # subprocess.communicate() above shouldn't be used.
        # He hopefully knows what he's doing, but again we have a
        # potential deadlock situation in the following code:
        # If the subprocess writes a lot of data to its stderr,
        # the pipe will fill up (nobody's reading it yet) and the
        # subprocess will wait for someone to read it.
        # But the parent process is trying to read from stdin
        # (but the subprocess isn't writing anything there).
        # Hence a deadlock.
        # Be dragons here! Better don't use this!

        def spawn_it(command_args, env):
            """Run a test, capturing output via subprocess.PIPE.

            Returns (stderr, stdout, returncode).
            """
            cp = subprocess.run(
                command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            # Bugfix: return (stderr, stdout, returncode) to match the
            # temp-file variant above.  This variant used to return
            # (stdout, stderr, returncode), so SystemExecutor.execute(),
            # which unpacks (stderr, stdout, status), got the two streams
            # swapped whenever --nopipefiles was in effect.
            return cp.stderr, cp.stdout, cp.returncode
class RuntestBase(ABC):
    """Base class for a single testcase.

    Records the test's path, a sequential test number, and placeholders
    for the command line and results.  Subclasses supply execute().
    """

    _ids = itertools.count(1)  # generates test numbers automatically

    def __init__(self, path, spe=None):
        self.path = str(path)
        self.testno = next(self._ids)
        self.stdout = self.stderr = self.status = None
        self.abspath = path.absolute()
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            # spe is a list of search-path directories: the first one
            # actually containing the test file wins.
            for directory in spe:
                candidate = os.path.join(directory, path)
                if os.path.isfile(candidate):
                    self.abspath = candidate
                    break

    @abstractmethod
    def execute(self, env):
        pass
class SystemExecutor(RuntestBase):
    """Testcase run through the module-level spawn_it() helper."""

    def execute(self, env):
        # spawn_it returns (stderr, stdout, returncode)
        stderr, stdout, status = spawn_it(self.command_args, env)
        self.stderr = stderr
        self.stdout = stdout
        self.status = status
        # expected statuses: 0 = pass, 1 = fail, 2 = no result
        if status < 0 or status > 2:
            sys.stdout.write("Unexpected exit status %d\n" % status)
class PopenExecutor(RuntestBase):
    """ Test class for tests executed with Popen

    A bit of a misnomer as the Popen call is now wrapped
    by calling subprocess.run (behind the covers uses Popen.
    Very similar to SystemExecutor, but doesn't allow for not catching
    the output).
    """
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    # Note: the branch is evaluated once, at class-definition time,
    # so 'args' must already be parsed when this module section runs.
    if args.allow_pipe_files:

        def execute(self, env):
            # Capture output via temp files to avoid the 64K pipe-buffer
            # deadlock (see the spawn_it() commentary above).
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                self.command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )
            self.status = cp.returncode

            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                self.stdout = tmp_stdout.read()
                self.stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()
    else:

        def execute(self, env):
            # NOTE(review): with subprocess.PIPE and no text mode, stdout
            # and stderr here are bytes, while the temp-file variant above
            # reads text -- confirm downstream consumers handle both.
            cp = subprocess.run(
                self.command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            self.status, self.stdout, self.stderr = cp.returncode, cp.stdout, cp.stderr
class XML(PopenExecutor):
    """ Test class for tests that will output in scons xml """
    @staticmethod
    def header(f):
        # opening tag for the whole results document
        f.write(' <results>\n')

    def write(self, f):
        # one <test> element per testcase, recording command and outcome
        f.write(' <test>\n')
        f.write(' <file_name>%s</file_name>\n' % self.path)
        f.write(' <command_line>%s</command_line>\n' % self.command_str)
        f.write(' <exit_status>%s</exit_status>\n' % self.status)
        f.write(' <stdout>%s</stdout>\n' % self.stdout)
        f.write(' <stderr>%s</stderr>\n' % self.stderr)
        f.write(' <time>%.1f</time>\n' % self.test_time)
        f.write(' </test>\n')

    def footer(self, f):
        # total_time is only set on the first test instance (see below)
        f.write(' <time>%.1f</time>\n' % self.total_time)
        f.write(' </results>\n')
# choose the testcase class: XML output requires the capturing executor
if args.xml:
    Test = XML
else:
    Test = SystemExecutor

# --- start processing ---
if not args.baseline or args.baseline == '.':
    baseline = cwd
elif args.baseline == '-':
    print("This logic used to checkout from svn. It's been removed. If you used this, please let us know on devel mailing list, IRC, or discord server")
    sys.exit(-1)
else:
    baseline = args.baseline
scons_runtest_dir = baseline

# locate the SCons pieces under test, unless running in external mode
if not args.external:
    scons_script_dir = os.path.join(baseline, 'scripts')
    scons_tools_dir = os.path.join(baseline, 'bin')
    scons_lib_dir = baseline
else:
    scons_script_dir = ''
    scons_tools_dir = ''
    scons_lib_dir = ''

# environment passed along to every spawned test process
testenv = {
    'SCONS_RUNTEST_DIR': scons_runtest_dir,
    'SCONS_TOOLS_DIR': scons_tools_dir,
    'SCONS_SCRIPT_DIR': scons_script_dir,
    'SCONS_CWD': cwd,
}

if scons:
    # Let the version of SCons that the -x option pointed to find
    # its own modules.
    testenv['SCONS'] = scons
elif scons_lib_dir:
    # Because SCons is really aggressive about finding its modules,
    # it sometimes finds SCons modules elsewhere on the system.
    # This forces SCons to use the modules that are being tested.
    testenv['SCONS_LIB_DIR'] = scons_lib_dir

if args.scons_exec:
    testenv['SCONS_EXEC'] = '1'

if args.external:
    testenv['SCONS_EXTERNAL_TEST'] = '1'

# Insert scons path and path for testing framework to PYTHONPATH
scriptpath = os.path.dirname(os.path.realpath(__file__))
frameworkpath = os.path.join(scriptpath, 'testing', 'framework')
testenv['PYTHONPATH'] = os.pathsep.join((scons_lib_dir, frameworkpath))
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
    # keep the caller's PYTHONPATH, but let the dirs under test win
    testenv['PYTHONPATH'] = testenv['PYTHONPATH'] + os.pathsep + pythonpath
if sys.platform == 'win32':
    # Windows doesn't support "shebang" lines directly (the Python launcher
    # and Windows Store version do, but you have to get them launched first)
    # so to directly launch a script we depend on an assoc for .py to work.
    # Some systems may have none, and in some cases IDE programs take over
    # the assoc. Detect this so the small number of tests affected can skip.
    try:
        python_assoc = get_template_command('.py')
    except OSError:
        python_assoc = None
    if not python_assoc or "py" not in python_assoc:
        testenv['SCONS_NO_DIRECT_SCRIPT'] = '1'

# make the test environment visible to the spawned test processes
os.environ.update(testenv)

# Clear _JAVA_OPTIONS which java tools output to stderr when run breaking tests
if '_JAVA_OPTIONS' in os.environ:
    del os.environ['_JAVA_OPTIONS']
563 # ---[ test discovery ]------------------------------------
564 # This section figures out which tests to run.
566 # The initial testlist is made by reading from the testlistfile,
567 # if supplied, or by looking at the test arguments, if supplied,
568 # or by looking for all test files if the "all" argument is supplied.
569 # One of the three is required.
571 # Each test path, whichever of the three sources it comes from,
572 # specifies either a test file or a directory to search for
573 # SCons tests. SCons code layout assumes that any file under the 'SCons'
574 # subdirectory that ends with 'Tests.py' is a unit test, and any Python
575 # script (*.py) under the 'test' subdirectory is an end-to-end test.
576 # We need to track these because they are invoked differently.
577 # find_unit_tests and find_e2e_tests are used for this searching.
579 # Note that there are some tests under 'SCons' that *begin* with
580 # 'test_', but they're packaging and installation tests, not
581 # functional tests, so we don't execute them by default. (They can
582 # still be executed by hand, though).
584 # Test exclusions, if specified, are then applied.
def scanlist(testfile):
    """ Process a testlist file

    Reads *testfile* (a Path) and returns a list of Path objects, one
    per non-blank, non-comment line.  Uses read_text().splitlines()
    directly instead of the previous round-trip through a StringIO.

    Args:
        testfile (Path): file containing one test path per line;
            lines starting with '#' are comments.

    Returns:
        list[Path]: the selected test paths.
    """
    lines = testfile.read_text().splitlines()
    tests = [t.strip() for t in lines if not t.startswith('#')]
    # in order to allow scanned lists to work whether they use forward or
    # backward slashes, first create the object as a PureWindowsPath which
    # accepts either, then use that to make a Path object to use for
    # comparisons like "file in scanned_list".
    if sys.platform == 'win32':
        return [Path(t) for t in tests if t]
    else:
        return [Path(PureWindowsPath(t).as_posix()) for t in tests if t]
def find_unit_tests(directory):
    """ Look for unit tests

    Walks *directory* collecting files whose names end in "Tests.py".
    Any directory containing a marker file named "sconstest.skip" is
    skipped entirely.  Returns a sorted list of Path objects.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith("Tests.py")
        )
    return sorted(found)
def find_e2e_tests(directory):
    """ Look for end-to-end tests

    Walks *directory* collecting Python files (*.py).  A directory with
    a "sconstest.skip" marker is skipped entirely; a per-directory
    ".exclude_tests" file lists individual files to leave out.
    Returns a sorted list of Path objects.
    """
    found = []
    for dirpath, _, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue

        # Slurp in any tests named in a per-directory exclude list
        excludes = []
        if ".exclude_tests" in filenames:
            excludes = scanlist(Path(dirpath, ".exclude_tests").resolve())

        found.extend(
            Path(dirpath, fname)
            for fname in filenames
            if fname.endswith(".py") and Path(fname) not in excludes
        )
    return sorted(found)
# initial selection:
# if we have a testlist file read that, else hunt for tests.
unittests = []
endtests = []
if args.testlistfile:
    tests = scanlist(args.testlistfile)
else:
    testpaths = []
    if args.all:  # -a flag
        testpaths = [Path('SCons'), Path('test')]
    elif args.testlist:  # paths given on cmdline
        if sys.platform == 'win32':
            testpaths = [Path(t) for t in args.testlist]
        else:
            testpaths = [Path(PureWindowsPath(t).as_posix()) for t in args.testlist]

    for path in testpaths:
        # Clean up path removing leading ./ or .\
        name = str(path)
        if name.startswith('.') and name[1] in (os.sep, os.altsep):
            # Bugfix: this used "tn[2:]", an undefined name, which raised
            # NameError for any path beginning with "./" or ".\".
            path = path.with_name(name[2:])

        if path.exists():
            if path.is_dir():
                # directories are searched: unit tests live under SCons/
                # and testing/, end-to-end tests under test/
                if path.parts[0] == "SCons" or path.parts[0] == "testing":
                    unittests.extend(find_unit_tests(path))
                elif path.parts[0] == 'test':
                    endtests.extend(find_e2e_tests(path))
                # else: TODO: what if user pointed to a dir outside scons tree?
            else:
                if path.match("*Tests.py"):
                    unittests.append(path)
                elif path.match("*.py"):
                    endtests.append(path)

    tests = sorted(unittests + endtests)
# Remove exclusions:
# --e2e-only / --unit-only filter by the *Tests.py naming convention;
# --exclude-list removes individually named tests.
if args.e2e_only:
    tests = [t for t in tests if not t.match("*Tests.py")]
if args.unit_only:
    tests = [t for t in tests if t.match("*Tests.py")]
if args.excludelistfile:
    excludetests = scanlist(args.excludelistfile)
    tests = [t for t in tests if t not in excludetests]

# did we end up with any tests?
if not tests:
    sys.stderr.write(parser.format_usage() + """
error: no tests matching the specification were found.
See "Test selection options" in the help for details on
how to specify and/or exclude tests.
""")
    sys.exit(1)
# ---[ test processing ]-----------------------------------
# Turn the selected paths into runnable testcase objects.
tests = [Test(t) for t in tests]

if args.list_only:
    # -l/--list: just show what would run
    for t in tests:
        print(t.path)
    sys.exit(0)

# pick the Python interpreter used to run each test script
if not args.python:
    if os.name == 'java':
        args.python = os.path.join(sys.prefix, 'jython')
    else:
        args.python = sys.executable
os.environ["python_executable"] = args.python
# define print_time once, according to -t/--time, so callers don't
# have to test the option on every call
if args.print_times:

    def print_time(fmt, tm):
        """Print a timing message (timing display enabled)."""
        print(fmt % tm)

else:

    def print_time(fmt, tm):
        """Timing display disabled: do nothing."""
        pass

time_func = time.perf_counter
total_start_time = time_func()
total_num_tests = len(tests)
def log_result(t, io_lock=None):
    """ log the result of a test.

    "log" in this case means writing to stdout. Since we might be
    called from any of several different threads (multi-job run),
    we need to lock access to the log to avoid interleaving. The same
    would apply if output was a file.

    Args:
        t (Test): (completed) testcase instance
        io_lock (threading.Lock): (optional) lock to use
    """
    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock is not None:
        io_lock.acquire()
    try:
        # captured runs print the (deferred) headline now
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            for captured in (t.stdout, t.stderr):
                if captured:
                    print(captured)
        print_time("Test execution time: %.1f seconds", t.test_time)
    finally:
        if io_lock is not None:
            io_lock.release()

    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        print(t.status)
        sys.exit(1)
def run_test(t, io_lock=None, run_async=True):
    """ Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
            (not referenced in the body; kept for caller compatibility)
    """
    t.headline = ""
    command_args = []
    if debug:
        # run the test under the chosen debugger module
        command_args.extend(['-m', debug])
    if args.devmode and sys.version_info >= (3, 7, 0):
        # NOTE(review): appended as the single argv token "-X dev";
        # confirm the interpreter accepts it in this form rather than
        # as two elements ['-X', 'dev'].
        command_args.append('-X dev')
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            # headline like "12/345 (3.48%) <command>"
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.testno, total_num_tests,
                float(t.testno) * 100.0 / float(total_num_tests),
                "%",
                t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
    if not suppress_output and not catch_output:
        # output isn't captured: print the headline up front.
        # (captured runs defer it; log_result prints it after the test.)
        sys.stdout.write(t.headline)
    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)
    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)
class RunTest(threading.Thread):
    """ Test Runner class.

    One instance will be created for each job thread in multi-job mode
    """

    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        # Keep pulling testcases off the queue; a None entry would end
        # the loop, though in practice the caller relies on queue.join()
        # with daemon threads rather than queuing sentinels.
        while True:
            testcase = self.queue.get()
            if testcase is None:
                break
            run_test(testcase, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()
if args.jobs > 1:
    print("Running tests using %d jobs" % args.jobs)
    # load every selected test into a shared work queue
    testq = Queue()
    for t in tests:
        testq.put(t)
    testlock = threading.Lock()
    # Start worker threads to consume the queue
    threads = [RunTest(queue=testq, io_lock=testlock) for _ in range(args.jobs)]
    for t in threads:
        # daemon threads: the process may exit once the queue drains
        t.daemon = True
        t.start()
    # wait on the queue rather than the individual threads
    testq.join()
else:
    # single job: run serially; no lock needed for output
    for t in tests:
        run_test(t, io_lock=None, run_async=False)
# --- all tests are complete by the time we get here ---
if tests:
    # total time is stashed on the first test instance (used by XML.footer)
    tests[0].total_time = time_func() - total_start_time
    print_time("Total execution time for all tests: %.1f seconds", tests[0].total_time)

# partition results by exit status: 0=pass, 1=fail, 2=no result (skip)
passed = [t for t in tests if t.status == 0]
fail = [t for t in tests if t.status == 1]
no_result = [t for t in tests if t.status == 2]

# print summaries, but only if multiple tests were run
if len(tests) != 1 and args.execute_tests:
    if passed and args.print_passed_summary:
        if len(passed) == 1:
            sys.stdout.write("\nPassed the following test:\n")
        else:
            sys.stdout.write("\nPassed the following %d tests:\n" % len(passed))
        paths = [x.path for x in passed]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if fail:
        if len(fail) == 1:
            sys.stdout.write("\nFailed the following test:\n")
        else:
            sys.stdout.write("\nFailed the following %d tests:\n" % len(fail))
        paths = [x.path for x in fail]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if no_result:
        if len(no_result) == 1:
            sys.stdout.write("\nNO RESULT from the following test:\n")
        else:
            sys.stdout.write("\nNO RESULT from the following %d tests:\n" % len(no_result))
        paths = [x.path for x in no_result]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")

# save the fails to a file
if args.error_log:
    with open(args.error_log, "w") as f:
        if fail:
            paths = [x.path for x in fail]
            for test in paths:
                print(test, file=f)
        # if there are no fails, file will be cleared

if args.xml:
    # NOTE(review): the '-' (stdout) checks below read args.output, not
    # args.xml -- looks like they were meant to test args.xml; confirm
    # before changing.
    if args.output == '-':
        f = sys.stdout
    else:
        f = open(args.xml, 'w')
    tests[0].header(f)
    #f.write("test_result = [\n")
    for t in tests:
        t.write(f)
    tests[0].footer(f)
    #f.write("];\n")
    if args.output != '-':
        f.close()

if args.output:
    # close the logfile half of the Tee wrappers installed earlier;
    # the original streams stay open
    if isinstance(sys.stdout, Tee):
        sys.stdout.file.close()
    if isinstance(sys.stderr, Tee):
        sys.stderr.file.close()

# exit status: 1 if anything failed, 2 if only skips and
# --no-ignore-skips was given, else 0
if fail:
    sys.exit(1)
elif no_result and args.dont_ignore_skips:
    # if no fails, but skips were found
    sys.exit(2)
else:
    sys.exit(0)
917 # Local Variables:
918 # tab-width:4
919 # indent-tabs-mode:nil
920 # End:
921 # vim: set expandtab tabstop=4 shiftwidth=4: