#!/usr/bin/env python

# Copyright The SCons Foundation

"""runtest - wrapper script for running SCons tests

The SCons test suite consists of:

 * unit tests - *Tests.py files from the SCons/ dir
 * end-to-end tests - *.py files in the test/ directory that
       require the custom SCons framework from testing/

This script adds SCons/ and testing/ directories to PYTHONPATH,
performs test discovery and processes tests according to options.
"""
# TODO: normalize requested and testlist/exclude paths for easier comparison.
# e.g.: "runtest foo/bar" on windows will produce paths like foo/bar\test.py
# this is hard to match with excludelists, and makes those both os.sep-specific
# and command-line-typing specific.

import argparse
import glob
import os
import stat
import subprocess
import sys
import tempfile
import threading
import time
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue

cwd = os.getcwd()

debug = None
scons = None
catch_output = False
suppress_output = False

script = os.path.basename(sys.argv[0])
usagestr = """\
%(script)s [OPTIONS] [TEST ...]
""" % locals()

epilogstr = """\
Environment Variables:
  PRESERVE, PRESERVE_{PASS,FAIL,NO_RESULT}: preserve test subdirs
  TESTCMD_VERBOSE: turn on verbosity in TestCommand\
"""
parser = argparse.ArgumentParser(
    usage=usagestr, epilog=epilogstr, allow_abbrev=False,
    formatter_class=argparse.RawDescriptionHelpFormatter
)

# test selection options:
testsel = parser.add_argument_group(description='Test selection options:')
testsel.add_argument(metavar='TEST', nargs='*', dest='testlist',
                     help="Select TEST(s) (tests and/or directories) to run")
testlisting = testsel.add_mutually_exclusive_group()
testlisting.add_argument('-f', '--file', metavar='FILE', dest='testlistfile',
                         help="Select only tests in FILE")
testlisting.add_argument('-a', '--all', action='store_true',
                         help="Select all tests")
testlisting.add_argument('--retry', action='store_true',
                         help="Rerun the last failed tests in 'failed_tests.log'")
testsel.add_argument('--exclude-list', metavar="FILE", dest='excludelistfile',
                     help="""Exclude tests in FILE from current selection""")
testtype = testsel.add_mutually_exclusive_group()
testtype.add_argument('--e2e-only', action='store_true',
                      help="Exclude unit tests from selection")
testtype.add_argument('--unit-only', action='store_true',
                      help="Exclude end-to-end tests from selection")

# miscellaneous options
parser.add_argument('-b', '--baseline', metavar='BASE',
                    help="Run test scripts against baseline BASE.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Run test scripts under the Python debugger.")
parser.add_argument('-D', '--devmode', action='store_true',
                    help="Run tests in Python's development mode (Py3.7+ only).")
parser.add_argument('-e', '--external', action='store_true',
                    help="Run the script in external mode (for external Tools)")
parser.add_argument('-j', '--jobs', metavar='JOBS', default=1, type=int,
                    help="Run tests in JOBS parallel jobs (0 for cpu_count).")
parser.add_argument('-l', '--list', action='store_true', dest='list_only',
                    help="List available tests and exit.")
parser.add_argument('-n', '--no-exec', action='store_false',
                    dest='execute_tests',
                    help="No execute, just print command lines.")
parser.add_argument('--nopipefiles', action='store_false',
                    dest='allow_pipe_files',
                    help="""Do not use the "file pipe" workaround for subprocess
                    for starting tests. See source code for warnings.""")
parser.add_argument('-P', '--python', metavar='PYTHON',
                    help="Use the specified Python interpreter.")
parser.add_argument('--quit-on-failure', action='store_true',
                    help="Quit on any test failure.")
parser.add_argument('--runner', metavar='CLASS',
                    help="Test runner class for unit tests.")
parser.add_argument('-X', dest='scons_exec', action='store_true',
                    help="Test script is executable, don't feed to Python.")
parser.add_argument('-x', '--exec', metavar="SCRIPT",
                    help="Test using SCRIPT as path to SCons.")
parser.add_argument('--faillog', dest='error_log', metavar="FILE",
                    default='failed_tests.log',
                    help="Log failed tests to FILE (enabled by default, "
                         "default file 'failed_tests.log')")
parser.add_argument('--no-faillog', dest='error_log',
                    action='store_const', const=None,
                    default='failed_tests.log',
                    help="Do not log failed tests to a file")

parser.add_argument('--no-ignore-skips', dest='dont_ignore_skips',
                    action='store_true',
                    default=False,
                    help="If any tests are skipped, exit status 2")

outctl = parser.add_argument_group(description='Output control options:')
outctl.add_argument('-k', '--no-progress', action='store_false',
                    dest='print_progress',
                    help="Suppress count and progress percentage messages.")
outctl.add_argument('--passed', action='store_true',
                    dest='print_passed_summary',
                    help="Summarize which tests passed.")
outctl.add_argument('-q', '--quiet', action='store_false',
                    dest='printcommand',
                    help="Don't print the test being executed.")
outctl.add_argument('-s', '--short-progress', action='store_true',
                    help="""Short progress, prints only the command line
                    and a progress percentage.""")
outctl.add_argument('-t', '--time', action='store_true', dest='print_times',
                    help="Print test execution time.")
outctl.add_argument('--verbose', metavar='LEVEL', type=int, choices=range(1, 4),
                    help="""Set verbose level
                    (1=print executed commands,
                    2=print commands and non-zero output,
                    3=print commands and all output).""")
# maybe add?
# outctl.add_argument('--version', action='version', version='%s 1.0' % script)

logctl = parser.add_argument_group(description='Log control options:')
logctl.add_argument('-o', '--output', metavar='LOG', help="Save console output to LOG.")
logctl.add_argument('--xml', metavar='XML', help="Save results to XML in SCons XML format.")

# process args and handle a few specific cases:
args = parser.parse_args()

# we can't do this check with an argparse exclusive group, since those
# only work with optional args, and the cmdline tests (args.testlist)
# are not optional args,
if args.testlist and (args.testlistfile or args.all or args.retry):
    sys.stderr.write(
        parser.format_usage()
        + "error: command line tests cannot be combined with -f/--file, -a/--all or --retry\n"
    )
    sys.exit(1)

if args.retry:
    args.testlistfile = 'failed_tests.log'

if args.testlistfile:
    # args.testlistfile changes from a string to a pathlib Path object
    try:
        p = Path(args.testlistfile)
        # TODO simplify when Py3.5 dropped
        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            args.testlistfile = p.resolve()
        else:
            args.testlistfile = p.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + 'error: -f/--file testlist file "%s" not found\n' % p
        )
        sys.exit(1)

if args.excludelistfile:
    # args.excludelistfile changes from a string to a pathlib Path object
    try:
        p = Path(args.excludelistfile)
        # TODO simplify when Py3.5 dropped
        if sys.version_info.major == 3 and sys.version_info.minor < 6:
            args.excludelistfile = p.resolve()
        else:
            args.excludelistfile = p.resolve(strict=True)
    except FileNotFoundError:
        sys.stderr.write(
            parser.format_usage()
            + 'error: --exclude-list file "%s" not found\n' % p
        )
        sys.exit(1)

if args.jobs == 0:
    try:
        # on Linux, check available rather than physical CPUs
        args.jobs = len(os.sched_getaffinity(0))
    except AttributeError:
        # Windows
        args.jobs = os.cpu_count()

# sanity check
if args.jobs == 0:
    sys.stderr.write(
        parser.format_usage()
        + "Unable to detect CPU count, give -j a non-zero value\n"
    )
    sys.exit(1)

if args.jobs > 1 or args.output:
    # 1. don't let tests write stdout/stderr directly if multi-job,
    # else outputs will interleave and be hard to read.
    # 2. If we're going to write a logfile, we also need to catch the output.
    catch_output = True

if not args.printcommand:
    suppress_output = catch_output = True

if args.verbose:
    os.environ['TESTCMD_VERBOSE'] = str(args.verbose)

if args.short_progress:
    args.print_progress = True
    suppress_output = catch_output = True

if args.debug:
    # TODO: add a way to pass a specific debugger
    debug = "pdb"

if args.exec:
    scons = args.exec

# --- setup stdout/stderr ---
class Unbuffered:
    def __init__(self, file):
        self.file = file

    def write(self, arg):
        self.file.write(arg)
        self.file.flush()

    def __getattr__(self, attr):
        return getattr(self.file, attr)

sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

# possible alternative: switch to using print, and:
# print = functools.partial(print, flush=True)

if args.output:
    class Tee:
        def __init__(self, openfile, stream):
            self.file = openfile
            self.stream = stream

        def write(self, data):
            self.file.write(data)
            self.stream.write(data)

        def flush(self):
            self.file.flush()
            self.stream.flush()

    logfile = open(args.output, 'w')
    # this is not ideal: we monkeypatch stdout/stderr a second time
    # (already did for Unbuffered), so here we can't easily detect what
    # state we're in on closedown. Just hope it's okay...
    sys.stdout = Tee(logfile, sys.stdout)
    sys.stderr = Tee(logfile, sys.stderr)

# --- define helpers ----
if sys.platform == 'win32':
    # thanks to Eryk Sun for this recipe
    import ctypes

    shlwapi = ctypes.OleDLL('shlwapi')
    shlwapi.AssocQueryStringW.argtypes = (
        ctypes.c_ulong,  # flags
        ctypes.c_ulong,  # str
        ctypes.c_wchar_p,  # pszAssoc
        ctypes.c_wchar_p,  # pszExtra
        ctypes.c_wchar_p,  # pszOut
        ctypes.POINTER(ctypes.c_ulong),  # pcchOut
    )

    ASSOCF_NOTRUNCATE = 0x00000020
    ASSOCF_INIT_IGNOREUNKNOWN = 0x00000400
    ASSOCSTR_COMMAND = 1
    ASSOCSTR_EXECUTABLE = 2
    E_POINTER = ctypes.c_long(0x80004003).value

    def get_template_command(filetype, verb=None):
        flags = ASSOCF_INIT_IGNOREUNKNOWN | ASSOCF_NOTRUNCATE
        assoc_str = ASSOCSTR_COMMAND
        cch = ctypes.c_ulong(260)
        while True:
            buf = (ctypes.c_wchar * cch.value)()
            try:
                shlwapi.AssocQueryStringW(
                    flags, assoc_str, filetype, verb, buf, ctypes.byref(cch)
                )
            except OSError as e:
                if e.winerror != E_POINTER:
                    raise
                continue
            break
        return buf.value
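
    # For ASSOCSTR_COMMAND this typically returns the command template that is
    # registered for the file type (for .py, normally a path to a Python
    # interpreter or launcher plus argument placeholders); the check further
    # below only looks for the substring "py" in that template.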

if not catch_output:
    # Without any output suppressed, we let the subprocess
    # write its stuff freely to stdout/stderr.

    def spawn_it(command_args, env):
        cp = subprocess.run(command_args, shell=False, env=env)
        return cp.stdout, cp.stderr, cp.returncode

else:
    # Else, we catch the output of both pipes...
    if args.allow_pipe_files:
        # subprocess.Popen() suffers from a well-known problem: data for
        # stdout/stderr is read into a memory buffer of fixed size (65K),
        # which is not very much. When it fills up, it simply stops letting
        # the child process write to it. The child will then sit and
        # patiently wait to be able to write the rest of its output. Hang!
        # To work around this, we follow a suggestion by Anders Pearson in
        # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
        # and pass temp file objects to Popen() instead of the ubiquitous
        # subprocess.PIPE.

        def spawn_it(command_args, env):
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )

            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                spawned_stdout = tmp_stdout.read()
                spawned_stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()

            # Return values
            return spawned_stderr, spawned_stdout, cp.returncode

    else:
        # We get here only if the user gave the '--nopipefiles'
        # option, meaning the "temp file" approach for
        # subprocess.communicate() above shouldn't be used.
        # They hopefully know what they're doing, but again we have a
        # potential deadlock situation in the following code:
        # if the subprocess writes a lot of data to its stderr,
        # the pipe will fill up (nobody's reading it yet) and the
        # subprocess will wait for someone to read it.
        # Meanwhile the parent process is trying to read the child's stdout
        # (but the subprocess isn't writing anything there).
        # Hence a deadlock.
        # There be dragons here! Better don't use this!

        def spawn_it(command_args, env):
            cp = subprocess.run(
                command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            return cp.stdout, cp.stderr, cp.returncode


class RuntestBase(ABC):
    """ Base class for tests """
    def __init__(self, path, num, spe=None):
        self.path = path
        self.num = num
        self.stdout = self.stderr = self.status = None
        self.abspath = os.path.abspath(path)
        self.command_args = []
        self.command_str = ""
        self.test_time = self.total_time = 0
        if spe:
            for d in spe:
                f = os.path.join(d, path)
                if os.path.isfile(f):
                    self.abspath = f
                    break

    @abstractmethod
    def execute(self):
        pass


class SystemExecutor(RuntestBase):
    """ Test class for tests executed with spawn_it() """
    def execute(self, env):
        self.stderr, self.stdout, s = spawn_it(self.command_args, env)
        self.status = s
        if s < 0 or s > 2:
            sys.stdout.write("Unexpected exit status %d\n" % s)


class PopenExecutor(RuntestBase):
421 """ Test class for tests executed with Popen
423 A bit of a misnomer as the Popen call is now wrapped
424 by calling subprocess.run (behind the covers uses Popen.
425 Very similar to SystemExecutor, but doesn't allow for not catching
426 the output).
    # For an explanation of the following 'if ... else'
    # and the 'allow_pipe_files' option, please check out the
    # definition of spawn_it() above.
    if args.allow_pipe_files:

        def execute(self, env):
            # Create temporary files
            tmp_stdout = tempfile.TemporaryFile(mode='w+t')
            tmp_stderr = tempfile.TemporaryFile(mode='w+t')
            # Start subprocess...
            cp = subprocess.run(
                self.command_args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
                shell=False,
                env=env,
            )
            self.status = cp.returncode

            try:
                # Rewind to start of files
                tmp_stdout.seek(0)
                tmp_stderr.seek(0)
                # Read output
                self.stdout = tmp_stdout.read()
                self.stderr = tmp_stderr.read()
            finally:
                # Remove temp files by closing them
                tmp_stdout.close()
                tmp_stderr.close()
    else:

        def execute(self, env):
            cp = subprocess.run(
                self.command_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=env,
            )
            self.status, self.stdout, self.stderr = cp.returncode, cp.stdout, cp.stderr


class XML(PopenExecutor):
    """ Test class for tests that will output in scons xml """
    @staticmethod
    def header(f):
        f.write(' <results>\n')

    def write(self, f):
        f.write(' <test>\n')
        f.write(' <file_name>%s</file_name>\n' % self.path)
        f.write(' <command_line>%s</command_line>\n' % self.command_str)
        f.write(' <exit_status>%s</exit_status>\n' % self.status)
        f.write(' <stdout>%s</stdout>\n' % self.stdout)
        f.write(' <stderr>%s</stderr>\n' % self.stderr)
        f.write(' <time>%.1f</time>\n' % self.test_time)
        f.write(' </test>\n')

    def footer(self, f):
        f.write(' <time>%.1f</time>\n' % self.total_time)
        f.write(' </results>\n')

if args.xml:
    Test = XML
else:
    Test = SystemExecutor

# --- start processing ---

if not args.baseline or args.baseline == '.':
    baseline = cwd
elif args.baseline == '-':
500 print("This logic used to checkout from svn. It's been removed. If you used this, please let us know on devel mailing list, IRC, or discord server")
    sys.exit(-1)
else:
    baseline = args.baseline
scons_runtest_dir = baseline

if not args.external:
    scons_script_dir = os.path.join(baseline, 'scripts')
    scons_tools_dir = os.path.join(baseline, 'bin')
    scons_lib_dir = baseline
else:
    scons_script_dir = ''
    scons_tools_dir = ''
    scons_lib_dir = ''

testenv = {
    'SCONS_RUNTEST_DIR': scons_runtest_dir,
    'SCONS_TOOLS_DIR': scons_tools_dir,
    'SCONS_SCRIPT_DIR': scons_script_dir,
    'SCONS_CWD': cwd,
}

if scons:
    # Let the version of SCons that the -x option pointed to find
    # its own modules.
    testenv['SCONS'] = scons
elif scons_lib_dir:
    # Because SCons is really aggressive about finding its modules,
    # it sometimes finds SCons modules elsewhere on the system.
    # This forces SCons to use the modules that are being tested.
    testenv['SCONS_LIB_DIR'] = scons_lib_dir

if args.scons_exec:
    testenv['SCONS_EXEC'] = '1'

if args.external:
    testenv['SCONS_EXTERNAL_TEST'] = '1'

# Insert scons path and path for testing framework to PYTHONPATH
scriptpath = os.path.dirname(os.path.realpath(__file__))
frameworkpath = os.path.join(scriptpath, 'testing', 'framework')
testenv['PYTHONPATH'] = os.pathsep.join((scons_lib_dir, frameworkpath))
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
    testenv['PYTHONPATH'] = testenv['PYTHONPATH'] + os.pathsep + pythonpath

if sys.platform == 'win32':
    # Windows doesn't support "shebang" lines directly (the Python launcher
    # and Windows Store version do, but you have to get them launched first)
    # so to directly launch a script we depend on an assoc for .py to work.
    # Some systems may have none, and in some cases IDE programs take over
    # the assoc. Detect this so the small number of tests affected can skip.
    try:
        python_assoc = get_template_command('.py')
    except OSError:
        python_assoc = None
    if not python_assoc or "py" not in python_assoc:
        testenv['SCONS_NO_DIRECT_SCRIPT'] = '1'

os.environ.update(testenv)

# Clear _JAVA_OPTIONS: Java tools echo it to stderr when run, breaking tests
if '_JAVA_OPTIONS' in os.environ:
    del os.environ['_JAVA_OPTIONS']


# ---[ test discovery ]------------------------------------
# This section figures which tests to run.

# The initial testlist is made by reading from the testlistfile,
# if supplied, or by looking at the test arguments, if supplied,
# or by looking for all test files if the "all" argument is supplied.
# One of the three is required.

# Each test path, whichever of the three sources it comes from,
# specifies either a test file or a directory to search for
# SCons tests. SCons code layout assumes that any file under the 'SCons'
# subdirectory that ends with 'Tests.py' is a unit test, and any Python
# script (*.py) under the 'test' subdirectory is an end-to-end test.
# We need to track these because they are invoked differently.
# find_unit_tests and find_e2e_tests are used for this searching.

# Note that there are some tests under 'SCons' that *begin* with
# 'test_', but they're packaging and installation tests, not
# functional tests, so we don't execute them by default. (They can
# still be executed by hand, though).

# Test exclusions, if specified, are then applied.


def scanlist(testlist):
    """ Process a testlist file """
    tests = [t.strip() for t in testlist if not t.startswith('#')]
    return [t for t in tests if t]
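
# A testlist file (-f/--file, --retry, --exclude-list) is plain text: one test
# path per line, blank lines and '#' comment lines ignored. Hypothetical
# example contents:
#   # e2e tests currently being debugged
#   test/sample-test.py
#   SCons/SampleTests.py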


def find_unit_tests(directory):
    """ Look for unit tests """
    result = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue
        for fname in filenames:
            if fname.endswith("Tests.py"):
                result.append(os.path.join(dirpath, fname))
    return sorted(result)


def find_e2e_tests(directory):
    """ Look for end-to-end tests """
    result = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # Skip folders containing a sconstest.skip file
        if 'sconstest.skip' in filenames:
            continue

        # Slurp in any tests in exclude lists
        excludes = []
        if ".exclude_tests" in filenames:
            p = Path(dirpath).joinpath(".exclude_tests")
            # TODO simplify when Py3.5 dropped
            if sys.version_info.major == 3 and sys.version_info.minor < 6:
                excludefile = p.resolve()
            else:
                excludefile = p.resolve(strict=True)
            with excludefile.open() as f:
                excludes = scanlist(f)

        for fname in filenames:
            if fname.endswith(".py") and fname not in excludes:
                result.append(os.path.join(dirpath, fname))

    return sorted(result)
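
# A directory may also contain a ".exclude_tests" file: same format as a
# testlist file, but the entries are bare file names (no directory part),
# since find_e2e_tests() compares them against each file name in that
# directory.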


# initial selection:
unittests = []
endtests = []
if args.testlistfile:
    with args.testlistfile.open() as f:
        tests = scanlist(f)
else:
    testpaths = []
    if args.all:
        testpaths = ['SCons', 'test']
    elif args.testlist:
        testpaths = args.testlist

    for tp in testpaths:
        # Clean up path so it can match startswith's below
        # remove leading ./ or .\
        if tp.startswith('.') and tp[1] in (os.sep, os.altsep):
            tp = tp[2:]

        for path in glob.glob(tp):
            if os.path.isdir(path):
                if path.startswith(('SCons', 'testing')):
                    unittests.extend(find_unit_tests(path))
                elif path.startswith('test'):
                    endtests.extend(find_e2e_tests(path))
            else:
                if path.endswith("Tests.py"):
                    unittests.append(path)
                elif path.endswith(".py"):
                    endtests.append(path)
    tests = sorted(unittests + endtests)


# Remove exclusions:
if args.e2e_only:
    tests = [t for t in tests if not t.endswith("Tests.py")]
if args.unit_only:
    tests = [t for t in tests if t.endswith("Tests.py")]
if args.excludelistfile:
    with args.excludelistfile.open() as f:
        excludetests = scanlist(f)
    tests = [t for t in tests if t not in excludetests]

if not tests:
    sys.stderr.write(parser.format_usage() + """
error: no tests were found.
Tests can be specified on the command line, read from a file with
the -f/--file option, or discovered with -a/--all to run all tests.
""")
    sys.exit(1)

# ---[ test processing ]-----------------------------------
tests = [Test(t, n + 1) for n, t in enumerate(tests)]

if args.list_only:
    for t in tests:
        sys.stdout.write(t.path + "\n")
    sys.exit(0)

if not args.python:
    if os.name == 'java':
        args.python = os.path.join(sys.prefix, 'jython')
    else:
        args.python = sys.executable
os.environ["python_executable"] = args.python

if args.print_times:

    def print_time(fmt, tm):
        sys.stdout.write(fmt % tm)

else:

    def print_time(fmt, tm):
        pass

time_func = time.perf_counter
total_start_time = time_func()
total_num_tests = len(tests)


def log_result(t, io_lock=None):
    """ log the result of a test.
720 "log" in this case means writing to stdout. Since we might be
721 called from from any of several different threads (multi-job run),
722 we need to lock access to the log to avoid interleaving. The same
723 would apply if output was a file.
725 Args:
726 t (Test): (completed) testcase instance
727 io_lock (threading.lock): (optional) lock to use

    # there is no lock in single-job run, which includes
    # running test/runtest tests from multi-job run, so check.
    if io_lock:
        io_lock.acquire()
    try:
        if suppress_output or catch_output:
            sys.stdout.write(t.headline)
        if not suppress_output:
            if t.stdout:
                print(t.stdout)
            if t.stderr:
                print(t.stderr)
        print_time("Test execution time: %.1f seconds\n", t.test_time)
    finally:
        if io_lock:
            io_lock.release()

    if args.quit_on_failure and t.status == 1:
        print("Exiting due to error")
        print(t.status)
        sys.exit(1)


def run_test(t, io_lock=None, run_async=True):
    """ Run a testcase.

    Builds the command line to give to execute().
    Also the best place to record some information that will be
    used in output, which in some conditions is printed here.

    Args:
        t (Test): testcase instance
        io_lock (threading.Lock): (optional) lock to use
        run_async (bool): whether to run asynchronously
    """

    t.headline = ""
    command_args = []
    if debug:
        command_args.extend(['-m', debug])
    if args.devmode and sys.version_info >= (3, 7, 0):
        command_args.append('-X dev')
    command_args.append(t.path)
    if args.runner and t.path in unittests:
        # For example --runner TestUnit.TAPTestRunner
        command_args.append('--runner ' + args.runner)
    t.command_args = [args.python] + command_args
    t.command_str = " ".join(t.command_args)
    if args.printcommand:
        if args.print_progress:
            t.headline += "%d/%d (%.2f%s) %s\n" % (
                t.num, total_num_tests,
                float(t.num) * 100.0 / float(total_num_tests),
                "%",
                t.command_str,
            )
        else:
            t.headline += t.command_str + "\n"
    if not suppress_output and not catch_output:
        # output is not being caught: print the headline now; otherwise it is
        # deferred to log_result(), after the test is done
        sys.stdout.write(t.headline)
    head, _ = os.path.split(t.abspath)
    fixture_dirs = []
    if head:
        fixture_dirs.append(head)
    fixture_dirs.append(os.path.join(scriptpath, 'test', 'fixture'))

    # Set the list of fixture dirs directly in the environment. Just putting
    # it in os.environ and spawning the process is racy. Make it reliable by
    # overriding the environment passed to execute().
    env = dict(os.environ)
    env['FIXTURE_DIRS'] = os.pathsep.join(fixture_dirs)

    test_start_time = time_func()
    if args.execute_tests:
        t.execute(env)

    t.test_time = time_func() - test_start_time
    log_result(t, io_lock=io_lock)


class RunTest(threading.Thread):
    """ Test Runner class.

    One instance will be created for each job thread in multi-job mode
    """
    def __init__(self, queue=None, io_lock=None, group=None, target=None, name=None):
        super().__init__(group=group, target=target, name=name)
        self.queue = queue
        self.io_lock = io_lock

    def run(self):
        for t in iter(self.queue.get, None):
            run_test(t, io_lock=self.io_lock, run_async=True)
            self.queue.task_done()

if args.jobs > 1:
    print("Running tests using %d jobs" % args.jobs)
    testq = Queue()
    for t in tests:
        testq.put(t)
    testlock = threading.Lock()
    # Start worker threads to consume the queue
    threads = [RunTest(queue=testq, io_lock=testlock) for _ in range(args.jobs)]
    for t in threads:
        t.daemon = True
        t.start()
    # wait on the queue rather than the individual threads
    testq.join()
else:
    for t in tests:
        run_test(t, io_lock=None, run_async=False)

# --- all tests are complete by the time we get here ---
if tests:
    tests[0].total_time = time_func() - total_start_time
    print_time("Total execution time for all tests: %.1f seconds\n", tests[0].total_time)

passed = [t for t in tests if t.status == 0]
fail = [t for t in tests if t.status == 1]
no_result = [t for t in tests if t.status == 2]

# print summaries, but only if multiple tests were run
if len(tests) != 1 and args.execute_tests:
    if passed and args.print_passed_summary:
        if len(passed) == 1:
            sys.stdout.write("\nPassed the following test:\n")
        else:
            sys.stdout.write("\nPassed the following %d tests:\n" % len(passed))
        paths = [x.path for x in passed]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if fail:
        if len(fail) == 1:
            sys.stdout.write("\nFailed the following test:\n")
        else:
            sys.stdout.write("\nFailed the following %d tests:\n" % len(fail))
        paths = [x.path for x in fail]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")
    if no_result:
        if len(no_result) == 1:
            sys.stdout.write("\nNO RESULT from the following test:\n")
        else:
            sys.stdout.write("\nNO RESULT from the following %d tests:\n" % len(no_result))
        paths = [x.path for x in no_result]
        sys.stdout.write("\t" + "\n\t".join(paths) + "\n")

# save the fails to a file
if args.error_log:
    with open(args.error_log, "w") as f:
        if fail:
            paths = [x.path for x in fail]
            for test in paths:
                print(test, file=f)
        # if there are no fails, file will be cleared

if args.xml:
    if args.output == '-':
        f = sys.stdout
    else:
        f = open(args.xml, 'w')
    tests[0].header(f)
    #f.write("test_result = [\n")
    for t in tests:
        t.write(f)
    tests[0].footer(f)
    #f.write("];\n")
    if args.output != '-':
        f.close()

if args.output:
    if isinstance(sys.stdout, Tee):
        sys.stdout.file.close()
    if isinstance(sys.stderr, Tee):
        sys.stderr.file.close()

if fail:
    sys.exit(1)
elif no_result and args.dont_ignore_skips:
    # if no fails, but skips were found
    sys.exit(2)
else:
    sys.exit(0)

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: