#!/usr/bin/env @PYTHON_SHEBANG@

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2012, 2018 by Delphix. All rights reserved.
# Copyright (c) 2019 Datto Inc.
#
# This script must remain compatible with Python 3.6+.
#

import os
import sys
import ctypes
import re
import configparser

from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import check_output
from threading import Timer
from time import time, CLOCK_MONOTONIC
from os.path import exists

BASEDIR = '/var/tmp/test_results'
TESTDIR = '/usr/share/zfs/'
KMEMLEAK_FILE = '/sys/kernel/debug/kmemleak'
KILL = 'kill'
TRUE = 'true'
SUDO = 'sudo'
LOG_FILE = 'LOG_FILE'
LOG_OUT = 'LOG_OUT'
LOG_ERR = 'LOG_ERR'
LOG_FILE_OBJ = None

try:
    from time import monotonic as monotonic_time
except ImportError:
    class timespec(ctypes.Structure):
        _fields_ = [
            ('tv_sec', ctypes.c_long),
            ('tv_nsec', ctypes.c_long)
        ]

    librt = ctypes.CDLL('librt.so.1', use_errno=True)
    clock_gettime = librt.clock_gettime
    clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]

    def monotonic_time():
        t = timespec()
        if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
            errno_ = ctypes.get_errno()
            raise OSError(errno_, os.strerror(errno_))
        return t.tv_sec + t.tv_nsec * 1e-9
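
# Minimal usage sketch (illustrative only): the rest of this script measures
# runtimes as deltas of monotonic_time(), so wall-clock adjustments (e.g. an
# NTP step) cannot distort timeout accounting:
#
#   start = monotonic_time()
#   ...                       # run the test
#   elapsed = monotonic_time() - start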


class Result(object):
    total = 0
    runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0, 'RERAN': 0}

    def __init__(self):
        self.starttime = None
        self.returncode = None
        self.runtime = ''
        self.stdout = []
        self.stderr = []
        self.kmemleak = ''
        self.result = ''

    def done(self, proc, killed, reran):
        """
        Finalize the results of this Cmd.
        """
        Result.total += 1
        m, s = divmod(monotonic_time() - self.starttime, 60)
        self.runtime = '%02d:%02d' % (m, s)
        self.returncode = proc.returncode
        if reran is True:
            Result.runresults['RERAN'] += 1
        if killed:
            self.result = 'KILLED'
            Result.runresults['KILLED'] += 1
        elif len(self.kmemleak) > 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1
        elif self.returncode == 0:
            self.result = 'PASS'
            Result.runresults['PASS'] += 1
        elif self.returncode == 4:
            self.result = 'SKIP'
            Result.runresults['SKIP'] += 1
        elif self.returncode != 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1
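
    # How done() classifies a finished command (derived from the branches
    # above; KILLED takes precedence, then any kmemleak report):
    #   killed                 -> KILLED
    #   kmemleak report seen   -> FAIL
    #   returncode == 0        -> PASS
    #   returncode == 4        -> SKIP
    #   any other returncode   -> FAIL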


class Output(object):
    """
    This class is a slightly modified version of the 'Stream' class found
    here: http://goo.gl/aSGfv
    """
    def __init__(self, stream):
        self.stream = stream
        self._buf = b''
        self.lines = []

    def fileno(self):
        return self.stream.fileno()

    def read(self, drain=0):
        """
        Read from the file descriptor. If 'drain' set, read until EOF.
        """
        while self._read() is not None:
            if not drain:
                break

    def _read(self):
        """
        Read up to 4k of data from this output stream. Collect the output
        up to the last newline, and append it to any leftover data from a
        previous call. The lines are stored as a (timestamp, data) tuple
        for easy sorting/merging later.
        """
        fd = self.fileno()
        buf = os.read(fd, 4096)
        if not buf:
            return None
        if b'\n' not in buf:
            self._buf += buf
            return []

        buf = self._buf + buf
        tmp, rest = buf.rsplit(b'\n', 1)
        self._buf = rest
        now = datetime.now()
        rows = tmp.split(b'\n')
        self.lines += [(now, r) for r in rows]
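
        # Illustrative stored form: each captured line is a (datetime, bytes)
        # tuple, e.g. (datetime(...), b'some output'), which lets Cmd.log()
        # merge stdout and stderr chronologically by sorting on the timestamp.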


class Cmd(object):
    verified_users = []

    def __init__(self, pathname, identifier=None, outputdir=None,
                 timeout=None, user=None, tags=None):
        self.pathname = pathname
        self.identifier = identifier
        self.outputdir = outputdir or 'BASEDIR'
        """
        The timeout for tests is measured in wall-clock time
        """
        self.timeout = timeout
        self.user = user or ''
        self.killed = False
        self.reran = None
        self.result = Result()

        if self.timeout is None:
            self.timeout = 60

    def __str__(self):
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %d
User: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user)

    def kill_cmd(self, proc, keyboard_interrupt=False):
        """
        Kill a running command due to timeout, or ^C from the keyboard. If
        sudo is required, this user was verified previously.
        """
        self.killed = True
        do_sudo = len(self.user) != 0
        signal = '-TERM'

        cmd = [SUDO, KILL, signal, str(proc.pid)]
        if not do_sudo:
            del cmd[0]

        try:
            kp = Popen(cmd)
            kp.wait()
        except Exception:
            pass

        """
        If this is not a user-initiated kill and the test has not been
        rerun before, consider whether the test needs to be rerun: if the
        test spent some time hibernating and did not run for its whole
        timeout before being killed, rerun it once.
        """
        if keyboard_interrupt is False and self.reran is None:
            runtime = monotonic_time() - self.result.starttime
            if int(self.timeout) > runtime:
                self.killed = False
                self.reran = False
                # Rerun once, without kmemleak scanning or kmsg logging
                self.run(False, False, False)
                self.reran = True

    def update_cmd_privs(self, cmd, user):
        """
        If a user has been specified to run this Cmd and we're not already
        running as that user, prepend the appropriate sudo command to run
        as that user.
        """
        me = getpwuid(os.getuid())

        if not user or user is me:
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'
            return cmd

        if not os.path.isfile(cmd):
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'

        ret = '%s -E -u %s %s' % (SUDO, user, cmd)
        return ret.split(' ')
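
        # Illustrative results (hypothetical path and user): with user='ztest'
        # the return value is an argv list such as
        #   ['sudo', '-E', '-u', 'ztest', '/opt/tests/mytest.ksh']
        # while with no user the pathname string itself is returned, with a
        # '.ksh'/'.sh' suffix appended when that executable variant exists.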

    def collect_output(self, proc):
        """
        Read from stdout/stderr as data becomes available, until the
        process is no longer running. Return the lines from the stdout and
        stderr Output objects.
        """
        out = Output(proc.stdout)
        err = Output(proc.stderr)
        res = []
        while proc.returncode is None:
            proc.poll()
            res = select([out, err], [], [], .1)
            for fd in res[0]:
                fd.read()
        for fd in res[0]:
            fd.read(drain=1)

        return out.lines, err.lines

    def run(self, dryrun, kmemleak, kmsg):
        """
        This is the main function that runs each individual test.
        Determine whether or not the command requires sudo, and modify it
        if needed. Run the command, and update the result object.
        """
        if dryrun is True:
            print(self)
            return

        privcmd = self.update_cmd_privs(self.pathname, self.user)
        try:
            old = os.umask(0)
            if not os.path.isdir(self.outputdir):
                os.makedirs(self.outputdir, mode=0o777)
            os.umask(old)
        except OSError as e:
            fail('%s' % e)

        """
        Log each test we run to /dev/kmsg (on Linux), so if there's a kernel
        warning we'll be able to match it up to a particular test.
        """
        if kmsg is True and exists("/dev/kmsg"):
            try:
                kp = Popen([SUDO, "sh", "-c",
                            f"echo ZTS run {self.pathname} > /dev/kmsg"])
                kp.wait()
            except Exception:
                pass

        self.result.starttime = monotonic_time()

        if kmemleak:
            cmd = f'{SUDO} sh -c "echo clear > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
        # Allow a special timeout value of 0 to mean infinity
        if int(self.timeout) == 0:
            self.timeout = sys.maxsize / (10 ** 9)
        t = Timer(int(self.timeout), self.kill_cmd, [proc])

        try:
            t.start()
            self.result.stdout, self.result.stderr = self.collect_output(proc)

            if kmemleak:
                cmd = f'{SUDO} sh -c "echo scan > {KMEMLEAK_FILE}"'
                check_output(cmd, shell=True)
                cmd = f'{SUDO} cat {KMEMLEAK_FILE}'
                self.result.kmemleak = check_output(cmd, shell=True)
        except KeyboardInterrupt:
            self.kill_cmd(proc, True)
            fail('\nRun terminated at user request.')
        finally:
            t.cancel()

        if self.reran is not False:
            self.result.done(proc, self.killed, self.reran)

    def skip(self):
        """
        Initialize enough of the test result that we can log a skipped
        command.
        """
        Result.total += 1
        Result.runresults['SKIP'] += 1
        self.result.stdout = self.result.stderr = []
        self.result.starttime = monotonic_time()
        m, s = divmod(monotonic_time() - self.result.starttime, 60)
        self.result.runtime = '%02d:%02d' % (m, s)
        self.result.result = 'SKIP'

    def log(self, options, suppress_console=False):
        """
        This function is responsible for writing all output. This includes
        the console output, the logfile of all results (with timestamped
        merged stdout and stderr), and for each test, the unmodified
        stdout/stderr/merged in its own file.
        """

        logname = getpwuid(os.getuid()).pw_name
        rer = ''
        if self.reran is True:
            rer = ' (RERAN)'
        user = ' (run as %s)' % (self.user if len(self.user) else logname)
        if self.identifier:
            msga = 'Test (%s): %s%s ' % (self.identifier, self.pathname, user)
        else:
            msga = 'Test: %s%s ' % (self.pathname, user)
        msgb = '[%s] [%s]%s\n' % (self.result.runtime, self.result.result, rer)
        pad = ' ' * (80 - (len(msga) + len(msgb)))
        result_line = msga + pad + msgb

        # The result line is always written to the log file. If -q was
        # specified only failures are written to the console, otherwise
        # the result line is written to the console. The console output
        # may be suppressed by calling log() with suppress_console=True.
        write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE)
        if not suppress_console:
            if not options.quiet:
                write_log(result_line, LOG_OUT)
            elif options.quiet and self.result.result != 'PASS':
                write_log(result_line, LOG_OUT)

        lines = sorted(self.result.stdout + self.result.stderr,
                       key=lambda x: x[0])

        # Write timestamped output (stdout and stderr) to the logfile
        for dt, line in lines:
            timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11],
                                  encoding='utf-8')
            write_log(b'%s %s\n' % (timestamp, line), LOG_FILE)

        # Write the separate stdout/stderr/merged files, if the data exists
        if len(self.result.stdout):
            with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out:
                for _, line in self.result.stdout:
                    os.write(out.fileno(), b'%s\n' % line)
        if len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err:
                for _, line in self.result.stderr:
                    os.write(err.fileno(), b'%s\n' % line)
        if len(self.result.stdout) and len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged:
                for _, line in lines:
                    os.write(merged.fileno(), b'%s\n' % line)
        if len(self.result.kmemleak):
            with open(os.path.join(self.outputdir, 'kmemleak'), 'wb') as kmem:
                kmem.write(self.result.kmemleak)
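
        # Per-test artifacts written above (when the corresponding data
        # exists): 'stdout', 'stderr' and 'merged' under self.outputdir, plus
        # 'kmemleak' when a leak report was captured; the result line and the
        # timestamped output also go to the shared run logfile via write_log().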


class Test(Cmd):
    props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post',
             'post_user', 'failsafe', 'failsafe_user', 'tags']

    def __init__(self, pathname,
                 pre=None, pre_user=None, post=None, post_user=None,
                 failsafe=None, failsafe_user=None, tags=None, **kwargs):
        super(Test, self).__init__(pathname, **kwargs)
        self.pre = pre or ''
        self.pre_user = pre_user or ''
        self.post = post or ''
        self.post_user = post_user or ''
        self.failsafe = failsafe or ''
        self.failsafe_user = failsafe_user or ''
        self.tags = tags or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %d
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user,
       self.pre, pre_user, self.post, post_user, self.failsafe,
       failsafe_user, self.tags)

    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and Test. Omit the Test from
        this run if there are any problems.
        """
        files = [self.pre, self.pathname, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in files if len(f)]:
            if not verify_file(f):
                write_log("Warning: Test '%s' not added to this run because"
                          " it failed verification.\n" % f, LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding Test '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        return True

    def run(self, options):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip this Test. Run the post script regardless.
        If the Test is killed, also run the failsafe script.
        """
        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, identifier=self.identifier, outputdir=odir,
                      timeout=self.timeout, user=self.pre_user)
        test = Cmd(self.pathname, identifier=self.identifier,
                   outputdir=self.outputdir, timeout=self.timeout,
                   user=self.user)
        odir = os.path.join(self.outputdir, os.path.basename(self.failsafe))
        failsafe = Cmd(self.failsafe, identifier=self.identifier,
                       outputdir=odir, timeout=self.timeout,
                       user=self.failsafe_user)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, identifier=self.identifier, outputdir=odir,
                       timeout=self.timeout, user=self.post_user)

        cont = True
        if len(pretest.pathname):
            pretest.run(options.dryrun, False, options.kmsg)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        if cont:
            test.run(options.dryrun, options.kmemleak, options.kmsg)
            if test.result.result == 'KILLED' and len(failsafe.pathname):
                failsafe.run(options.dryrun, False, options.kmsg)
                failsafe.log(options, suppress_console=True)
        else:
            test.skip()

        test.log(options)

        if len(posttest.pathname):
            posttest.run(options.dryrun, False, options.kmsg)
            posttest.log(options)


class TestGroup(Test):
    props = Test.props + ['tests']

    def __init__(self, pathname, tests=None, **kwargs):
        super(TestGroup, self).__init__(pathname, **kwargs)
        self.tests = tests or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Tests: %s
Timeout: %s
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.tests,
       self.timeout, self.user, self.pre, pre_user, self.post, post_user,
       self.failsafe, failsafe_user, self.tags)

    def filter(self, keeplist):
        self.tests = [x for x in self.tests if x in keeplist]

    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and tests in this TestGroup.
        Omit the TestGroup entirely, or simply delete the relevant tests in the
        group, if that's all that's required.
        """
        # If the pre/post/failsafe scripts are relative pathnames, convert to
        # absolute, so they stand a chance of passing verification.
        if len(self.pre) and not os.path.isabs(self.pre):
            self.pre = os.path.join(self.pathname, self.pre)
        if len(self.post) and not os.path.isabs(self.post):
            self.post = os.path.join(self.pathname, self.post)
        if len(self.failsafe) and not os.path.isabs(self.failsafe):
            self.failsafe = os.path.join(self.pathname, self.failsafe)

        auxfiles = [self.pre, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in auxfiles if len(f)]:
            if f != self.failsafe and self.pathname != os.path.dirname(f):
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' exists in a different "
                          "directory.\n" % (self.pathname, f), LOG_ERR)
                return False

            if not verify_file(f):
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' failed verification.\n" %
                          (self.pathname, f), LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding TestGroup '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        # If one of the tests is invalid, delete it, log it, and drive on.
        for test in self.tests:
            if not verify_file(os.path.join(self.pathname, test)):
                del self.tests[self.tests.index(test)]
                write_log("Warning: Test '%s' removed from TestGroup '%s' "
                          "because it failed verification.\n" %
                          (test, self.pathname), LOG_ERR)

        return len(self.tests) != 0

    def run(self, options):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip all the tests in this TestGroup. Run the
        post script regardless. Run the failsafe script when a test is killed.
        """
        # tags assigned to this test group also include the test names
        if options.tags and not set(self.tags).intersection(set(options.tags)):
            return

        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout,
                      user=self.pre_user, identifier=self.identifier)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout,
                       user=self.post_user, identifier=self.identifier)

        cont = True
        if len(pretest.pathname):
            pretest.run(options.dryrun, False, options.kmsg)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        for fname in self.tests:
            odir = os.path.join(self.outputdir, fname)
            test = Cmd(os.path.join(self.pathname, fname), outputdir=odir,
                       timeout=self.timeout, user=self.user,
                       identifier=self.identifier)
            odir = os.path.join(odir, os.path.basename(self.failsafe))
            failsafe = Cmd(self.failsafe, outputdir=odir, timeout=self.timeout,
                           user=self.failsafe_user, identifier=self.identifier)
            if cont:
                test.run(options.dryrun, options.kmemleak, options.kmsg)
                if test.result.result == 'KILLED' and len(failsafe.pathname):
                    failsafe.run(options.dryrun, False, options.kmsg)
                    failsafe.log(options, suppress_console=True)
            else:
                test.skip()

            test.log(options)

        if len(posttest.pathname):
            posttest.run(options.dryrun, False, options.kmsg)
            posttest.log(options)


class TestRun(object):
    props = ['quiet', 'outputdir']

    def __init__(self, options):
        self.tests = {}
        self.testgroups = {}
        self.starttime = time()
        self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
        self.outputdir = os.path.join(options.outputdir, self.timestamp)
        self.setup_logging(options)
        self.defaults = [
            ('outputdir', BASEDIR),
            ('quiet', False),
            ('timeout', 60),
            ('user', ''),
            ('pre', ''),
            ('pre_user', ''),
            ('post', ''),
            ('post_user', ''),
            ('failsafe', ''),
            ('failsafe_user', ''),
            ('tags', [])
        ]

    def __str__(self):
        s = 'TestRun:\n outputdir: %s\n' % self.outputdir
        s += 'TESTS:\n'
        for key in sorted(self.tests.keys()):
            s += '%s%s' % (self.tests[key].__str__(), '\n')
        s += 'TESTGROUPS:\n'
        for key in sorted(self.testgroups.keys()):
            s += '%s%s' % (self.testgroups[key].__str__(), '\n')
        return s

    def addtest(self, pathname, options):
        """
        Create a new Test, and apply any properties that were passed in
        from the command line. If it passes verification, add it to the
        TestRun.
        """
        test = Test(pathname)
        for prop in Test.props:
            setattr(test, prop, getattr(options, prop))

        if test.verify():
            self.tests[pathname] = test

    def addtestgroup(self, dirname, filenames, options):
        """
        Create a new TestGroup, and apply any properties that were passed
        in from the command line. If it passes verification, add it to the
        TestRun.
        """
        if dirname not in self.testgroups:
            testgroup = TestGroup(dirname)
            for prop in Test.props:
                setattr(testgroup, prop, getattr(options, prop))

            # Prevent pre/post/failsafe scripts from running as regular tests
            for f in [testgroup.pre, testgroup.post, testgroup.failsafe]:
                if f in filenames:
                    del filenames[filenames.index(f)]

            self.testgroups[dirname] = testgroup
            self.testgroups[dirname].tests = sorted(filenames)

            testgroup.verify()

    def filter(self, keeplist):
        for group in list(self.testgroups.keys()):
            if group not in keeplist:
                del self.testgroups[group]
                continue

            g = self.testgroups[group]

            if g.pre and os.path.basename(g.pre) in keeplist[group]:
                continue

            g.filter(keeplist[group])

        for test in list(self.tests.keys()):
            directory, base = os.path.split(test)
            if directory not in keeplist or base not in keeplist[directory]:
                del self.tests[test]

    def read(self, options):
        """
        Read in the specified runfiles, and apply the TestRun properties
        listed in the 'DEFAULT' section to our TestRun. Then read each
        section, and apply the appropriate properties to the Test or
        TestGroup. Properties from individual sections override those set
        in the 'DEFAULT' section. If the Test or TestGroup passes
        verification, add it to the TestRun.
        """
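
        # Illustrative runfile layout accepted by this parser (section and
        # test names here are made up; real runfiles ship with the test
        # suite):
        #
        #   [DEFAULT]
        #   quiet = False
        #   outputdir = /var/tmp/test_results
        #   timeout = 600
        #   post_user = root
        #
        #   [tests/functional/example:ident]
        #   tests = ['example_001_pos', 'example_002_neg']
        #   tags = ['functional', 'example']
        #
        # A section containing a 'tests' option becomes a TestGroup; any other
        # section is treated as a single Test. The optional ':ident' suffix
        # sets the identifier shown in the result lines.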
        config = configparser.RawConfigParser()
        parsed = config.read(options.runfiles)
        failed = options.runfiles - set(parsed)
        if len(failed):
            files = ' '.join(sorted(failed))
            fail("Couldn't read config files: %s" % files)

        for opt in TestRun.props:
            if config.has_option('DEFAULT', opt):
                setattr(self, opt, config.get('DEFAULT', opt))
        self.outputdir = os.path.join(self.outputdir, self.timestamp)

        testdir = options.testdir

        for section in config.sections():
            if 'tests' in config.options(section):
                parts = section.split(':', 1)
                sectiondir = parts[0]
                identifier = parts[1] if len(parts) == 2 else None
                if os.path.isdir(sectiondir):
                    pathname = sectiondir
                elif os.path.isdir(os.path.join(testdir, sectiondir)):
                    pathname = os.path.join(testdir, sectiondir)
                else:
                    pathname = sectiondir

                testgroup = TestGroup(os.path.abspath(pathname),
                                      identifier=identifier)
                for prop in TestGroup.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'tags':
                                setattr(testgroup, prop,
                                        eval(config.get(sect, prop)))
                            elif prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(testgroup, prop,
                                        os.path.join(testdir, failsafe))
                            else:
                                setattr(testgroup, prop,
                                        config.get(sect, prop))

                # Repopulate tests using eval to convert the string to a list
                testgroup.tests = eval(config.get(section, 'tests'))

                if testgroup.verify():
                    self.testgroups[section] = testgroup
            else:
                test = Test(section)
                for prop in Test.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(test, prop,
                                        os.path.join(testdir, failsafe))
                            else:
                                setattr(test, prop, config.get(sect, prop))

                if test.verify():
                    self.tests[section] = test

    def write(self, options):
        """
        Create a configuration file for editing and later use. The
        'DEFAULT' section of the config file is created from the
        properties that were specified on the command line. Tests are
        simply added as sections that inherit everything from the
        'DEFAULT' section. TestGroups are the same, except they get an
        option including all the tests to run in that directory.
        """

        defaults = dict([(prop, getattr(options, prop)) for prop, _ in
                         self.defaults])
        config = configparser.RawConfigParser(defaults)

        for test in sorted(self.tests.keys()):
            config.add_section(test)
            for prop in Test.props:
                if prop not in self.props:
                    config.set(test, prop,
                               getattr(self.tests[test], prop))

        for testgroup in sorted(self.testgroups.keys()):
            config.add_section(testgroup)
            config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
            for prop in TestGroup.props:
                if prop not in self.props:
                    config.set(testgroup, prop,
                               getattr(self.testgroups[testgroup], prop))

        try:
            with open(options.template, 'w') as f:
                return config.write(f)
        except IOError:
            fail('Could not open \'%s\' for writing.' % options.template)

    def complete_outputdirs(self):
        """
        Collect all the pathnames for Tests, and TestGroups. Work
        backwards one pathname component at a time, to create a unique
        directory name in which to deposit test output. Tests will be able
        to write output files directly in the newly modified outputdir.
        TestGroups will be able to create one subdirectory per test in the
        outputdir, and are guaranteed uniqueness because a group can only
        contain files in one directory. Pre and post tests will create a
        directory rooted at the outputdir of the Test or TestGroup in
        question for their output. Failsafe scripts will create a directory
        rooted at the outputdir of each Test for their output.
        """
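
        # Illustrative example (hypothetical paths): for the two tests
        #   /a/b/group1/setup  and  /a/b/group2/setup
        # a single trailing component ('setup') is not unique, so two
        # components are used, giving outputdirs <base>/group1/setup and
        # <base>/group2/setup.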
        done = False
        components = 0
        tmp_dict = dict(list(self.tests.items()) +
                        list(self.testgroups.items()))
        total = len(tmp_dict)
        base = self.outputdir

        while not done:
            paths = []
            components -= 1
            for testfile in list(tmp_dict.keys()):
                uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
                if uniq not in paths:
                    paths.append(uniq)
                    tmp_dict[testfile].outputdir = os.path.join(base, uniq)
                else:
                    break
            done = total == len(paths)

    def setup_logging(self, options):
        """
        This function creates the output directory and gets a file object
        for the logfile. This function must be called before write_log()
        can be used.
        """
        if options.dryrun is True:
            return

        global LOG_FILE_OBJ
        if not options.template:
            try:
                old = os.umask(0)
                os.makedirs(self.outputdir, mode=0o777)
                os.umask(old)
                filename = os.path.join(self.outputdir, 'log')
                LOG_FILE_OBJ = open(filename, buffering=0, mode='wb')
            except OSError as e:
                fail('%s' % e)

    def run(self, options):
        """
        Walk through all the Tests and TestGroups, calling run().
        """
        try:
            os.chdir(self.outputdir)
        except OSError:
            fail('Could not change to directory %s' % self.outputdir)
        # make a symlink to the output for the currently running test
        logsymlink = os.path.join(self.outputdir, '../current')
        if os.path.islink(logsymlink):
            os.unlink(logsymlink)
        if not os.path.exists(logsymlink):
            os.symlink(self.outputdir, logsymlink)
        else:
            write_log('Could not make a symlink to directory %s\n' %
                      self.outputdir, LOG_ERR)

        if options.kmemleak:
            cmd = f'{SUDO} sh -c "echo scan=0 > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        iteration = 0
        while iteration < options.iterations:
            for test in sorted(self.tests.keys()):
                self.tests[test].run(options)
            for testgroup in sorted(self.testgroups.keys()):
                self.testgroups[testgroup].run(options)
            iteration += 1

    def summary(self):
        if Result.total == 0:
            return 2

        print('\nResults Summary')
        for key in list(Result.runresults.keys()):
            if Result.runresults[key] != 0:
                print('%s\t% 4d' % (key, Result.runresults[key]))

        m, s = divmod(time() - self.starttime, 60)
        h, m = divmod(m, 60)
        print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))
        print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
                                            float(Result.total)) * 100))
        print('Log directory:\t%s' % self.outputdir)

        if Result.runresults['FAIL'] > 0:
            return 1

        if Result.runresults['KILLED'] > 0:
            return 1

        if Result.runresults['RERAN'] > 0:
            return 3

        return 0
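
    # Exit status produced by summary() (and hence by the script):
    #   0 - run completed with no failures, kills or reruns
    #   1 - at least one test failed or was killed
    #   2 - no tests were run at all
    #   3 - no failures or kills, but at least one test was rerun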


def write_log(msg, target):
    """
    Write the provided message to standard out, standard error or
    the logfile. If specifying LOG_FILE, then `msg` must be a bytes-like
    object. This way we can still handle output from tests that
    may be in unexpected encodings.
    """
    if target == LOG_OUT:
        os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_ERR:
        os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_FILE:
        os.write(LOG_FILE_OBJ.fileno(), msg)
    else:
        fail('log_msg called with unknown target "%s"' % target)
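
# Illustrative usage: LOG_OUT/LOG_ERR expect str (encoded to UTF-8 above),
# while LOG_FILE expects a bytes-like object:
#   write_log('Test run starting\n', LOG_OUT)
#   write_log(b'raw captured output\n', LOG_FILE)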


def verify_file(pathname):
    """
    Verify that the supplied pathname is an executable regular file.
    """
    if os.path.isdir(pathname) or os.path.islink(pathname):
        return False

    for ext in '', '.ksh', '.sh':
        script_path = pathname + ext
        if os.path.isfile(script_path) and os.access(script_path, os.X_OK):
            return True

    return False


def verify_user(user):
    """
    Verify that the specified user exists on this system, and can execute
    sudo without being prompted for a password.
    """
    testcmd = [SUDO, '-n', '-u', user, TRUE]

    if user in Cmd.verified_users:
        return True

    try:
        getpwnam(user)
    except KeyError:
        write_log("Warning: user '%s' does not exist.\n" % user,
                  LOG_ERR)
        return False

    p = Popen(testcmd)
    p.wait()
    if p.returncode != 0:
        write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user,
                  LOG_ERR)
        return False
    else:
        Cmd.verified_users.append(user)

    return True


def find_tests(testrun, options):
    """
    For the given list of pathnames, add files as Tests. For directories,
    if do_groups is True, add the directory as a TestGroup. If False,
    recursively search for executable files.
    """

    for p in sorted(options.pathnames):
        if os.path.isdir(p):
            for dirname, _, filenames in os.walk(p):
                if options.do_groups:
                    testrun.addtestgroup(dirname, filenames, options)
                else:
                    for f in sorted(filenames):
                        testrun.addtest(os.path.join(dirname, f), options)
        else:
            testrun.addtest(p, options)


def filter_tests(testrun, options):
    try:
        fh = open(options.logfile, "r")
    except Exception as e:
        fail('%s' % e)

    failed = {}
    while True:
        line = fh.readline()
        if not line:
            break
        m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
        if not m:
            continue
        group, test = m.group(1, 2)
        try:
            failed[group].append(test)
        except KeyError:
            failed[group] = [test]
    fh.close()

    testrun.filter(failed)
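
    # Illustrative logfile line matched by the pattern above (path is made
    # up); the captured groups become the group directory and test name:
    #   Test: /var/tmp/tests/functional/foo/bar (run as root) [00:05] [FAIL]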


def fail(retstr, ret=1):
    print('%s: %s' % (sys.argv[0], retstr))
    exit(ret)


def kmemleak_cb(option, opt_str, value, parser):
    if not os.path.exists(KMEMLEAK_FILE):
        fail(f"File '{KMEMLEAK_FILE}' doesn't exist. " +
             "Enable CONFIG_DEBUG_KMEMLEAK in kernel configuration.")

    setattr(parser.values, option.dest, True)


def options_cb(option, opt_str, value, parser):
    path_options = ['outputdir', 'template', 'testdir', 'logfile']

    if opt_str in parser.rargs:
        fail('%s may only be specified once.' % opt_str)

    if option.dest == 'runfiles':
        parser.values.cmd = 'rdconfig'
        value = set(os.path.abspath(p) for p in value.split(','))
    if option.dest == 'tags':
        value = [x.strip() for x in value.split(',')]

    if option.dest in path_options:
        setattr(parser.values, option.dest, os.path.abspath(value))
    else:
        setattr(parser.values, option.dest, value)


def parse_args():
    parser = OptionParser()
    parser.add_option('-c', action='callback', callback=options_cb,
                      type='string', dest='runfiles', metavar='runfiles',
                      help='Specify tests to run via config files.')
    parser.add_option('-d', action='store_true', default=False, dest='dryrun',
                      help='Dry run. Print tests, but take no other action.')
    parser.add_option('-l', action='callback', callback=options_cb,
                      default=None, dest='logfile', metavar='logfile',
                      type='string',
                      help='Read logfile and re-run tests which failed.')
    parser.add_option('-g', action='store_true', default=False,
                      dest='do_groups', help='Make directories TestGroups.')
    parser.add_option('-o', action='callback', callback=options_cb,
                      default=BASEDIR, dest='outputdir', type='string',
                      metavar='outputdir', help='Specify an output directory.')
    parser.add_option('-i', action='callback', callback=options_cb,
                      default=TESTDIR, dest='testdir', type='string',
                      metavar='testdir', help='Specify a test directory.')
    parser.add_option('-K', action='store_true', default=False, dest='kmsg',
                      help='Log test names to /dev/kmsg')
    parser.add_option('-m', action='callback', callback=kmemleak_cb,
                      default=False, dest='kmemleak',
                      help='Enable kmemleak reporting (Linux only)')
    parser.add_option('-p', action='callback', callback=options_cb,
                      default='', dest='pre', metavar='script',
                      type='string', help='Specify a pre script.')
    parser.add_option('-P', action='callback', callback=options_cb,
                      default='', dest='post', metavar='script',
                      type='string', help='Specify a post script.')
    parser.add_option('-q', action='store_true', default=False, dest='quiet',
                      help='Silence on the console during a test run.')
    parser.add_option('-s', action='callback', callback=options_cb,
                      default='', dest='failsafe', metavar='script',
                      type='string', help='Specify a failsafe script.')
    parser.add_option('-S', action='callback', callback=options_cb,
                      default='', dest='failsafe_user',
                      metavar='failsafe_user', type='string',
                      help='Specify a user to execute the failsafe script.')
    parser.add_option('-t', action='callback', callback=options_cb, default=60,
                      dest='timeout', metavar='seconds', type='int',
                      help='Timeout (in seconds) for an individual test.')
    parser.add_option('-u', action='callback', callback=options_cb,
                      default='', dest='user', metavar='user', type='string',
                      help='Specify a different user name to run as.')
    parser.add_option('-w', action='callback', callback=options_cb,
                      default=None, dest='template', metavar='template',
                      type='string', help='Create a new config file.')
    parser.add_option('-x', action='callback', callback=options_cb, default='',
                      dest='pre_user', metavar='pre_user', type='string',
                      help='Specify a user to execute the pre script.')
    parser.add_option('-X', action='callback', callback=options_cb, default='',
                      dest='post_user', metavar='post_user', type='string',
                      help='Specify a user to execute the post script.')
    parser.add_option('-T', action='callback', callback=options_cb, default='',
                      dest='tags', metavar='tags', type='string',
                      help='Specify tags to execute specific test groups.')
    parser.add_option('-I', action='callback', callback=options_cb, default=1,
                      dest='iterations', metavar='iterations', type='int',
                      help='Number of times to run the test run.')
    (options, pathnames) = parser.parse_args()

    if options.runfiles and len(pathnames):
        fail('Extraneous arguments.')

    options.pathnames = [os.path.abspath(path) for path in pathnames]

    return options
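
# Illustrative invocations (paths are examples only):
#   test-runner.py -c /usr/share/zfs/runfiles/common.run -q
#       run the tests listed in a runfile, printing only failures
#   test-runner.py -g -o /var/tmp/test_results /path/to/tests
#       treat each directory under /path/to/tests as a TestGroup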


def main():
    options = parse_args()

    testrun = TestRun(options)

    if options.runfiles:
        testrun.read(options)
    else:
        find_tests(testrun, options)

    if options.logfile:
        filter_tests(testrun, options)

    if options.template:
        testrun.write(options)
        exit(0)

    testrun.complete_outputdirs()
    testrun.run(options)
    exit(testrun.summary())


if __name__ == '__main__':
    main()