#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# Copyright (c) 2012, 2018 by Delphix. All rights reserved.
# Copyright (c) 2019 Datto Inc.
#
# This script must remain compatible with Python 3.6+.
#
import ctypes
import configparser
import os
import re
import sys

from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import check_output
from threading import Timer
from time import time, CLOCK_MONOTONIC
from os.path import exists
BASEDIR = '/var/tmp/test_results'
TESTDIR = '/usr/share/zfs/'
KMEMLEAK_FILE = '/sys/kernel/debug/kmemleak'

LOG_FILE = 'LOG_FILE'
LOG_OUT = 'LOG_OUT'
LOG_ERR = 'LOG_ERR'
LOG_FILE_OBJ = None
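# KMEMLEAK_FILE is the Linux kmemleak debugfs interface; it only exists when
# the kernel was built with CONFIG_DEBUG_KMEMLEAK (kmemleak_cb() below checks
# for it before the -m option is accepted).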
try:
    from time import monotonic as monotonic_time
except ImportError:
    # Fall back to librt's clock_gettime(CLOCK_MONOTONIC) when
    # time.monotonic is not available.
    class timespec(ctypes.Structure):
        _fields_ = [
            ('tv_sec', ctypes.c_long),
            ('tv_nsec', ctypes.c_long)
        ]

    librt = ctypes.CDLL('librt.so.1', use_errno=True)
    clock_gettime = librt.clock_gettime
    clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]

    def monotonic_time():
        t = timespec()
        if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
            errno_ = ctypes.get_errno()
            raise OSError(errno_, os.strerror(errno_))
        return t.tv_sec + t.tv_nsec * 1e-9
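# All start times and timeouts below are taken from monotonic_time() so that
# wall-clock adjustments (e.g. NTP steps) cannot shorten or stretch a test's
# measured runtime.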
class Result(object):
    total = 0
    runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0, 'RERAN': 0}

    def __init__(self):
        self.returncode = None

    def done(self, proc, killed, reran):
        """
        Finalize the results of this Cmd.
        """
        Result.total += 1
        m, s = divmod(monotonic_time() - self.starttime, 60)
        self.runtime = '%02d:%02d' % (m, s)
        self.returncode = proc.returncode
        if reran is True:
            Result.runresults['RERAN'] += 1
        if killed:
            self.result = 'KILLED'
            Result.runresults['KILLED'] += 1
        elif len(self.kmemleak) > 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1
        elif self.returncode == 0:
            self.result = 'PASS'
            Result.runresults['PASS'] += 1
        elif self.returncode == 4:
            self.result = 'SKIP'
            Result.runresults['SKIP'] += 1
        elif self.returncode != 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1
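    # Outcome mapping used by done(): exit status 0 -> PASS, 4 -> SKIP, any
    # other status -> FAIL; any kmemleak output forces FAIL, and a command
    # terminated by kill_cmd() is recorded as KILLED.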
class Output(object):
    """
    This class is a slightly modified version of the 'Stream' class found
    here: https://stackoverflow.com/q/4984549/
    """

    def __init__(self, stream, debug=False):
        self.stream = stream
        self.debug = debug
        self._buf = b''
        self.lines = []

    def fileno(self):
        return self.stream.fileno()

    def read(self, drain=0):
        """
        Read from the file descriptor. If 'drain' set, read until EOF.
        """
        while self._read() is not None:
            if not drain:
                break

    def _read(self):
        """
        Read up to 4k of data from this output stream. Collect the output
        up to the last newline, and append it to any leftover data from a
        previous call. The lines are stored as a (timestamp, data) tuple
        for easy sorting/merging later.
        """
        fd = self.fileno()
        buf = os.read(fd, 4096)
        if not buf:
            return None
        if self.debug:
            os.write(sys.stderr.fileno(), buf)
        if b'\n' not in buf:
            self._buf += buf
            return []

        buf = self._buf + buf
        tmp, rest = buf.rsplit(b'\n', 1)
        self._buf = rest
        now = datetime.now()
        rows = tmp.split(b'\n')
        self.lines += [(now, r) for r in rows]
        return len(rows)
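# Output.lines holds (datetime, bytes) tuples; Cmd.log() later merges the
# stdout and stderr streams and sorts them by these timestamps.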
class Cmd(object):
    verified_users = []

    def __init__(self, pathname, identifier=None, outputdir=None,
                 timeout=None, user=None, tags=None):
        self.pathname = pathname
        self.identifier = identifier
        self.outputdir = outputdir or BASEDIR
        """
        The timeout for tests is measured in wall-clock time
        """
        self.timeout = timeout
        self.user = user or ''
        self.killed = False
        self.reran = None
        self.result = Result()

        if self.timeout is None:
            self.timeout = 60

    def __str__(self):
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %s
User: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user)
    def kill_cmd(self, proc, options, kmemleak, keyboard_interrupt=False):
        """
        Kill a running command due to timeout, or ^C from the keyboard. If
        sudo is required, this user was verified previously.
        """
        self.killed = True
        do_sudo = len(self.user) != 0
        signal = '-TERM'

        cmd = [SUDO, KILL, signal, str(proc.pid)]
        if not do_sudo:
            del cmd[0]

        try:
            kp = Popen(cmd)
            kp.wait()
        except Exception:
            pass

        """
        If this is not a user-initiated kill and the test has not been
        reran before we consider if the test needs to be reran:
        If the test has spent some time hibernating and didn't run the whole
        length of time before being timed out we will rerun the test.
        """
        if keyboard_interrupt is False and self.reran is None:
            runtime = monotonic_time() - self.result.starttime
            if int(self.timeout) > runtime:
                self.killed = False
                self.reran = False
                self.run(options, dryrun=False, kmemleak=kmemleak)
    def update_cmd_privs(self, cmd, user):
        """
        If a user has been specified to run this Cmd and we're not already
        running as that user, prepend the appropriate sudo command to run
        as that user.
        """
        me = getpwuid(os.getuid())

        if not user or user is me:
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'
            return cmd.split(' ')

        if not os.path.isfile(cmd):
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'

        ret = '%s -E -u %s %s' % (SUDO, user, cmd)
        return ret.split(' ')
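    # 'sudo -E -u <user>' preserves the caller's environment while switching
    # to the requested user; verify_user() has already confirmed that
    # passwordless sudo works for any user named in the run.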
    def collect_output(self, proc, debug=False):
        """
        Read from stdout/stderr as data becomes available, until the
        process is no longer running. Return the lines from the stdout and
        stderr Output objects.
        """
        out = Output(proc.stdout, debug)
        err = Output(proc.stderr, debug)
        res = []
        while proc.returncode is None:
            proc.poll()
            res = select([out, err], [], [], .1)
            for fd in res[0]:
                fd.read()
        for fd in res[0]:
            fd.read(drain=1)

        return out.lines, err.lines
    def run(self, options, dryrun=None, kmemleak=None):
        """
        This is the main function that runs each individual test.
        Determine whether or not the command requires sudo, and modify it
        if needed. Run the command, and update the result object.
        """
        if dryrun is None:
            dryrun = options.dryrun
        if dryrun is True:
            print(self)
            return
        if kmemleak is None:
            kmemleak = options.kmemleak

        privcmd = self.update_cmd_privs(self.pathname, self.user)
        if not os.path.isdir(self.outputdir):
            os.makedirs(self.outputdir, mode=0o777)

        """
        Log each test we run to /dev/kmsg (on Linux), so if there's a kernel
        warning we'll be able to match it up to a particular test.
        """
        if options.kmsg is True and exists("/dev/kmsg"):
            kp = Popen([SUDO, "sh", "-c",
                        f"echo ZTS run {self.pathname} > /dev/kmsg"])
            kp.wait()

        """
        Log each test we run to /dev/ttyu0 (on FreeBSD), so if there's a kernel
        warning we'll be able to match it up to a particular test.
        """
        if options.kmsg is True and exists("/dev/ttyu0"):
            kp = Popen([SUDO, "sh", "-c",
                        f"echo ZTS run {self.pathname} > /dev/ttyu0"])
            kp.wait()

        self.result.starttime = monotonic_time()

        if kmemleak:
            cmd = f'{SUDO} sh -c "echo clear > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
        # Allow a special timeout value of 0 to mean infinity
        if int(self.timeout) == 0:
            self.timeout = sys.maxsize / (10 ** 9)
        t = Timer(
            int(self.timeout), self.kill_cmd, [proc, options, kmemleak]
        )

        try:
            t.start()

            out, err = self.collect_output(proc, options.debug)
            self.result.stdout = out
            self.result.stderr = err

            if kmemleak:
                cmd = f'{SUDO} sh -c "echo scan > {KMEMLEAK_FILE}"'
                check_output(cmd, shell=True)
                cmd = f'{SUDO} cat {KMEMLEAK_FILE}'
                self.result.kmemleak = check_output(cmd, shell=True)
        except KeyboardInterrupt:
            self.kill_cmd(proc, options, kmemleak, True)
            fail('\nRun terminated at user request.')
        finally:
            t.cancel()

        if self.reran is not False:
            self.result.done(proc, self.killed, self.reran)
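    # The Timer above fires kill_cmd() from a separate thread once the
    # timeout expires; a timeout of 0 is translated into an effectively
    # unlimited value before the Timer is created.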
    def skip(self):
        """
        Initialize enough of the test result that we can log a skipped
        command.
        """
        Result.total += 1
        Result.runresults['SKIP'] += 1
        self.result.stdout = self.result.stderr = []
        self.result.starttime = monotonic_time()
        m, s = divmod(monotonic_time() - self.result.starttime, 60)
        self.result.runtime = '%02d:%02d' % (m, s)
        self.result.result = 'SKIP'
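    # skip() records a 00:00 runtime so skipped commands still show up in the
    # logs and in the summary counts.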
    def log(self, options, suppress_console=False):
        """
        This function is responsible for writing all output. This includes
        the console output, the logfile of all results (with timestamped
        merged stdout and stderr), and for each test, the unmodified
        stdout/stderr/merged in its own file.
        """
        logname = getpwuid(os.getuid()).pw_name
        rer = ''
        if self.reran is True:
            rer = ' (RERAN)'
        user = ' (run as %s)' % (self.user if len(self.user) else logname)
        if self.identifier:
            msga = 'Test (%s): %s%s ' % (self.identifier, self.pathname, user)
        else:
            msga = 'Test: %s%s ' % (self.pathname, user)
        msgb = '[%s] [%s]%s\n' % (self.result.runtime, self.result.result, rer)
        pad = ' ' * (80 - (len(msga) + len(msgb)))
        result_line = msga + pad + msgb

        # The result line is always written to the log file. If -q was
        # specified only failures are written to the console, otherwise
        # the result line is written to the console. The console output
        # may be suppressed by calling log() with suppress_console=True.
        write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE)
        if not suppress_console:
            if not options.quiet:
                write_log(result_line, LOG_OUT)
            elif options.quiet and self.result.result != 'PASS':
                write_log(result_line, LOG_OUT)

        lines = sorted(self.result.stdout + self.result.stderr,
                       key=lambda x: x[0])

        # Write timestamped output (stdout and stderr) to the logfile
        for dt, line in lines:
            timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11],
                                  encoding='utf-8')
            write_log(b'%s %s\n' % (timestamp, line), LOG_FILE)

        # Write the separate stdout/stderr/merged files, if the data exists
        if len(self.result.stdout):
            with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out:
                for _, line in self.result.stdout:
                    os.write(out.fileno(), b'%s\n' % line)
        if len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err:
                for _, line in self.result.stderr:
                    os.write(err.fileno(), b'%s\n' % line)
        if len(self.result.stdout) and len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged:
                for _, line in lines:
                    os.write(merged.fileno(), b'%s\n' % line)
        if len(self.result.kmemleak):
            with open(os.path.join(self.outputdir, 'kmemleak'), 'wb') as kmem:
                kmem.write(self.result.kmemleak)
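    # Per-command artifacts are written as stdout, stderr, merged and
    # kmemleak files inside this command's outputdir; the timestamped,
    # combined log for the whole run goes to LOG_FILE via write_log().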
class Test(Cmd):
    props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post',
             'post_user', 'failsafe', 'failsafe_user', 'tags']

    def __init__(self, pathname,
                 pre=None, pre_user=None, post=None, post_user=None,
                 failsafe=None, failsafe_user=None, tags=None, **kwargs):
        super(Test, self).__init__(pathname, **kwargs)
        self.pre = pre or ''
        self.pre_user = pre_user or ''
        self.post = post or ''
        self.post_user = post_user or ''
        self.failsafe = failsafe or ''
        self.failsafe_user = failsafe_user or ''
        self.tags = tags or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %s
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user,
       self.pre, pre_user, self.post, post_user, self.failsafe,
       failsafe_user, self.tags)
    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and Test. Omit the Test from
        this run if there are any problems.
        """
        files = [self.pre, self.pathname, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in files if len(f)]:
            if not verify_file(f):
                write_log("Warning: Test '%s' not added to this run because"
                          " it failed verification.\n" % f, LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding Test '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        return True
    def run(self, options, dryrun=None, kmemleak=None):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip this Test. Run the post script regardless.
        If the Test is killed, also run the failsafe script.
        """
        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, identifier=self.identifier, outputdir=odir,
                      timeout=self.timeout, user=self.pre_user)
        test = Cmd(self.pathname, identifier=self.identifier,
                   outputdir=self.outputdir, timeout=self.timeout,
                   user=self.user)
        odir = os.path.join(self.outputdir, os.path.basename(self.failsafe))
        failsafe = Cmd(self.failsafe, identifier=self.identifier,
                       outputdir=odir, timeout=self.timeout,
                       user=self.failsafe_user)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, identifier=self.identifier, outputdir=odir,
                       timeout=self.timeout, user=self.post_user)

        cont = True
        if len(pretest.pathname):
            pretest.run(options, kmemleak=False)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        if cont:
            test.run(options, kmemleak=kmemleak)
            if test.result.result == 'KILLED' and len(failsafe.pathname):
                failsafe.run(options, kmemleak=False)
                failsafe.log(options, suppress_console=True)
        else:
            test.skip()

        test.log(options)

        if len(posttest.pathname):
            posttest.run(options, kmemleak=False)
            posttest.log(options)
class TestGroup(Test):
    props = Test.props + ['tests']

    def __init__(self, pathname, tests=None, **kwargs):
        super(TestGroup, self).__init__(pathname, **kwargs)
        self.tests = tests or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Tests: %s
Timeout: %s
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.tests,
       self.timeout, self.user, self.pre, pre_user, self.post, post_user,
       self.failsafe, failsafe_user, self.tags)
    def filter(self, keeplist):
        self.tests = [x for x in self.tests if x in keeplist]
    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and tests in this TestGroup.
        Omit the TestGroup entirely, or simply delete the relevant tests in the
        group, if that's all that's required.
        """
        # If the pre/post/failsafe scripts are relative pathnames, convert to
        # absolute, so they stand a chance of passing verification.
        if len(self.pre) and not os.path.isabs(self.pre):
            self.pre = os.path.join(self.pathname, self.pre)
        if len(self.post) and not os.path.isabs(self.post):
            self.post = os.path.join(self.pathname, self.post)
        if len(self.failsafe) and not os.path.isabs(self.failsafe):
            self.failsafe = os.path.join(self.pathname, self.failsafe)

        auxfiles = [self.pre, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in auxfiles if len(f)]:
            if f != self.failsafe and self.pathname != os.path.dirname(f):
                # Pre/post scripts must live in the group's own directory;
                # only the failsafe script may come from elsewhere.
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' exists in a different "
                          "directory.\n" % (self.pathname, f), LOG_ERR)
                return False

            if not verify_file(f):
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' failed verification.\n" %
                          (self.pathname, f), LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding TestGroup '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        # If one of the tests is invalid, delete it, log it, and drive on.
        for test in self.tests:
            if not verify_file(os.path.join(self.pathname, test)):
                del self.tests[self.tests.index(test)]
                write_log("Warning: Test '%s' removed from TestGroup '%s' "
                          "because it failed verification.\n" %
                          (test, self.pathname), LOG_ERR)

        return len(self.tests) != 0
    def run(self, options, dryrun=None, kmemleak=None):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip all the tests in this TestGroup. Run the
        post script regardless. Run the failsafe script when a test is killed.
        """
        # tags assigned to this test group also include the test names
        if options.tags and not set(self.tags).intersection(set(options.tags)):
            return

        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout,
                      user=self.pre_user, identifier=self.identifier)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout,
                       user=self.post_user, identifier=self.identifier)

        cont = True
        if len(pretest.pathname):
            pretest.run(options, dryrun=dryrun, kmemleak=False)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        for fname in self.tests:
            odir = os.path.join(self.outputdir, fname)
            test = Cmd(os.path.join(self.pathname, fname), outputdir=odir,
                       timeout=self.timeout, user=self.user,
                       identifier=self.identifier)
            odir = os.path.join(odir, os.path.basename(self.failsafe))
            failsafe = Cmd(self.failsafe, outputdir=odir, timeout=self.timeout,
                           user=self.failsafe_user, identifier=self.identifier)
            if cont:
                test.run(options, dryrun=dryrun, kmemleak=kmemleak)
                if test.result.result == 'KILLED' and len(failsafe.pathname):
                    failsafe.run(options, dryrun=dryrun, kmemleak=False)
                    failsafe.log(options, suppress_console=True)
            else:
                test.skip()

            test.log(options)

        if len(posttest.pathname):
            posttest.run(options, dryrun=dryrun, kmemleak=False)
            posttest.log(options)
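    # If the group's pre script fails, cont stays False and every test in the
    # group is skipped, but the post script still runs afterwards.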
class TestRun(object):
    props = ['quiet', 'outputdir', 'debug']

    def __init__(self, options):
        self.tests = {}
        self.testgroups = {}
        self.starttime = time()
        self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
        self.outputdir = os.path.join(options.outputdir, self.timestamp)
        self.setup_logging(options)
        self.defaults = [
            ('outputdir', BASEDIR),
            ('quiet', False),
            ('timeout', 60),
            ('user', ''),
            ('pre', ''),
            ('pre_user', ''),
            ('post', ''),
            ('post_user', ''),
            ('failsafe', ''),
            ('failsafe_user', ''),
            ('tags', [])
        ]
    def __str__(self):
        s = 'TestRun:\n outputdir: %s\n' % self.outputdir
        for key in sorted(self.tests.keys()):
            s += '%s%s' % (self.tests[key].__str__(), '\n')
        for key in sorted(self.testgroups.keys()):
            s += '%s%s' % (self.testgroups[key].__str__(), '\n')
        return s
    def addtest(self, pathname, options):
        """
        Create a new Test, and apply any properties that were passed in
        from the command line. If it passes verification, add it to the
        TestRun.
        """
        test = Test(pathname)
        for prop in Test.props:
            setattr(test, prop, getattr(options, prop))

        if test.verify():
            self.tests[pathname] = test
    def addtestgroup(self, dirname, filenames, options):
        """
        Create a new TestGroup, and apply any properties that were passed
        in from the command line. If it passes verification, add it to the
        TestRun.
        """
        if dirname not in self.testgroups:
            testgroup = TestGroup(dirname)
            for prop in Test.props:
                setattr(testgroup, prop, getattr(options, prop))

            # Prevent pre/post/failsafe scripts from running as regular tests
            for f in [testgroup.pre, testgroup.post, testgroup.failsafe]:
                if f in filenames:
                    del filenames[filenames.index(f)]

            self.testgroups[dirname] = testgroup
            self.testgroups[dirname].tests = sorted(filenames)
    def filter(self, keeplist):
        for group in list(self.testgroups.keys()):
            if group not in keeplist:
                del self.testgroups[group]
                continue

            g = self.testgroups[group]

            if g.pre and os.path.basename(g.pre) in keeplist[group]:
                continue

            g.filter(keeplist[group])

        for test in list(self.tests.keys()):
            directory, base = os.path.split(test)
            if directory not in keeplist or base not in keeplist[directory]:
                del self.tests[test]
    def read(self, options):
        """
        Read in the specified runfiles, and apply the TestRun properties
        listed in the 'DEFAULT' section to our TestRun. Then read each
        section, and apply the appropriate properties to the Test or
        TestGroup. Properties from individual sections override those set
        in the 'DEFAULT' section. If the Test or TestGroup passes
        verification, add it to the TestRun.
        """
        config = configparser.RawConfigParser()
        parsed = config.read(options.runfiles)
        failed = options.runfiles - set(parsed)
        if len(failed):
            files = ' '.join(sorted(failed))
            fail("Couldn't read config files: %s" % files)

        for opt in TestRun.props:
            if config.has_option('DEFAULT', opt):
                setattr(self, opt, config.get('DEFAULT', opt))
        self.outputdir = os.path.join(self.outputdir, self.timestamp)

        testdir = options.testdir

        for section in config.sections():
            if 'tests' in config.options(section):
                parts = section.split(':', 1)
                sectiondir = parts[0]
                identifier = parts[1] if len(parts) == 2 else None
                if os.path.isdir(sectiondir):
                    pathname = sectiondir
                elif os.path.isdir(os.path.join(testdir, sectiondir)):
                    pathname = os.path.join(testdir, sectiondir)
                else:
                    pathname = sectiondir

                testgroup = TestGroup(os.path.abspath(pathname),
                                      identifier=identifier)
                for prop in TestGroup.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'tags':
                                setattr(testgroup, prop,
                                        eval(config.get(sect, prop)))
                            elif prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(testgroup, prop,
                                        os.path.join(testdir, failsafe))
                            else:
                                setattr(testgroup, prop,
                                        config.get(sect, prop))

                # Repopulate tests using eval to convert the string to a list
                testgroup.tests = eval(config.get(section, 'tests'))

                if testgroup.verify():
                    self.testgroups[section] = testgroup
            else:
                test = Test(section)
                for prop in Test.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(test, prop,
                                        os.path.join(testdir, failsafe))
                            else:
                                setattr(test, prop, config.get(sect, prop))

                if test.verify():
                    self.tests[section] = test
    def write(self, options):
        """
        Create a configuration file for editing and later use. The
        'DEFAULT' section of the config file is created from the
        properties that were specified on the command line. Tests are
        simply added as sections that inherit everything from the
        'DEFAULT' section. TestGroups are the same, except they get an
        option including all the tests to run in that directory.
        """
        defaults = dict([(prop, getattr(options, prop)) for prop, _ in
                         self.defaults])
        config = configparser.RawConfigParser(defaults)

        for test in sorted(self.tests.keys()):
            config.add_section(test)
            for prop in Test.props:
                if prop not in self.props:
                    config.set(test, prop,
                               getattr(self.tests[test], prop))

        for testgroup in sorted(self.testgroups.keys()):
            config.add_section(testgroup)
            config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
            for prop in TestGroup.props:
                if prop not in self.props:
                    config.set(testgroup, prop,
                               getattr(self.testgroups[testgroup], prop))

        try:
            with open(options.template, 'w') as f:
                return config.write(f)
        except IOError:
            fail('Could not open \'%s\' for writing.' % options.template)
    def complete_outputdirs(self):
        """
        Collect all the pathnames for Tests, and TestGroups. Work
        backwards one pathname component at a time, to create a unique
        directory name in which to deposit test output. Tests will be able
        to write output files directly in the newly modified outputdir.
        TestGroups will be able to create one subdirectory per test in the
        outputdir, and are guaranteed uniqueness because a group can only
        contain files in one directory. Pre and post tests will create a
        directory rooted at the outputdir of the Test or TestGroup in
        question for their output. Failsafe scripts will create a directory
        rooted at the outputdir of each Test for their output.
        """
        done = False
        components = 0
        tmp_dict = dict(list(self.tests.items()) +
                        list(self.testgroups.items()))
        total = len(tmp_dict)
        base = self.outputdir

        while not done:
            paths = []
            components -= 1
            for testfile in list(tmp_dict.keys()):
                uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
                if uniq not in paths:
                    paths.append(uniq)
                    tmp_dict[testfile].outputdir = os.path.join(base, uniq)
                else:
                    break
            done = total == len(paths)
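    # The loop keeps taking one more trailing path component per pass until
    # every Test and TestGroup maps to a unique subdirectory under the run's
    # outputdir.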
    def setup_logging(self, options):
        """
        This function creates the output directory and gets a file object
        for the logfile. This function must be called before write_log()
        can be used.
        """
        if options.dryrun is True:
            return

        global LOG_FILE_OBJ
        if not options.template:
            os.makedirs(self.outputdir, mode=0o777)
            filename = os.path.join(self.outputdir, 'log')
            LOG_FILE_OBJ = open(filename, buffering=0, mode='wb')
    def run(self, options):
        """
        Walk through all the Tests and TestGroups, calling run().
        """
        try:
            os.chdir(self.outputdir)
        except OSError:
            fail('Could not change to directory %s' % self.outputdir)
        # make a symlink to the output for the currently running test
        logsymlink = os.path.join(self.outputdir, '../current')
        if os.path.islink(logsymlink):
            os.unlink(logsymlink)
        if not os.path.exists(logsymlink):
            os.symlink(self.outputdir, logsymlink)
        else:
            write_log('Could not make a symlink to directory %s\n' %
                      self.outputdir, LOG_ERR)

        if options.kmemleak:
            cmd = f'{SUDO} sh -c "echo scan=0 > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        iteration = 0
        while iteration < options.iterations:
            for test in sorted(self.tests.keys()):
                self.tests[test].run(options)
            for testgroup in sorted(self.testgroups.keys()):
                self.testgroups[testgroup].run(options)
            iteration += 1
    def summary(self):
        if Result.total == 0:
            return 2

        print('\nResults Summary')
        for key in list(Result.runresults.keys()):
            if Result.runresults[key] != 0:
                print('%s\t% 4d' % (key, Result.runresults[key]))

        m, s = divmod(time() - self.starttime, 60)
        h, m = divmod(m, 60)
        print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))
        print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
                                            float(Result.total)) * 100))
        print('Log directory:\t%s' % self.outputdir)

        if Result.runresults['FAIL'] > 0:
            return 1

        if Result.runresults['KILLED'] > 0:
            return 1

        if Result.runresults['RERAN'] > 0:
            return 3

        return 0
def write_log(msg, target):
    """
    Write the provided message to standard out, standard error or
    the logfile. If specifying LOG_FILE, then `msg` must be a bytes
    like object. This way we can still handle output from tests that
    may be in unexpected encodings.
    """
    if target == LOG_OUT:
        os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_ERR:
        os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_FILE:
        os.write(LOG_FILE_OBJ.fileno(), msg)
    else:
        fail('log_msg called with unknown target "%s"' % target)
def verify_file(pathname):
    """
    Verify that the supplied pathname is an executable regular file.
    """
    if os.path.isdir(pathname) or os.path.islink(pathname):
        return False

    for ext in '', '.ksh', '.sh':
        script_path = pathname + ext
        if os.path.isfile(script_path) and os.access(script_path, os.X_OK):
            return True

    return False
def verify_user(user):
    """
    Verify that the specified user exists on this system, and can execute
    sudo without being prompted for a password.
    """
    testcmd = [SUDO, '-n', '-u', user, TRUE]

    if user in Cmd.verified_users:
        return True

    try:
        getpwnam(user)
    except KeyError:
        write_log("Warning: user '%s' does not exist.\n" % user,
                  LOG_ERR)
        return False

    p = Popen(testcmd)
    p.wait()
    if p.returncode != 0:
        write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user,
                  LOG_ERR)
        return False

    Cmd.verified_users.append(user)

    return True
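# Users that pass both checks are cached in Cmd.verified_users, so the sudo
# probe runs at most once per user for the whole run.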
def find_tests(testrun, options):
    """
    For the given list of pathnames, add files as Tests. For directories,
    if do_groups is True, add the directory as a TestGroup. If False,
    recursively search for executable files.
    """
    for p in sorted(options.pathnames):
        if os.path.isdir(p):
            for dirname, _, filenames in os.walk(p):
                if options.do_groups:
                    testrun.addtestgroup(dirname, filenames, options)
                else:
                    for f in sorted(filenames):
                        testrun.addtest(os.path.join(dirname, f), options)
        else:
            testrun.addtest(p, options)
def filter_tests(testrun, options):
    try:
        fh = open(options.logfile, "r")
    except Exception as e:
        fail('%s' % e)

    failed = {}
    while True:
        line = fh.readline()
        if not line:
            break
        m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
        if not m:
            continue
        group, test = m.group(1, 2)
        if group in failed:
            failed[group].append(test)
        else:
            failed[group] = [test]
    fh.close()

    testrun.filter(failed)
def fail(retstr, ret=1):
    print('%s: %s' % (sys.argv[0], retstr))
    exit(ret)
def kmemleak_cb(option, opt_str, value, parser):
    if not os.path.exists(KMEMLEAK_FILE):
        fail(f"File '{KMEMLEAK_FILE}' doesn't exist. " +
             "Enable CONFIG_DEBUG_KMEMLEAK in kernel configuration.")

    setattr(parser.values, option.dest, True)
def options_cb(option, opt_str, value, parser):
    path_options = ['outputdir', 'template', 'testdir', 'logfile']

    if opt_str in parser.rargs:
        fail('%s may only be specified once.' % opt_str)

    if option.dest == 'runfiles':
        parser.values.cmd = 'rdconfig'
        value = set(os.path.abspath(p) for p in value.split(','))
    if option.dest == 'tags':
        value = [x.strip() for x in value.split(',')]

    if option.dest in path_options:
        setattr(parser.values, option.dest, os.path.abspath(value))
    else:
        setattr(parser.values, option.dest, value)
def parse_args():
    parser = OptionParser()
    parser.add_option('-c', action='callback', callback=options_cb,
                      type='string', dest='runfiles', metavar='runfiles',
                      help='Specify tests to run via config files.')
    parser.add_option('-d', action='store_true', default=False, dest='dryrun',
                      help='Dry run. Print tests, but take no other action.')
    parser.add_option('-D', action='store_true', default=False, dest='debug',
                      help='Write all test output to stdout as it arrives.')
    parser.add_option('-l', action='callback', callback=options_cb,
                      default=None, dest='logfile', metavar='logfile',
                      type='string',
                      help='Read logfile and re-run tests which failed.')
    parser.add_option('-g', action='store_true', default=False,
                      dest='do_groups', help='Make directories TestGroups.')
    parser.add_option('-o', action='callback', callback=options_cb,
                      default=BASEDIR, dest='outputdir', type='string',
                      metavar='outputdir', help='Specify an output directory.')
    parser.add_option('-i', action='callback', callback=options_cb,
                      default=TESTDIR, dest='testdir', type='string',
                      metavar='testdir', help='Specify a test directory.')
    parser.add_option('-K', action='store_true', default=False, dest='kmsg',
                      help='Log tests names to /dev/kmsg')
    parser.add_option('-m', action='callback', callback=kmemleak_cb,
                      default=False, dest='kmemleak',
                      help='Enable kmemleak reporting (Linux only)')
    parser.add_option('-p', action='callback', callback=options_cb,
                      default='', dest='pre', metavar='script',
                      type='string', help='Specify a pre script.')
    parser.add_option('-P', action='callback', callback=options_cb,
                      default='', dest='post', metavar='script',
                      type='string', help='Specify a post script.')
    parser.add_option('-q', action='store_true', default=False, dest='quiet',
                      help='Silence on the console during a test run.')
    parser.add_option('-s', action='callback', callback=options_cb,
                      default='', dest='failsafe', metavar='script',
                      type='string', help='Specify a failsafe script.')
    parser.add_option('-S', action='callback', callback=options_cb,
                      default='', dest='failsafe_user',
                      metavar='failsafe_user', type='string',
                      help='Specify a user to execute the failsafe script.')
    parser.add_option('-t', action='callback', callback=options_cb, default=60,
                      dest='timeout', metavar='seconds', type='int',
                      help='Timeout (in seconds) for an individual test.')
    parser.add_option('-u', action='callback', callback=options_cb,
                      default='', dest='user', metavar='user', type='string',
                      help='Specify a different user name to run as.')
    parser.add_option('-w', action='callback', callback=options_cb,
                      default=None, dest='template', metavar='template',
                      type='string', help='Create a new config file.')
    parser.add_option('-x', action='callback', callback=options_cb, default='',
                      dest='pre_user', metavar='pre_user', type='string',
                      help='Specify a user to execute the pre script.')
    parser.add_option('-X', action='callback', callback=options_cb, default='',
                      dest='post_user', metavar='post_user', type='string',
                      help='Specify a user to execute the post script.')
    parser.add_option('-T', action='callback', callback=options_cb, default='',
                      dest='tags', metavar='tags', type='string',
                      help='Specify tags to execute specific test groups.')
    parser.add_option('-I', action='callback', callback=options_cb, default=1,
                      dest='iterations', metavar='iterations', type='int',
                      help='Number of times to run the test run.')
    (options, pathnames) = parser.parse_args()

    if options.runfiles and len(pathnames):
        fail('Extraneous arguments.')

    options.pathnames = [os.path.abspath(path) for path in pathnames]

    return options
def main():
    options = parse_args()

    testrun = TestRun(options)

    if options.runfiles:
        testrun.read(options)
    else:
        find_tests(testrun, options)

    if options.logfile:
        filter_tests(testrun, options)

    if options.template:
        testrun.write(options)
        exit(0)

    testrun.complete_outputdirs()
    testrun.run(options)
    exit(testrun.summary())


if __name__ == '__main__':
    main()