#!/bin/bash
# Copyright (C) Lumiera.org
#   2007, 2008, 2009, 2010, Christian Thaeter <ct@pipapo.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# TESTMODE=FULL (yet unimplemented)
#   run all tests; PLANNED tests which fail count as errors
#
# TESTMODE=FAST
#   run only tests which recently failed
#
# TESTMODE=FIRSTFAIL
#   stop testing on the first failure
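#
# a mode is selected by setting the variable in the environment, for example:
#   TESTMODE=FIRSTFAIL ./test.sh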
#intro Christian Thäter
#intro
#intro A shell script driving software tests.
#intro

# doc generation disabled here (temporarily) for inclusion in the nobug manual

# =tests Writing tests
#config HEAD~ Test Configuration; configuration, tests; configure tests
#config
#config Runtime behaviour of the testsuite can be configured by setting certain
#config environment variables.
#config
#config PARA LOGSUPPRESS; LOGSUPPRESS; suppress certain lines from stderr
#config
#config  LOGSUPPRESS='^\(\*\*[0-9]*\*\* \)\?[0-9]\{10,\}[:!] \(TRACE\|INFO\|NOTICE\|WARNING\|ERR\):'
#config
#config Programs sometimes emit additional diagnostics on stderr which are volatile and not necessary for
#config validating the output. The `LOGSUPPRESS` variable can be set to a regex to filter these lines out.
#config The default as shown above filters out some NoBug annotations and non-fatal logging.
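#config
#config For example, a stricter (purely illustrative) pattern that keeps WARNING and ERR lines visible:
#config
#config  LOGSUPPRESS='^[0-9]\{10,\}: \(TRACE\|INFO\|NOTICE\):'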
LOGSUPPRESS=${LOGSUPPRESS:-'^\(\*\*[0-9]*\*\* \)\?[0-9]\{10,\}[:!] \(TRACE\|INFO\|NOTICE\|WARNING\|ERR\):'}
#config HEAD^ Resource Limits; ulimit, tests; constrain test resource limits
#config
#config It is possible to set some limits for tests to protect the system against really broken cases.
#config Since running under valgrind takes considerably more resources, there are separate variants of
#config these limits for running under valgrind.
#config
#config INDEX LIMIT_CPU; LIMIT_CPU; limit the cpu time
#config
#config  LIMIT_CPU=5
#config
#config Maximal CPU time a test may take; after that it will be killed with SIGXCPU. This protects against livelocks.
#config
#config INDEX LIMIT_TIME; LIMIT_TIME; limit the wall time
#config
#config Maximal wall-clock time a test may take; after that it will be killed with SIGKILL. Protects against deadlocks.
#config
#config INDEX LIMIT_VSZ; LIMIT_VSZ; limit virtual memory size
#config
#config  LIMIT_VSZ=524288
#config
#config Maximal virtual memory size the process may map; allocations and mappings will fail once this limit is reached.
#config Protects against memory leaks.
#config
#config INDEX LIMIT_VG_*; LIMIT_VG_*; limits when running tests under valgrind
#config
#config  LIMIT_VG_CPU=20
#config  LIMIT_VG_TIME=30
#config  LIMIT_VG_VSZ=524288
#config
#config The same limits as above, applied when the tests run under valgrind.
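#config
#config For example, generous (illustrative) limits for a slow machine can be given on the command line:
#config
#config  LIMIT_CPU=30 LIMIT_VG_CPU=120 ./test.sh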
#configf HEAD~ Configuration Files; configuration files; define variables to configure the test
#configf
#configf `test.sh` reads config files from the following locations, if they exist:
#configf
#configf . 'test.conf' from the current directory
#configf . '$srcdir/test.conf' where `$srcdir` is set by autotools
#configf . '$srcdir/tests/test.conf' where `tests/` is assumed to be the default directory for tests
#configf . '$TEST_CONF' a user-definable variable pointing to a config file
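#configf
#configf A minimal 'test.conf' might contain (illustrative values):
#configf
#configf  TESTSUITES=10foo
#configf  VALGRINDFLAGS=DISABLE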
test -f 'test.conf' && source test.conf
test -n "$srcdir" -a -e "$srcdir/test.conf" && source "$srcdir/test.conf"
test -n "$srcdir" -a -e "$srcdir/tests/test.conf" && source "$srcdir/tests/test.conf"
test -n "$TEST_CONF" -a -e "$TEST_CONF" && source "$TEST_CONF"
arg0="$0"
TESTDIR="$(dirname "$arg0")"
#libtool HEAD Libtool; libtool; support for libtool
#libtool
#libtool When test.sh detects the presence of './libtool' it runs all tests with
#libtool `./libtool --mode=execute`.

if test -x ./libtool; then
    LIBTOOL_EX="./libtool --mode=execute"
fi
#valgrind HEAD~ Valgrind; valgrind; valgrind support
#valgrind
#valgrind Tests are run under valgrind supervision by default, unless disabled.
#valgrind
#valgrind PARA VALGRINDFLAGS; VALGRINDFLAGS; control valgrind options
#valgrind
#valgrind  VALGRINDFLAGS="--leak-check=yes --show-reachable=yes"
#valgrind
#valgrind `VALGRINDFLAGS` defines the options which are passed to valgrind. This can be used to override
#valgrind the defaults or to switch the valgrind tool. The special value `VALGRINDFLAGS=DISABLE` disables
#valgrind valgrind for the tests.
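#valgrind
#valgrind For example (illustrative invocations), one can switch the tool or turn valgrind off:
#valgrind
#valgrind  VALGRINDFLAGS='--tool=helgrind' ./test.sh
#valgrind  VALGRINDFLAGS=DISABLE ./test.sh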
#valgrind HEAD^ Generating Valgrind Suppression Files; vgsuppression; ignore false positives
#valgrind
#valgrind When there is a 'vgsuppression' executable in the current directory (built by something external),
#valgrind test.sh uses it to generate a local 'vgsuppression.supp' file and uses that to suppress
#valgrind all errors triggered by 'vgsuppression'. The idea here is that one adds code which triggers known
#valgrind false positives to 'vgsuppression'. Care must be taken that this code stays simple and does
#valgrind not generate true positives.
ulimit -S -t ${LIMIT_CPU:-5} -v ${LIMIT_VSZ:-524288}

LIMIT_TIME_REAL="$LIMIT_TIME"
if [ "$VALGRINDFLAGS" = 'DISABLE' ]; then
    echo "valgrind explicitly disabled"
else
    if [ "$(which valgrind)" ]; then
        ulimit -S -t ${LIMIT_VG_CPU:-20} -v ${LIMIT_VG_VSZ:-524288}
        LIMIT_TIME_REAL="$LIMIT_VG_TIME"
        if [[ -x 'vgsuppression' ]]; then
            if [[ 'vgsuppression' -nt 'vgsuppression.supp' ]]; then
                echo 'generating valgrind suppression file'
                # keep only complete suppression blocks ({ .. }) from the valgrind output
                $LIBTOOL_EX $(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=yes} -q --gen-suppressions=all vgsuppression 2>&1 \
                    | awk '/^{/ {i = 1;} /^}/ {i = 0; print $0;} {if (i == 1) print $0;}' >vgsuppression.supp
            fi
            valgrind="$(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=no} --suppressions=vgsuppression.supp -q"
        else
            valgrind="$(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=no} -q"
        fi
    else
        echo "no valgrind found, running tests without it"
    fi
fi
echo "================ ${0##*/} ================"

TESTCNT=0
SKIPCNT=0
FAILCNT=0
# the old testlog, if present, is used to check for previous test states
if test -f ,testlog; then
    mv ,testlog ,testlog.pre
fi
# compare an expectation template against a plain output file, line by line
function compare_template () # template plainfile
{
    local template
    local line
    local templateno=1
    local lineno=1
    local miss=0

    exec 3<"$1"
    exec 4<"$2"

    IFS='' read -u 3 -r template || return 0
    IFS='' read -u 4 -r line || { echo "no output"; return 1; }

    while true; do
        local cmd="${template%%:*}:"
        local arg="${template#*: }"

        case "$cmd" in
        'regex_cont:')
            if [[ $line =~ $arg ]]; then
                # matched: consume output lines as long as the regex keeps matching
                miss=0
                : $((++lineno))
                IFS='' read -u 4 -r line ||
                    if IFS='' read -u 3 -r template; then
                        echo "premature end in output, expecting $template:$templateno"
                        return 1
                    else
                        return 0
                    fi
            else
                # no match: try the next template line, give up on the second miss in a row
                if [[ $((++miss)) -gt 1 ]]; then
                    echo -e "'$line':$lineno\ndoes not match\n$template:$templateno"
                    return 1
                fi
                IFS='' read -u 3 -r template ||
                    { echo "more output than expected: '$line':$lineno"; return 1; }
                : $((++templateno))
            fi
            ;;
        'literal:')
            if [[ "$line" = "$arg" ]]; then
                # exact match: advance template and output in lockstep
                IFS='' read -u 3 -r template && IFS='' read -u 4 -r line ||
                    {
                        # one stream ended; a remaining template line means the output ended too early
                        if IFS='' read -u 3 -r template; then
                            echo "premature end in output, expecting $template:$templateno"
                            return 1
                        fi
                        return 0
                    }
                : $((++templateno))
                : $((++lineno))
            else
                echo -e "'$line':$lineno\ndoes not match\n$template:$templateno"
                return 1
            fi
            ;;
        *)
            echo "UNKNOWN MATCH COMMAND '$cmd'" 1>&2
            return 1
            ;;
        esac
    done
}
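# a template file (as written by the TEST command below) consists of lines like:
#   regex_cont: ^some regex$
#   literal: an exact line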
#tests HEAD- Testing; testing; how to write testsuites
#tests
#tests Tests are nothing more than bash scripts with some functions from the test.sh
#tests framework defined. Test.sh looks in the directory it resides in for all files ending in '.tests'
#tests and runs them in alphabetical order. The selection of these tests can be constrained with the
#tests `TESTSUITES` environment variable.
#tests
#tests HEAD~ Testsuites; test files; writing tests
#tests
#tests It is common to start the names of the '.tests' files with a two-digit number to give them a proper
#tests order: '10foo.tests', '20bar.tests' and so on. Each such file should only test a certain aspect of
#tests the system. You have to select the testing binary with the `TESTING` function and then write
#tests a series of TESTs defining how the test binary should react. Since tests are shell scripts it is
#tests possible to add supplemental commands to set up and clean up the test environment.
#tests PARA Select Test Executable; TESTING; set the test binary
#tests
#tests  TESTING "message" test_program
#tests
#tests Selects the test binary for the following tests and prints an informational message.
#tests
#tests `message`::
#tests   the message to be printed
#tests `test_program`::
#tests   an existing program to drive the tests, or a shell function

function TESTING () # message test_program
{
    # announce the following tests and remember the test binary
    echo "$1"
    TESTBIN="$2"
    echo -e "\n#### $1, $TESTFILE, $2" >>,testlog
}
#tests PARA Define a Test; TEST; single test
#tests
#tests  TEST "title" arguments.. <<END
#tests
#tests Defines a single test.
#tests
#tests `title`::
#tests   describes this test and is also used as identifier for this test,
#tests   must be unique for all your tests
#tests `arguments..`::
#tests   the following arguments are passed to the test program
#tests `<<END .. END`::
#tests   a list of control commands defining expected inputs and outputs, given as 'heredoc'.
#tests
#tests Each line of the test specification in the heredoc starts with an arbitrary number of spaces,
#tests followed by a command, followed by a colon and a space, followed by additional arguments;
#tests a line may also be empty or a comment.
#testcmds HEAD^ Test Commands; commands; define expected inputs and outputs
#testcmds
#testcmds PARA in; in; stdin data for a test
#testcmds
#testcmds  in: text
#testcmds
#testcmds Send `text` to stdin of the test binary. If no `in:` commands are given, nothing is sent to the
#testcmds test's input.
#testcmds
#testcmds PARA out; out; expected stdout (regex) from a test
#testcmds
#testcmds  out: regex
#testcmds
#testcmds Expect `regex` on stdout. These regexes have 'triggering' semantics: test.sh tries to match a given
#testcmds regex against as many lines as possible (`.*` will match any remaining output); if the match fails,
#testcmds the next expected output line is tried. When that fails too, the test is aborted and counted as a failure.
#testcmds
#testcmds When no `out:` or `out-lit:` commands are given, stdout is not checked; any output there is ignored.
#testcmds
#testcmds PARA err; err; expected stderr (regex) from a test
#testcmds
#testcmds  err: regex
#testcmds
#testcmds Same as 'out:' but expects the data on stderr. When no `err:` or `err-lit:` commands are given, stderr
#testcmds is not checked; any output there is ignored.
#testcmds PARA out-lit; out-lit; expected stdout (literal) from a test
#testcmds
#testcmds  out-lit: text
#testcmds
#testcmds Expect `text` on stdout; it must match literally or the test fails.
#testcmds
#testcmds PARA err-lit; err-lit; expected stderr (literal) from a test
#testcmds
#testcmds  err-lit: text
#testcmds
#testcmds Same as 'out-lit:' but expects the data on stderr.
#testcmds
#testcmds PARA return; return; expected exit value of a test
#testcmds
#testcmds  return: value
#testcmds
#testcmds Expects `value` as exit code of the tested program. The check can be negated by prepending the value
#testcmds with an exclamation mark; `return: !0` expects any exit code except zero.
#testcmds
#testcmds If no `return:` command is given, a zero (success) return from the test program is expected.
#testcmds HEAD^ Conditional Tests; conditional tests; switch tests on conditions
#testcmds
#testcmds Sometimes tests need to be adapted to the environment/platform they are running on. This can be
#testcmds achieved with common if-else-elseif-endif statements. These statements can be nested; a short
#testcmds example follows the command descriptions below.
#testcmds
#testcmds PARA if; if; conditional test
#testcmds
#testcmds  if: check
#testcmds
#testcmds Executes `check` as a shell command; if its return is zero (success), the following test parts are used.
#testcmds
#testcmds PARA else; else; conditional alternative
#testcmds
#testcmds  else:
#testcmds
#testcmds If the previous `if` failed, the following test parts are included in the test, otherwise they
#testcmds are excluded.
#testcmds
#testcmds PARA elseif; elseif; conditional alternative with test
#testcmds
#testcmds  elseif: check
#testcmds
#testcmds Composition of else and if; only includes the following test parts if the preceding if's and elseif's
#testcmds failed and `check` succeeded.
#testcmds
#testcmds PARA endif; endif; end of conditional test part
#testcmds
#testcmds  endif:
#testcmds
#testcmds Ends an `if` statement.
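#testcmds
#testcmds For example, expectations may depend on the platform (the output lines are hypothetical):
#testcmds
#testcmds  if: test -d /proc
#testcmds    out: proc filesystem found
#testcmds  else:
#testcmds    out: no proc filesystem
#testcmds  endif: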
#testcmds HEAD^ Other Elements;;
#testcmds
#testcmds PARA msg; msg; print a diagnostic message
#testcmds
#testcmds  msg: message..
#testcmds
#testcmds Prints `message` while processing the testsuite.
#testcmds
#testcmds PARA comments; comments; adding comments to tests
#testcmds
#testcmds Lines starting with a hash mark and empty lines count as comments and are ignored.
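#testcmds
#testcmds Putting it all together, a complete (hypothetical) '.tests' file could look like:
#testcmds
#testcmds  TESTING "testing the frobnicator" ./frobnicate
#testcmds
#testcmds  TEST "frobnicate some input" --verbose <<END
#testcmds    in: hello
#testcmds    out: frobnicated .*
#testcmds    return: 0
#testcmds  END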
function TEST () # title arguments.. <<END
{
    local name="$1"
    shift

    rm -f ,send_stdin ,expect_stdout ,expect_stderr
    local expect_return=0

    local valgrind="$valgrind"
    if [ "$VALGRINDFLAGS" = 'DISABLE' ]; then
        valgrind=
    fi

    # parse the heredoc test specification; condstack records nested
    # if/elseif/else states as a string of flags, leftmost is the innermost
    local condstack='1'
    while read -r line; do
        local cmd="${line%%:*}:"
        local arg="${line#*: }"

        # skip empty lines and comments (strip leading whitespace first)
        local trimmed="${line#"${line%%[![:space:]]*}"}"
        if [[ ! "$arg" || ! "$trimmed" || "$trimmed" = \#* ]]; then
            continue
        fi

        case "$cmd" in
        'if:')
            if eval "$arg"; then
                condstack="1$condstack"
            else
                condstack="0$condstack"
            fi
            ;;
        'elseif:')
            if [[ "${condstack:0:1}" = "0" ]]; then
                if eval "$arg"; then
                    condstack="1${condstack:1}"
                else
                    condstack="0${condstack:1}"
                fi
            else
                condstack="2${condstack:1}"
            fi
            ;;
        'else:')
            if [[ "${condstack:0:1}" != "0" ]]; then
                condstack="0${condstack:1}"
            else
                condstack="1${condstack:1}"
            fi
            ;;
        'endif:')
            condstack="${condstack:1}"
            ;;
        *)
            # regular commands are only effective in an active conditional branch
            if [[ "${condstack:0:1}" = "1" ]]; then
                case "$cmd" in
                'in:')
                    echo "$arg" >>,send_stdin
                    ;;
                'out:')
                    echo "regex_cont: $arg" >>,expect_stdout
                    ;;
                'err:')
                    echo "regex_cont: $arg" >>,expect_stderr
                    ;;
                'out-lit:')
                    echo "literal: $arg" >>,expect_stdout
                    ;;
                'err-lit:')
                    echo "literal: $arg" >>,expect_stderr
                    ;;
                'return:')
                    expect_return="$arg"
                    ;;
                'msg:')
                    echo "$arg"
                    ;;
                *)
                    echo "UNKNOWN TEST COMMAND '$cmd'" 1>&2
                    ;;
                esac
            fi
            ;;
        esac
    done
    echo -n "TEST $name: "
    echo -en "\nTEST $name: $* " >>,testlog

    local MSGOK=
    local MSGFAIL=
    if grep "^TEST $name: .* FAILED" ,testlog.pre >&/dev/null; then
        MSGOK=" (fixed)"
        MSGFAIL=" (still broken)"
    elif grep "^TEST $name: .* \\(SKIPPED (ok)\\|OK\\)" ,testlog.pre >&/dev/null; then
        # this test passed in the previous run, don't repeat it
        echo ".. SKIPPED (ok)"
        echo ".. SKIPPED (ok)" >>,testlog
        SKIPCNT=$(($SKIPCNT + 1))
        TESTCNT=$(($TESTCNT + 1))
        return
    fi

    TESTCNT=$(($TESTCNT + 1))
    rm -f ,testtmp ,cmptmp
    local fails=0
    # decide how to invoke the test: shell function, binary (via libtool/valgrind), or missing
    local CALL='-'
    if declare -F | grep $TESTBIN >&/dev/null; then
        CALL=''
    elif test -x $TESTBIN; then
        CALL="env $LIBTOOL_EX $valgrind"
    else
        echo "test binary '$TESTBIN' not found" >,stderr
    fi
    if test "$CALL" != '-'; then
        if test -f ,send_stdin; then
            $CALL $TESTBIN "$@" <,send_stdin 2>,stderr >,stdout &
        else
            $CALL $TESTBIN "$@" 2>,stderr >,stdout &
        fi
        local pid=$!

        # watchdog: kill the test if it exceeds the wall-clock limit
        ( sleep $LIMIT_TIME_REAL && kill -KILL $pid ) &>/dev/null &
        local wpid=$!

        wait $pid
        local return=$?

        # the test exited by itself (was not killed by a signal), cancel the watchdog
        if [[ "$return" -le 128 ]]; then
            kill -INT $wpid >&/dev/null
        fi
    fi
    # check stdout against the expectations, filtering suppressed log lines first
    if test -f ,expect_stdout; then
        grep -v "$LOGSUPPRESS" <,stdout >,tmp
        if ! compare_template ,expect_stdout ,tmp >>,cmptmp; then
            echo "unexpected data on stdout" >>,testtmp
            cat ,cmptmp >>,testtmp
            fails=$(($fails + 1))
        fi
        rm -f ,cmptmp
    fi

    # same check for stderr
    if test -f ,expect_stderr; then
        grep -v "$LOGSUPPRESS" <,stderr >,tmp
        if ! compare_template ,expect_stderr ,tmp >>,cmptmp; then
            echo "unexpected data on stderr" >>,testtmp
            cat ,cmptmp >>,testtmp
            fails=$(($fails + 1))
        fi
        rm -f ,cmptmp
    fi
    # check the exit code; a leading '!' negates the expectation
    if [[ "${expect_return:0:1}" = '!' ]]; then
        if [[ "${expect_return#\!}" = "$return" ]]; then
            echo "unexpected return value $return, expected $expect_return" >>,testtmp
            fails=$(($fails + 1))
        fi
    else
        if [[ "${expect_return}" != "$return" ]]; then
            echo "unexpected return value $return, expected $expect_return" >>,testtmp
            fails=$(($fails + 1))
        fi
    fi
    # report the result; details of failed tests go to ,testlog
    if test $fails -eq 0; then
        echo ".. OK$MSGOK"
        echo ".. OK$MSGOK" >>,testlog
    else
        echo ".. FAILED$MSGFAIL"
        echo ".. FAILED$MSGFAIL" >>,testlog
        cat ,testtmp >>,testlog
        if test -s ,stderr; then
            echo "stderr was:" >>,testlog
            cat ,stderr >>,testlog
        fi
        FAILCNT=$(($FAILCNT + 1))
    fi
}
#tests PARA Planned Tests; PLANNED; deactivated tests
#tests
#tests  PLANNED "title" arguments.. <<END
#tests
#tests Skips a single test.
#tests
#tests `title`::
#tests   describes this test and is also used as identifier for this test,
#tests   must be unique for all your tests
#tests `arguments..`::
#tests   the following arguments are passed to the test program
#tests `<<END .. END`::
#tests   a list of control commands defining expected inputs and outputs, given as 'heredoc'.
#tests
#tests `PLANNED` acts as a drop-in replacement for `TEST`. Each such test is skipped (and counted as skipped).
#tests This can be used to specify tests in advance and activate them as soon as development catches up, or
#tests to deactivate intentionally broken tests which are to be fixed later.
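#tests
#tests For example, a test specified ahead of its implementation (hypothetical):
#tests
#tests  PLANNED "frobnicate quuxes" --quux <<END
#tests    out: quux frobnicated
#tests  END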
function PLANNED () # title arguments.. <<END
{
    echo -n "PLANNED $1: "
    echo -en "\nPLANNED $* " >>,testlog
    echo ".. SKIPPED (planned)"
    echo ".. SKIPPED (planned)" >>,testlog
    SKIPCNT=$(($SKIPCNT + 1))
    TESTCNT=$(($TESTCNT + 1))
}
# wrap a comma-separated TESTSUITES list in braces so the glob below expands it
if test \( ! "${TESTSUITES/*,*/}" \) -a "$TESTSUITES"; then
    TESTSUITES="{$TESTSUITES}"
fi
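# e.g. TESTSUITES='10foo,20bar' becomes '{10foo,20bar}', which the
# 'eval echo' glob below brace-expands to both suite names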
for t in $(eval echo "$TESTDIR/*$TESTSUITES*.tests"); do
    echo "$t"
done | sort | uniq | {
    while read TESTFILE; do
        echo "### $TESTFILE" >&2
        if test -f $TESTFILE; then
            source $TESTFILE
        fi
    done
    # summary: overall counts; any failure makes the suite fail
    if [ $FAILCNT = 0 ]; then
        echo " ... PASSED $(($TESTCNT - $SKIPCNT)) TESTS, $SKIPCNT SKIPPED"
    else
        echo " ... SUCCEEDED $(($TESTCNT - $FAILCNT - $SKIPCNT)) TESTS"
        echo " ... FAILED $FAILCNT TESTS"
        echo " ... SKIPPED $SKIPCNT TESTS"
        echo " see ',testlog' for details"
        exit 1
    fi
}
TESTSUITES="${TESTSUITES}${1:+${TESTSUITES:+,}$1}"