2 # Copyright (c) 2014-2017 The Bitcoin Core developers
3 # Distributed under the MIT software license, see the accompanying
4 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 """Run regression test suite.
7 This module calls down into individual test cases via subprocess. It will
8 forward all unrecognized arguments onto the individual test scripts.
10 Functional tests are disabled on Windows by default. Use --force to run them anyway.
12 For a description of arguments recognized by test scripts, see
13 `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
18 from collections
import deque
31 # Formatting. Default colors to empty strings.
32 BOLD
, BLUE
, RED
, GREY
= ("", ""), ("", ""), ("", ""), ("", "")
34 # Make sure python thinks it can write unicode to its stdout
35 "\u2713".encode("utf_8").decode(sys
.stdout
.encoding
)
39 except UnicodeDecodeError:
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    # each tuple is (reset-code, set-code); code [1] enables the style and
    # code [0] ('\033[0m') restores the terminal default.
    BOLD = ('\033[0m', '\033[1m')
    BLUE = ('\033[0m', '\033[0;34m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
# Exit status a test script uses to report that it was skipped (rather than
# passed or failed); 77 follows the conventional automake "skipped test" code.
TEST_EXIT_SKIPPED = 77
56 # Scripts that are run by the travis build process.
57 # Longest test should go first, to favor running tests in parallel
60 # vv Tests less than 5m vv
61 'p2p-fullblocktest.py',
62 'fundrawtransaction.py',
63 'p2p-compactblocks.py',
65 # vv Tests less than 2m vv
70 'listtransactions.py',
71 # vv Tests less than 60s vv
79 'bip68-112-113-p2p.py',
82 # vv Tests less than 30s vv
86 'mempool_resurrect_test.py',
87 'txn_doublespend.py --mineblock',
91 'mempool_spendcoinbase.py',
98 'signrawtransactions.py',
107 'prioritise_transaction.py',
108 'invalidblockrequest.py',
109 'invalidtxrequest.py',
110 'p2p-versionbits-warning.py',
112 'importprunedfunds.py',
121 'wallet-encryption.py',
125 'resendwallettransactions.py',
127 'p2p-fingerprint.py',
129 'p2p-acceptblock.py',
130 'feature_logging.py',
131 'node_network_limited.py',
133 # Don't append tests at the end to avoid merge conflicts
134 # Put them in a random line within the section that fits their approximate run-time
138 # These tests are not run by the travis build process.
139 # Longest test should go first, to favor running tests in parallel
141 # vv Tests less than 20m vv
143 # vv Tests less than 5m vv
144 'maxuploadtarget.py',
145 'mempool_packages.py',
147 # vv Tests less than 2m vv
149 'getblocktemplate_longpoll.py',
151 # vv Tests less than 60s vv
155 # vv Tests less than 30s vv
158 'txn_doublespend.py',
159 'txn_clone.py --mineblock',
161 'invalidateblock.py',
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
# (keeps the slowest jobs at the front of the combined queue so they start
# as early as possible when tests run in parallel).
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
169 # These are python files that live in the functional tests directory, but are not test scripts.
176 # Parse arguments and pass through unrecognised args
177 parser
= argparse
.ArgumentParser(add_help
=False,
178 usage
='%(prog)s [test_runner.py options] [script options] [scripts]',
181 Help text and arguments for individual test script:''',
182 formatter_class
=argparse
.RawTextHelpFormatter
)
183 parser
.add_argument('--combinedlogslen', '-c', type=int, default
=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
184 parser
.add_argument('--coverage', action
='store_true', help='generate a basic coverage report for the RPC interface')
185 parser
.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
186 parser
.add_argument('--extended', action
='store_true', help='run the extended test suite in addition to the basic tests')
187 parser
.add_argument('--force', '-f', action
='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
188 parser
.add_argument('--help', '-h', '-?', action
='store_true', help='print help text and exit')
189 parser
.add_argument('--jobs', '-j', type=int, default
=4, help='how many test scripts to run in parallel. Default=4.')
190 parser
.add_argument('--keepcache', '-k', action
='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
191 parser
.add_argument('--quiet', '-q', action
='store_true', help='only print results summary and failure logs')
192 parser
.add_argument('--tmpdirprefix', '-t', default
=tempfile
.gettempdir(), help="Root directory for datadirs")
193 args
, unknown_args
= parser
.parse_known_args()
195 # args to be passed on always start with two dashes; tests are the remaining unknown args
196 tests
= [arg
for arg
in unknown_args
if arg
[:2] != "--"]
197 passon_args
= [arg
for arg
in unknown_args
if arg
[:2] == "--"]
199 # Read config generated by configure.
200 config
= configparser
.ConfigParser()
201 configfile
= os
.path
.abspath(os
.path
.dirname(__file__
)) + "/../config.ini"
202 config
.read_file(open(configfile
))
204 passon_args
.append("--configfile=%s" % configfile
)
207 logging_level
= logging
.INFO
if args
.quiet
else logging
.DEBUG
208 logging
.basicConfig(format
='%(message)s', level
=logging_level
)
210 # Create base test directory
211 tmpdir
= "%s/bitcoin_test_runner_%s" % (args
.tmpdirprefix
, datetime
.datetime
.now().strftime("%Y%m%d_%H%M%S"))
214 logging
.debug("Temporary test directory at %s" % tmpdir
)
216 enable_wallet
= config
["components"].getboolean("ENABLE_WALLET")
217 enable_utils
= config
["components"].getboolean("ENABLE_UTILS")
218 enable_bitcoind
= config
["components"].getboolean("ENABLE_BITCOIND")
220 if config
["environment"]["EXEEXT"] == ".exe" and not args
.force
:
221 # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
222 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
223 print("Tests currently disabled on Windows by default. Use --force option to enable")
226 if not (enable_wallet
and enable_utils
and enable_bitcoind
):
227 print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
228 print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
231 # Build list of tests
233 # Individual tests have been specified. Run specified tests that exist
234 # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
235 tests
= [re
.sub("\.py$", "", t
) + ".py" for t
in tests
]
241 print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD
[1], BOLD
[0], t
))
243 # No individual tests have been specified.
244 # Run all base tests, and optionally run extended tests.
245 test_list
= BASE_SCRIPTS
247 # place the EXTENDED_SCRIPTS first since the three longest ones
248 # are there and the list is shorter
249 test_list
= EXTENDED_SCRIPTS
+ test_list
251 # Remove the test cases that the user has explicitly asked to exclude.
253 tests_excl
= [re
.sub("\.py$", "", t
) + ".py" for t
in args
.exclude
.split(',')]
254 for exclude_test
in tests_excl
:
255 if exclude_test
in test_list
:
256 test_list
.remove(exclude_test
)
258 print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD
[1], BOLD
[0], exclude_test
))
261 print("No valid test scripts specified. Check that your test is in one "
262 "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
266 # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
268 subprocess
.check_call([(config
["environment"]["SRCDIR"] + '/test/functional/' + test_list
[0].split()[0])] + ['-h'])
271 check_script_list(config
["environment"]["SRCDIR"])
273 if not args
.keepcache
:
274 shutil
.rmtree("%s/test/cache" % config
["environment"]["BUILDDIR"], ignore_errors
=True)
276 run_tests(test_list
, config
["environment"]["SRCDIR"], config
["environment"]["BUILDDIR"], config
["environment"]["EXEEXT"], tmpdir
, args
.jobs
, args
.coverage
, passon_args
, args
.combinedlogslen
)
278 def run_tests(test_list
, src_dir
, build_dir
, exeext
, tmpdir
, jobs
=1, enable_coverage
=False, args
=[], combined_logs_len
=0):
279 # Warn if bitcoind is already running (unix only)
281 if subprocess
.check_output(["pidof", "bitcoind"]) is not None:
282 print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD
[1], BOLD
[0]))
283 except (OSError, subprocess
.SubprocessError
):
286 # Warn if there is a cache directory
287 cache_dir
= "%s/test/cache" % build_dir
288 if os
.path
.isdir(cache_dir
):
289 print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD
[1], BOLD
[0], cache_dir
))
292 if "BITCOIND" not in os
.environ
:
293 os
.environ
["BITCOIND"] = build_dir
+ '/src/bitcoind' + exeext
294 os
.environ
["BITCOINCLI"] = build_dir
+ '/src/bitcoin-cli' + exeext
296 tests_dir
= src_dir
+ '/test/functional/'
298 flags
= ["--srcdir={}/src".format(build_dir
)] + args
299 flags
.append("--cachedir=%s" % cache_dir
)
302 coverage
= RPCCoverage()
303 flags
.append(coverage
.flag
)
304 logging
.debug("Initializing coverage directory at %s" % coverage
.dir)
308 if len(test_list
) > 1 and jobs
> 1:
311 subprocess
.check_output([tests_dir
+ 'create_cache.py'] + flags
+ ["--tmpdir=%s/cache" % tmpdir
])
312 except Exception as e
:
317 job_queue
= TestHandler(jobs
, tests_dir
, tmpdir
, test_list
, flags
)
321 max_len_name
= len(max(test_list
, key
=len))
323 for _
in range(len(test_list
)):
324 test_result
, testdir
, stdout
, stderr
= job_queue
.get_next()
325 test_results
.append(test_result
)
327 if test_result
.status
== "Passed":
328 logging
.debug("\n%s%s%s passed, Duration: %s s" % (BOLD
[1], test_result
.name
, BOLD
[0], test_result
.time
))
329 elif test_result
.status
== "Skipped":
330 logging
.debug("\n%s%s%s skipped" % (BOLD
[1], test_result
.name
, BOLD
[0]))
332 print("\n%s%s%s failed, Duration: %s s\n" % (BOLD
[1], test_result
.name
, BOLD
[0], test_result
.time
))
333 print(BOLD
[1] + 'stdout:\n' + BOLD
[0] + stdout
+ '\n')
334 print(BOLD
[1] + 'stderr:\n' + BOLD
[0] + stderr
+ '\n')
335 if combined_logs_len
and os
.path
.isdir(testdir
):
336 # Print the final `combinedlogslen` lines of the combined logs
337 print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD
[1], combined_logs_len
, BOLD
[0]))
338 print('\n============')
339 print('{}Combined log for {}:{}'.format(BOLD
[1], testdir
, BOLD
[0]))
340 print('============\n')
341 combined_logs
, _
= subprocess
.Popen([os
.path
.join(tests_dir
, 'combine_logs.py'), '-c', testdir
], universal_newlines
=True, stdout
=subprocess
.PIPE
).communicate()
342 print("\n".join(deque(combined_logs
.splitlines(), combined_logs_len
)))
344 print_results(test_results
, max_len_name
, (int(time
.time() - time0
)))
347 coverage
.report_rpc_coverage()
349 logging
.debug("Cleaning up coverage data")
352 # Clear up the temp directory if all subdirectories are gone
353 if not os
.listdir(tmpdir
):
356 all_passed
= all(map(lambda test_result
: test_result
.was_successful
, test_results
))
358 sys
.exit(not all_passed
)
360 def print_results(test_results
, max_len_name
, runtime
):
361 results
= "\n" + BOLD
[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name
), "STATUS ", "DURATION") + BOLD
[0]
363 test_results
.sort(key
=lambda result
: result
.name
.lower())
367 for test_result
in test_results
:
368 all_passed
= all_passed
and test_result
.was_successful
369 time_sum
+= test_result
.time
370 test_result
.padding
= max_len_name
371 results
+= str(test_result
)
373 status
= TICK
+ "Passed" if all_passed
else CROSS
+ "Failed"
374 results
+= BOLD
[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name
), status
.ljust(9), time_sum
) + BOLD
[0]
375 results
+= "Runtime: %s s\n" % (runtime
)
380 Trigger the test scripts passed in via the list.
383 def __init__(self
, num_tests_parallel
, tests_dir
, tmpdir
, test_list
=None, flags
=None):
384 assert(num_tests_parallel
>= 1)
385 self
.num_jobs
= num_tests_parallel
386 self
.tests_dir
= tests_dir
388 self
.test_list
= test_list
391 # In case there is a graveyard of zombie bitcoinds, we can apply a
392 # pseudorandom offset to hopefully jump over them.
393 # (625 is PORT_RANGE/MAX_NODES)
394 self
.portseed_offset
= int(time
.time() * 1000) % 625
398 while self
.num_running
< self
.num_jobs
and self
.test_list
:
400 self
.num_running
+= 1
401 t
= self
.test_list
.pop(0)
402 portseed
= len(self
.test_list
) + self
.portseed_offset
403 portseed_arg
= ["--portseed={}".format(portseed
)]
404 log_stdout
= tempfile
.SpooledTemporaryFile(max_size
=2**16)
405 log_stderr
= tempfile
.SpooledTemporaryFile(max_size
=2**16)
406 test_argv
= t
.split()
407 testdir
= "{}/{}_{}".format(self
.tmpdir
, re
.sub(".py$", "", test_argv
[0]), portseed
)
408 tmpdir_arg
= ["--tmpdir={}".format(testdir
)]
411 subprocess
.Popen([self
.tests_dir
+ test_argv
[0]] + test_argv
[1:] + self
.flags
+ portseed_arg
+ tmpdir_arg
,
412 universal_newlines
=True,
419 raise IndexError('pop from empty list')
421 # Return first proc that finishes
424 (name
, time0
, proc
, testdir
, log_out
, log_err
) = j
425 if os
.getenv('TRAVIS') == 'true' and int(time
.time() - time0
) > 20 * 60:
426 # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
427 # providing useful output.
428 proc
.send_signal(signal
.SIGINT
)
429 if proc
.poll() is not None:
430 log_out
.seek(0), log_err
.seek(0)
431 [stdout
, stderr
] = [l
.read().decode('utf-8') for l
in (log_out
, log_err
)]
432 log_out
.close(), log_err
.close()
433 if proc
.returncode
== TEST_EXIT_PASSED
and stderr
== "":
435 elif proc
.returncode
== TEST_EXIT_SKIPPED
:
439 self
.num_running
-= 1
442 return TestResult(name
, status
, int(time
.time() - time0
)), testdir
, stdout
, stderr
443 print('.', end
='', flush
=True)
446 def __init__(self
, name
, status
, time
):
453 if self
.status
== "Passed":
456 elif self
.status
== "Failed":
459 elif self
.status
== "Skipped":
463 return color
[1] + "%s | %s%s | %s s\n" % (self
.name
.ljust(self
.padding
), glyph
, self
.status
.ljust(7), self
.time
) + color
[0]
def was_successful(self):
    """Return True unless this result is an explicit failure.

    Any status other than "Failed" (e.g. "Passed" or "Skipped") counts
    as success for the purpose of the overall run's exit code.
    """
    failed = (self.status == "Failed")
    return not failed
470 def check_script_list(src_dir
):
471 """Check scripts directory.
473 Check that there are no scripts in the functional tests directory which are
474 not being run by pull-tester.py."""
475 script_dir
= src_dir
+ '/test/functional/'
476 python_files
= set([t
for t
in os
.listdir(script_dir
) if t
[-3:] == ".py"])
477 missed_tests
= list(python_files
- set(map(lambda x
: x
.split()[0], ALL_SCRIPTS
+ NON_SCRIPTS
)))
478 if len(missed_tests
) != 0:
479 print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD
[1], BOLD
[0], str(missed_tests
)))
480 if os
.getenv('TRAVIS') == 'true':
481 # On travis this warning is an error to prevent merging incomplete commits into master
486 Coverage reporting utilities for test_runner.
488 Coverage calculation works by having each test script subprocess write
489 coverage files into a particular directory. These files contain the RPC
490 commands invoked during testing, as well as a complete listing of RPC
491 commands per `bitcoin-cli help` (`rpc_interface.txt`).
493 After all tests complete, the commands run are combined and diff'd against
494 the complete list to calculate uncovered RPC commands.
496 See also: test/functional/test_framework/coverage.py
500 self
.dir = tempfile
.mkdtemp(prefix
="coverage")
501 self
.flag
= '--coveragedir=%s' % self
.dir
503 def report_rpc_coverage(self
):
505 Print out RPC commands that were unexercised by tests.
508 uncovered
= self
._get
_uncovered
_rpc
_commands
()
511 print("Uncovered RPC commands:")
512 print("".join((" - %s\n" % i
) for i
in sorted(uncovered
)))
514 print("All RPC commands covered.")
517 return shutil
.rmtree(self
.dir)
519 def _get_uncovered_rpc_commands(self
):
521 Return a set of currently untested RPC commands.
524 # This is shared from `test/functional/test-framework/coverage.py`
525 reference_filename
= 'rpc_interface.txt'
526 coverage_file_prefix
= 'coverage.'
528 coverage_ref_filename
= os
.path
.join(self
.dir, reference_filename
)
529 coverage_filenames
= set()
533 if not os
.path
.isfile(coverage_ref_filename
):
534 raise RuntimeError("No coverage reference found")
536 with
open(coverage_ref_filename
, 'r') as f
:
537 all_cmds
.update([i
.strip() for i
in f
.readlines()])
539 for root
, dirs
, files
in os
.walk(self
.dir):
540 for filename
in files
:
541 if filename
.startswith(coverage_file_prefix
):
542 coverage_filenames
.add(os
.path
.join(root
, filename
))
544 for filename
in coverage_filenames
:
545 with
open(filename
, 'r') as f
:
546 covered_cmds
.update([i
.strip() for i
in f
.readlines()])
548 return all_cmds
- covered_cmds
551 if __name__
== '__main__':