import os
import re
import operator

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat


class LLDBTest(TestFormat):
    def __init__(self, dotest_cmd):
        self.dotest_cmd = dotest_cmd

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith(".") or filename in localConfig.excludes:
                continue

            # Ignore files that don't start with 'Test'.
            if not filename.startswith("Test"):
                continue

            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                base, ext = os.path.splitext(filename)
                if ext in localConfig.suffixes:
                    yield lit.Test.Test(
                        testSuite, path_in_suite + (filename,), localConfig
                    )

    def execute(self, test, litConfig):
        if litConfig.noExecute:
            return lit.Test.PASS, ""

        if not getattr(test.config, "lldb_enable_python", False):
            return (lit.Test.UNSUPPORTED, "Python module disabled")

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED, "Test is unsupported")

        testPath, testFile = os.path.split(test.getSourcePath())

        # The Python used to run lit can be different from the Python LLDB was
        # built with.
        executable = test.config.python_executable

        isLuaTest = testFile == test.config.lua_test_entry

        # On Windows, the system does not always correctly interpret shebang
        # lines. To make sure we can execute the tests, add the Python
        # executable as the first parameter of the command.
        cmd = [executable] + self.dotest_cmd + [testPath, "-p", testFile]
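        # Illustrative only (hypothetical paths and arguments): the resulting
        # command is roughly
        #   ['/usr/bin/python3', '.../dotest.py', <dotest args...>,
        #    '/path/to/test/dir', '-p', 'TestFoo.py']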

        if isLuaTest:
            luaExecutable = test.config.lua_executable
            cmd.extend(["--env", "LUA_EXECUTABLE=%s" % luaExecutable])

        timeoutInfo = None
        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd,
                env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime,
            )
        except lit.util.ExecuteCommandTimeoutException as e:
            out = e.out
            err = e.err
            exitCode = e.exitCode
            timeoutInfo = "Reached timeout of {} seconds".format(
                litConfig.maxIndividualTestTime
            )

        output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (" ".join(cmd), exitCode)
        if timeoutInfo is not None:
            output += """Timeout: %s\n""" % (timeoutInfo,)
        output += "\n"

        if out:
            output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
        if err:
            output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)

        if timeoutInfo:
            return lit.Test.TIMEOUT, output

        # Parse the dotest output from stderr.
        result_regex = r"\((\d+) passes, (\d+) failures, (\d+) errors, (\d+) skipped, (\d+) expected failures, (\d+) unexpected successes\)"
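        # The regex above matches a dotest summary line of this shape (the
        # counts here are illustrative):
        #   (10 passes, 1 failures, 0 errors, 2 skipped, 0 expected failures, 0 unexpected successes)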
        results = re.search(result_regex, err)

        # If parsing fails, mark this test as unresolved.
        if not results:
            return lit.Test.UNRESOLVED, output

        passes = int(results.group(1))
        failures = int(results.group(2))
        errors = int(results.group(3))
        skipped = int(results.group(4))
        expected_failures = int(results.group(5))
        unexpected_successes = int(results.group(6))

        if exitCode:
            # Mark this test as FAIL if at least one test failed.
            if failures > 0:
                return lit.Test.FAIL, output
            lit_results = [
                (failures, lit.Test.FAIL),
                (errors, lit.Test.UNRESOLVED),
                (unexpected_successes, lit.Test.XPASS),
            ]
        else:
            # Mark this test as PASS if at least one test passed.
            if passes > 0:
                return lit.Test.PASS, output
            lit_results = [
                (passes, lit.Test.PASS),
                (skipped, lit.Test.UNSUPPORTED),
                (expected_failures, lit.Test.XFAIL),
            ]

        # Return the lit result code with the maximum occurrence. Only look at
        # the first element and rely on the original order to break ties.
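        # For example (hypothetical counts), lit_results of
        #   [(0, FAIL), (2, UNRESOLVED), (2, XPASS)]
        # yields UNRESOLVED: both non-zero entries tie at 2 and max() keeps the
        # first maximal element it encounters.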
        return max(lit_results, key=operator.itemgetter(0))[1], output
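

# Minimal usage sketch (an assumption for illustration, not a copy of lldb's
# actual lit.cfg.py): a lit configuration could install this format roughly as
# follows, where the dotest.py path and its arguments are hypothetical.
#
#   import lldbtest
#   dotest_cmd = ["/path/to/dotest.py", "--arch", "x86_64"]
#   config.test_format = lldbtest.LLDBTest(dotest_cmd)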