import collections
import os
import re
import operator

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat


class LLDBTest(TestFormat):
    def __init__(self, dotest_cmd):
        self.dotest_cmd = dotest_cmd
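
    # For illustration, lldb's lit configuration wires this format up roughly
    # as follows (the exact dotest.py path and arguments below are
    # hypothetical):
    #   dotest_cmd = ["/path/to/dotest.py", "--arch", "x86_64"]
    #   config.test_format = lldbtest.LLDBTest(dotest_cmd)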

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith(".") or filename in localConfig.excludes:
                continue

            # Ignore files that don't start with 'Test'.
            if not filename.startswith("Test"):
                continue

            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                base, ext = os.path.splitext(filename)
                if ext in localConfig.suffixes:
                    yield lit.Test.Test(
                        testSuite, path_in_suite + (filename,), localConfig
                    )
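
    # For illustration: given a source directory listing ".DS_Store", "README",
    # "helper.py", and "TestFoo.py" (hypothetical names), only "TestFoo.py"
    # survives the filters above and is yielded as a test.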

    def execute(self, test, litConfig):
        if litConfig.noExecute:
            return lit.Test.PASS, ""

        if not getattr(test.config, "lldb_enable_python", False):
            return (lit.Test.UNSUPPORTED, "Python module disabled")

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED, "Test is unsupported")

        testPath, testFile = os.path.split(test.getSourcePath())

        # The Python used to run lit can be different from the Python LLDB was
        # built with.
        executable = test.config.python_executable

        isLuaTest = testFile == test.config.lua_test_entry

        # On Windows, the system does not always correctly interpret
        # shebang lines. To make sure we can execute the tests, add the
        # python executable as the first parameter of the command.
        cmd = [executable] + self.dotest_cmd + [testPath, "-p", testFile]
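
        # For illustration, the assembled command resembles (all paths here
        # are hypothetical):
        #   /usr/bin/python3 /path/to/dotest.py <dotest args> \
        #       /path/to/lldb/test/API/some/dir -p TestSomething.py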

        if isLuaTest:
            cmd.extend(["--env", "LUA_EXECUTABLE=%s" % test.config.lua_executable])
            cmd.extend(["--env", "LLDB_LUA_CPATH=%s" % test.config.lldb_lua_cpath])

        timeoutInfo = None
        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd,
                env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime,
            )
        except lit.util.ExecuteCommandTimeoutException as e:
            out = e.out
            err = e.err
            exitCode = e.exitCode
            timeoutInfo = "Reached timeout of {} seconds".format(
                litConfig.maxIndividualTestTime
            )

        output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (" ".join(cmd), exitCode)
        if timeoutInfo is not None:
            output += """Timeout: %s\n""" % (timeoutInfo,)
        output += "\n"

        if out:
            output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
        if err:
            output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
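
        # For illustration, a rendered report looks roughly like:
        #   Script:
        #   --
        #   <the command line>
        #   --
        #   Exit Code: 0
        #
        #   Command Output (stderr):
        #   --
        #   <captured stderr>
        #   --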

        if timeoutInfo:
            return lit.Test.TIMEOUT, output

        # Parse the dotest output from stderr. First get the number of total
        # tests, in order to infer the number of passes.
        # Example: "Ran 5 tests in 0.042s"
        num_ran_regex = r"^Ran (\d+) tests? in "
        num_ran_results = re.search(num_ran_regex, err, re.MULTILINE)

        # If parsing fails, mark this test as unresolved.
        if not num_ran_results:
            return lit.Test.UNRESOLVED, output
        num_ran = int(num_ran_results.group(1))
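
        # For example, a stderr line "Ran 5 tests in 0.042s" matches the
        # regex above and yields num_ran == 5.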

        # Then look for a detailed summary, which is OK or FAILED followed by
        # optional details.
        # Example: "OK (skipped=1, expected failures=1)"
        # Example: "FAILED (failures=3)"
        # Example: "OK"
        result_regex = r"^(?:OK|FAILED)(?: \((.*)\))?\r?$"
        results = re.search(result_regex, err, re.MULTILINE)
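
        # For example, "OK (skipped=1, expected failures=1)" matches with
        # group(1) == "skipped=1, expected failures=1", while a bare "OK"
        # matches with group(1) == None.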

        # If parsing fails, mark this test as unresolved.
        if not results:
            return lit.Test.UNRESOLVED, output

        details = results.group(1)
        parsed_details = collections.defaultdict(int)
        if details:
            for detail in details.split(", "):
                detail_parts = detail.split("=")
                if len(detail_parts) != 2:
                    return lit.Test.UNRESOLVED, output
                parsed_details[detail_parts[0]] = int(detail_parts[1])
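
        # For example, details == "failures=3, skipped=1" parses into
        # {"failures": 3, "skipped": 1}; counts absent from the summary
        # default to 0 via the defaultdict.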

        failures = parsed_details["failures"]
        errors = parsed_details["errors"]
        skipped = parsed_details["skipped"]
        expected_failures = parsed_details["expected failures"]
        unexpected_successes = parsed_details["unexpected successes"]

        non_pass = (
            failures + errors + skipped + expected_failures + unexpected_successes
        )
        passes = num_ran - non_pass
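
        # Worked example: "Ran 5 tests" with a summary of
        # "OK (skipped=1, expected failures=1)" gives non_pass == 2, so
        # passes == 5 - 2 == 3.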

        if exitCode:
            # Mark this test as FAIL if at least one test failed.
            if failures > 0:
                return lit.Test.FAIL, output
            lit_results = [
                (failures, lit.Test.FAIL),
                (errors, lit.Test.UNRESOLVED),
                (unexpected_successes, lit.Test.XPASS),
            ]
        else:
            # Mark this test as PASS if at least one test passed.
            if passes > 0:
                return lit.Test.PASS, output
            lit_results = [
                (passes, lit.Test.PASS),
                (skipped, lit.Test.UNSUPPORTED),
                (expected_failures, lit.Test.XFAIL),
            ]

        # Return the lit result code with the maximum occurrence. Only look at
        # the first element and rely on the original order to break ties.
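        # For example, [(0, PASS), (2, UNSUPPORTED), (2, XFAIL)] yields
        # UNSUPPORTED: Python's max() keeps the first of several tied maxima,
        # so the list order above breaks the tie.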
        return max(lit_results, key=operator.itemgetter(0))[1], output