# Source: llvm-project.git — lldb/packages/Python/lldbsuite/test/configuration.py
# (blob 09fc646f96eaa82cabf6e5535f58da8da66a4a27)
"""
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

Provides the configuration class, which holds all information related to
how this invocation of the test suite should be run.
"""
10 from __future__ import absolute_import
11 from __future__ import print_function
13 # System modules
14 import os
17 # Third-party modules
18 import unittest2
20 # LLDB Modules
21 import lldbsuite
24 # The test suite.
25 suite = unittest2.TestSuite()
27 # The list of categories we said we care about
28 categories_list = None
29 # set to true if we are going to use categories for cherry-picking test cases
30 use_categories = False
31 # Categories we want to skip
32 skip_categories = ["darwin-log"]
33 # Categories we expect to fail
34 xfail_categories = []
35 # use this to track per-category failures
36 failures_per_category = {}
38 # The path to LLDB.framework is optional.
39 lldb_framework_path = None
41 # Test suite repeat count. Can be overwritten with '-# count'.
42 count = 1
44 # The 'arch' and 'compiler' can be specified via command line.
45 arch = None # Must be initialized after option parsing
46 compiler = None # Must be initialized after option parsing
48 # The overriden dwarf verison.
49 dwarf_version = 0
51 # Any overridden settings.
52 # Always disable default dynamic types for testing purposes.
53 settings = [('target.prefer-dynamic-value', 'no-dynamic-values')]
55 # Path to the FileCheck testing tool. Not optional.
56 filecheck = None
58 # The arch might dictate some specific CFLAGS to be passed to the toolchain to build
59 # the inferior programs. The global variable cflags_extras provides a hook to do
60 # just that.
61 cflags_extras = ''
63 # The filters (testclass.testmethod) used to admit tests into our test suite.
64 filters = []
66 # The regular expression pattern to match against eligible filenames as
67 # our test cases.
68 regexp = None
70 # Sets of tests which are excluded at runtime
71 skip_tests = None
72 xfail_tests = None
74 # By default, recorded session info for errored/failed test are dumped into its
75 # own file under a session directory named after the timestamp of the test suite
76 # run. Use '-s session-dir-name' to specify a specific dir name.
77 sdir_name = None
79 # Valid options:
80 # f - test file name (without extension)
81 # n - test class name
82 # m - test method name
83 # a - architecture
84 # c - compiler path
85 # The default is to write all fields.
86 session_file_format = 'fnmac'
88 # Set this flag if there is any session info dumped during the test run.
89 sdir_has_content = False
91 # svn_info stores the output from 'svn info lldb.base.dir'.
92 svn_info = ''
94 # Default verbosity is 0.
95 verbose = 0
97 # By default, search from the script directory.
98 # We can't use sys.path[0] to determine the script directory
99 # because it doesn't work under a debugger
100 testdirs = [os.path.dirname(os.path.realpath(__file__))]
102 # Separator string.
103 separator = '-' * 70
105 failed = False
107 # LLDB Remote platform setting
108 lldb_platform_name = None
109 lldb_platform_url = None
110 lldb_platform_working_dir = None
112 # The base directory in which the tests are being built.
113 test_build_dir = None
115 # The clang module cache directory used by lldb.
116 lldb_module_cache_dir = None
117 # The clang module cache directory used by clang.
118 clang_module_cache_dir = None
120 # The only directory to scan for tests. If multiple test directories are
121 # specified, and an exclusive test subdirectory is specified, the latter option
122 # takes precedence.
123 exclusive_test_subdir = None
125 # Test results handling globals
126 results_filename = None
127 results_formatter_name = None
128 results_formatter_object = None
129 results_formatter_options = None
130 test_result = None
132 # Test rerun configuration vars
133 rerun_all_issues = False
135 # The names of all tests. Used to assert we don't have two tests with the
136 # same base name.
137 all_tests = set()
def shouldSkipBecauseOfCategories(test_categories):
    """Decide whether a test tagged with test_categories must be skipped.

    Returns True when category cherry-picking is active and none of the
    test's categories were selected, or when any of the test's categories
    is in the global skip list; False otherwise.
    """
    if use_categories:
        # With cherry-picking on, a test with no categories — or with no
        # category in common with the requested set — is not admitted.
        # The emptiness check comes first so categories_list is only
        # intersected when there is something to intersect with.
        if not test_categories or not (categories_list & set(test_categories)):
            return True

    # Independently of cherry-picking, skip anything in a skipped category.
    return any(category in test_categories for category in skip_categories)
def get_absolute_path_to_exclusive_test_subdir():
    """
    If an exclusive test subdirectory is specified, return its absolute path.
    Otherwise return None.

    Prints a diagnostic and returns None when the configured subdirectory
    does not name an existing directory.
    """
    # No exclusive subdirectory configured: nothing to resolve.
    # (The original also re-checked len(...) > 0 afterwards, which is always
    # true once this falsy guard passes — that redundant check is dropped.)
    if not exclusive_test_subdir:
        return None

    # Resolve relative to this script's directory, not the caller's cwd.
    test_directory = os.path.dirname(os.path.realpath(__file__))
    test_subdir = os.path.join(test_directory, exclusive_test_subdir)
    if os.path.isdir(test_subdir):
        return test_subdir

    print('specified test subdirectory {} is not a valid directory\n'
          .format(test_subdir))
    return None
def get_absolute_path_to_root_test_dir():
    """
    If an exclusive test subdirectory is specified, return its absolute path.
    Otherwise, return the absolute path of the root test directory.
    """
    exclusive_dir = get_absolute_path_to_exclusive_test_subdir()
    # Fall back to this script's directory when no (valid) exclusive
    # subdirectory was configured.
    return exclusive_dir if exclusive_dir else os.path.dirname(
        os.path.realpath(__file__))
def get_filecheck_path():
    """
    Get the path to the FileCheck testing tool.

    Returns the configured 'filecheck' path only when it is set and present
    on disk; lexists (unlike exists) also accepts a broken symlink.
    """
    if filecheck and os.path.lexists(filecheck):
        return filecheck