Fix the spelling throughout piglit
[piglit.git] / framework / programs / run.py
blob 0ef57bf2641933be9ef0ec9df6b69487218f41ee
1 # coding=utf-8
2 # Permission is hereby granted, free of charge, to any person
3 # obtaining a copy of this software and associated documentation
4 # files (the "Software"), to deal in the Software without
5 # restriction, including without limitation the rights to use,
6 # copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following
9 # conditions:
11 # This permission notice shall be included in all copies or
12 # substantial portions of the Software.
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
15 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
16 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
17 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR(S) BE
18 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
19 # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 # OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 # DEALINGS IN THE SOFTWARE.
23 import argparse
24 import ctypes
25 import os
26 import os.path as path
27 import re
28 import shutil
29 import sys
30 import time
32 from framework import core, backends, options
33 from framework import dmesg
34 from framework import exceptions
35 from framework import monitoring
36 from framework import profile
37 from framework.results import TimeAttribute
38 from framework.test import base
39 from . import parsers
# Public entry points of this module: the two piglit sub-commands.
__all__ = [
    'run',
    'resume',
]
def booltype(val):
    """Coerce a command line string into a bool, for use as an argparse type.

    Accepts, case-insensitively, "yes"/"no", "true"/"false", and "1"/"0".
    Raises argparse.ArgumentTypeError for anything else.
    """
    lowered = val.lower()
    if lowered in {'true', 'yes', '1'}:
        return True
    elif lowered in {'false', 'no', '0'}:
        return False
    raise argparse.ArgumentTypeError(
        'Case insensitive values of "yes", "no", "false", "true", and "0" or '
        '"1" are accepted.')
55 def _default_platform():
56 """ Logic to determine the default platform to use
58 This assumes that the platform can only be set on Linux, it probably works
59 on BSD. This is only relevant if piglit is built with waffle support. When
60 waffle support lands for Windows and if it ever happens for OSX, this will
61 need to be extended.
63 On Linux this will try in order,
64 1) An option provided via the -p/--platform option (this is handled in
65 argparse, not in this function)
66 2) PIGLIT_PLATFORM from the environment
67 3) [core]:platform from the config file
68 4) mixed_glx_egl
70 """
71 if os.environ.get('PIGLIT_PLATFORM'):
72 return os.environ.get('PIGLIT_PLATFORM')
73 else:
74 plat = core.PIGLIT_CONFIG.safe_get('core', 'platform', 'mixed_glx_egl')
75 if plat not in core.PLATFORMS:
76 raise exceptions.PiglitFatalError(
77 'Platform is not valid\nvalid platforms are: {}'.format(
78 core.PLATFORMS))
79 return plat
def _default_backend():
    """Determine the default results backend to use.

    There are two options, either the one set via the -b/--backend option, or
    the one in the config file ([core]:backend). The fallback if neither is
    given is json.

    Raises PiglitFatalError if the configured backend is unknown.
    """
    backend = core.PIGLIT_CONFIG.safe_get('core', 'backend', 'json')
    if backend not in backends.BACKENDS:
        raise exceptions.PiglitFatalError(
            'Backend is not valid\nvalid backends are: {}'.format(
                ' '.join(backends.BACKENDS.keys())))
    return backend
def _run_parser(input_):
    """Parse the command line for the piglit run command.

    parsers.parse_config() first consumes any -f/--config argument and
    returns the remaining argv, which is then parsed here.

    Returns the parsed argparse.Namespace.
    """
    unparsed = parsers.parse_config(input_)[1]

    # Set the parent of the config to add the -f/--config message
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument('-b', '--backend',
                        default=_default_backend(),
                        choices=backends.BACKENDS.keys(),
                        help='select a results backend to use')
    # -c and -1 are mutually exclusive: both write to "concurrency".
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=core.PLATFORMS,
                        default=_default_platform(),
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("--abort-on-monitored-error",
                        action="store_true",
                        dest="monitored",
                        help="Enable monitoring according the rules defined "
                             "in piglit.conf, and stop the execution when a "
                             "monitored error is detected. Exit code 3. "
                             "Implies -1/--no-concurrency")
    parser.add_argument("-s", "--sync",
                        action="store_true",
                        help="Sync results to disk after every test")
    parser.add_argument("--junit_suffix",
                        type=str,
                        default="",
                        help="suffix string to append to each test name in junit")
    # Spelling fix: "subtets" -> "subtests" in the user-visible help text.
    parser.add_argument("--junit-subtests",
                        action='store_true',
                        help="Encode tests with subtests as testsuite elements. "
                             "Some xUnit parsers do not handle nested "
                             "testsuites, though it is allowed in the spec.")
    # -v and -l are mutually exclusive: both write to "log_level".
    log_parser = parser.add_mutually_exclusive_group()
    log_parser.add_argument('-v', '--verbose',
                            action='store_const',
                            const='verbose',
                            default='quiet',
                            dest='log_level',
                            help='DEPRECATED! Print more information during '
                                 'test runs. Use -l/--log-level instead')
    log_parser.add_argument("-l", "--log-level",
                            dest="log_level",
                            action="store",
                            choices=['quiet', 'verbose', 'dummy', 'http'],
                            default='quiet',
                            help="Set the logger verbosity level")
    parser.add_argument("--test-list",
                        type=os.path.abspath,
                        help="A file containing a list of tests to run")
    parser.add_argument('-o', '--overwrite',
                        dest='overwrite',
                        action='store_true',
                        help='If the results_path already exists, delete it')
    parser.add_argument('--deqp-mustpass-list',
                        dest='deqp_mustpass',
                        action='store_true',
                        help='Run only the tests in the deqp mustpass list '
                             'when running a deqp gles{2,3,31} profile, '
                             'otherwise run all tests.')
    parser.add_argument('--process-isolation',
                        dest='process_isolation',
                        action='store',
                        type=booltype,
                        default=core.PIGLIT_CONFIG.safe_get(
                            'core', 'process isolation', 'true'),
                        metavar='<bool>',
                        help='Set this to allow tests to run without process '
                             'isolation. This allows, but does not require, '
                             'tests to run multiple tests per process. '
                             'This value can also be set in piglit.conf.')
    parser.add_argument('-j', '--jobs',
                        dest='jobs',
                        action='store',
                        type=int,
                        default=core.PIGLIT_CONFIG.safe_get(
                            'core', 'jobs', None),
                        help='Set the maximum number of jobs to run concurrently. '
                             'By default, the reported number of CPUs is used.')
    parser.add_argument("--ignore-missing",
                        dest="ignore_missing",
                        action="store_true",
                        help="missing tests are considered as 'notrun'")
    parser.add_argument('--timeout',
                        dest='timeout',
                        action='store',
                        type=int,
                        default=None,
                        metavar='<int>',
                        help='Set a default timeout threshold for tests to run in.')
    parser.add_argument("test_profile",
                        metavar="<Profile path(s)>",
                        nargs='+',
                        help="Path to one or more test profiles to run. "
                             "If more than one profile is provided then they "
                             "will be merged.")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("--glsl",
                        action="store_true",
                        help="Run shader runner tests with the -glsl (force GLSL) option")
    return parser.parse_args(unparsed)
def _create_metadata(args, name, forced_test_list):
    """Create and return a metadata dict for Backend.initialize()."""
    # Start from a snapshot of the global options, then layer the values
    # taken from the parsed command line on top.
    opts = dict(options.OPTIONS)
    opts.update({
        'profile': args.test_profile,
        'log_level': args.log_level,
        'concurrent': args.concurrency,
        'include_filter': args.include_tests,
        'exclude_filter': args.exclude_tests,
        'dmesg': args.dmesg,
        'monitoring': args.monitored,
        'forced_test_list': forced_test_list,
        'ignore_missing': args.ignore_missing,
        'timeout': args.timeout,
    })
    # Only record the platform when one was actually chosen.
    if args.platform:
        opts['platform'] = args.platform

    return {
        'options': opts,
        'name': name,
        'info': {'system': core.collect_system_info()},
    }
268 def _disable_windows_exception_messages():
269 """Disable Windows error message boxes for this and all child processes."""
270 if sys.platform == 'win32':
271 # This disables messages boxes for uncaught exceptions, but it will not
272 # disable the message boxes for assertion failures or abort(). Those
273 # are created not by the system but by the CRT itself, and must be
274 # disabled by the child processes themselves.
275 SEM_FAILCRITICALERRORS = 0x0001
276 SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
277 SEM_NOGPFAULTERRORBOX = 0x0002
278 SEM_NOOPENFILEERRORBOX = 0x8000
279 uMode = ctypes.windll.kernel32.SetErrorMode(0)
280 uMode |= SEM_FAILCRITICALERRORS \
281 | SEM_NOALIGNMENTFAULTEXCEPT \
282 | SEM_NOGPFAULTERRORBOX \
283 | SEM_NOOPENFILEERRORBOX
284 ctypes.windll.kernel32.SetErrorMode(uMode)
287 def _results_handler(path):
288 """Handler for core.check_dir."""
289 if os.path.isdir(path):
290 shutil.rmtree(path)
291 else:
292 os.unlink(path)
@exceptions.handler
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run
    """
    args = _run_parser(input_)
    # Apply the per-run timeout as the class-wide default for all tests.
    base.Test.timeout = args.timeout
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.force_glsl = args.glsl

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        # A forced test list only makes sense against a single profile.
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    time_elapsed = TimeAttribute(start=time.time())

    # The junit_* kwargs are only meaningful for the junit backend; other
    # backends accept and ignore them here.
    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(
        args, args.name or path.basename(args.results_path), forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    if args.ignore_missing:
        for p in profiles:
            p.options['ignore_missing'] = args.ignore_missing

    # Exclusions are applied before inclusions for every profile.
    for p in profiles:
        if args.exclude_tests:
            p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                 inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
@exceptions.handler
def resume(input_):
    """Function for the piglit resume command.

    Reloads the options recorded in an existing results directory and
    re-runs only the tests that have not yet completed, appending their
    results to the same directory.
    """
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument('-j', '--jobs',
                        dest='jobs',
                        action='store',
                        type=int,
                        default=core.PIGLIT_CONFIG.safe_get(
                            'core', 'jobs', None),
                        help='Set the maximum number of jobs to run concurrently. '
                             'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    # Restore the options of the interrupted run from the results on disk;
    # only -j/--jobs and -n/--no-retry can be overridden at resume time.
    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry
    options.OPTIONS.force_glsl = results.options['force_glsl']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    # Refresh the recorded system info and name for the resumed run.
    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in results.tests.items():
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            # NOTE(review): run() stores this in p.options['dmesg']; here it
            # is assigned as an attribute instead — confirm this asymmetry
            # is intended.
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        # Skip everything that already has a result, then re-apply the
        # original run's exclude/include regex filters.
        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(
            profiles,
            results.options['log_level'],
            backend,
            results.options['concurrent'],
            args.jobs)
    except exceptions.PiglitUserError as e:
        # Everything may legitimately be filtered out on resume; any other
        # user error is still fatal.
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))