# [clang-format] Fix a bug in aligning comments above PPDirective (#72791)
# llvm-project.git / clang / utils / analyzer / SATest.py
# blob d70e33f24c2df12dd53a74580f6a71802440a81e
#!/usr/bin/env python
"""Command-line driver for the static analyzer regression-testing system."""

import argparse
import os
import sys

from subprocess import call

# Directory containing this script; the project map lives next to it.
SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECTS_DIR = os.path.join(SCRIPTS_DIR, "projects")
# Three levels up from clang/utils/analyzer is the llvm-project checkout root.
DEFAULT_LLVM_DIR = os.path.realpath(
    os.path.join(SCRIPTS_DIR, os.path.pardir, os.path.pardir, os.path.pardir)
)
def add(parser, args):
    """Register a new project in the project map for analyzer testing."""
    import SATestAdd
    from ProjectMap import ProjectInfo

    is_git_source = args.source == "git"

    # Git sources need both coordinates to be reproducible.
    if is_git_source and (args.origin == "" or args.commit == ""):
        parser.error("Please provide both --origin and --commit if source is 'git'")

    # Conversely, the git coordinates are meaningless for other source kinds.
    if not is_git_source and (args.origin != "" or args.commit != ""):
        parser.error(
            "Options --origin and --commit don't make sense when source is not 'git'"
        )

    SATestAdd.add_new_project(
        ProjectInfo(args.name[0], args.mode, args.source, args.origin, args.commit)
    )
def build(parser, args):
    """Build the selected projects and compare results against the references."""
    import SATestBuild

    SATestBuild.VERBOSE = args.verbose

    regression_tester = SATestBuild.RegressionTester(
        args.jobs,
        get_projects(parser, args),
        args.override_compiler,
        args.extra_analyzer_config,
        args.extra_checkers,
        args.regenerate,
        args.strictness,
    )

    if not regression_tester.test_all():
        sys.stderr.write("ERROR: Tests failed.\n")
        # Distinctive non-zero exit code signalling test failure.
        sys.exit(42)
def compare(parser, args):
    """Diff two analyzer result directories (warnings and statistics)."""
    import CmpRuns

    valid_histograms = [
        CmpRuns.HistogramType.RELATIVE.value,
        CmpRuns.HistogramType.LOG_RELATIVE.value,
        CmpRuns.HistogramType.ABSOLUTE.value,
    ]

    # --histogram is optional; validate only when the user supplied one.
    if args.histogram is not None and args.histogram not in valid_histograms:
        parser.error(
            "Incorrect histogram type, available choices are {}".format(
                valid_histograms
            )
        )

    old_results = CmpRuns.ResultsDirectory(args.old[0], args.root_old)
    new_results = CmpRuns.ResultsDirectory(args.new[0], args.root_new)

    CmpRuns.dump_scan_build_results_diff(
        old_results,
        new_results,
        show_stats=args.show_stats,
        stats_only=args.stats_only,
        histogram=args.histogram,
        verbose_log=args.verbose_log,
    )
def update(parser, args):
    """Refresh the reference results of every project after an SATest build."""
    import SATestUpdateDiffs
    from ProjectMap import ProjectMap

    for project in ProjectMap().projects:
        SATestUpdateDiffs.update_reference_results(project, args.git)
def benchmark(parser, args):
    """Time repeated builds of the selected projects and write a CSV report."""
    from SATestBenchmark import Benchmark

    # Note: a local named after the function would shadow it; use a new name.
    selected_projects = get_projects(parser, args)
    Benchmark(selected_projects, args.iterations, args.output).run()
def benchmark_compare(parser, args):
    """Plot a comparison of two benchmark result files."""
    import SATestBenchmark

    SATestBenchmark.compare(args.old, args.new, args.output)
def get_projects(parser, args):
    """Return the project list filtered by --projects and --max-size.

    Projects are never dropped from the list; instead their ``enabled``
    flag is cleared, so downstream consumers still see the full map.
    """
    from ProjectMap import ProjectMap, Size

    selected = ProjectMap().projects

    def toggle_enabled(candidates, predicate, force=False):
        # Re-derive 'enabled' for each project; force=True re-enables
        # projects that were disabled in the map but explicitly requested.
        return [
            p.with_fields(enabled=(force or p.enabled) and predicate(p))
            for p in candidates
        ]

    if args.projects:
        requested_names = args.projects.split(",")
        known_names = [p.name for p in selected]

        # Validate that every requested project exists in the project map.
        for requested in requested_names:
            if requested not in known_names:
                parser.error(
                    "Project '{project}' is not found in "
                    "the project map file. Available projects are "
                    "{all}.".format(project=requested, all=known_names)
                )

        selected = toggle_enabled(
            selected, lambda p: p.name in requested_names, force=True
        )

    # args.max_size may be None (flag not given); Size.from_str is expected
    # to accept that — TODO confirm against ProjectMap.
    try:
        size_limit = Size.from_str(args.max_size)
    except ValueError as e:
        parser.error("{}".format(e))

    return toggle_enabled(selected, lambda p: p.size <= size_limit)
def docker(parser, args):
    """Dispatch the 'docker' subcommand: build image, open a shell, or run tests."""
    # Arguments forwarded to the container must be separated by a literal '--'.
    if args.rest:
        if args.rest[0] != "--":
            parser.error("REST arguments should start with '--'")
        args.rest = args.rest[1:]

    if args.build_image:
        docker_build_image()
    elif args.shell:
        docker_shell(args)
    else:
        sys.exit(docker_run(args, " ".join(args.rest)))
def docker_build_image():
    """Build the 'satest-image' docker image from the scripts directory."""
    build_cmd = "docker build --tag satest-image {}".format(SCRIPTS_DIR)
    # Exit with docker's own status so CI sees build failures.
    sys.exit(call(build_cmd, shell=True))
def docker_shell(args):
    """Start the test container and attach an interactive bash session."""
    try:
        # Start the container detached and idle so it keeps running for the
        # whole duration of the interactive session.
        docker_run(args, "--wait", "--detach")
        # The container is now up; attach an interactive shell to it.
        call("docker exec -it satest bash", shell=True)
    except KeyboardInterrupt:
        pass
    finally:
        # Always stop the container, even if the shell was interrupted.
        docker_cleanup()
def docker_run(args, command, docker_args=""):
    """Run `command` inside the satest container with the work dirs mounted.

    Returns docker's exit status from `call`.
    """
    run_cmd = (
        "docker run --rm --name satest "
        "-v {llvm}:/llvm-project "
        "-v {build}:/build "
        "-v {clang}:/analyzer "
        "-v {scripts}:/scripts "
        "-v {projects}:/projects "
        "{docker_args} "
        "satest-image:latest {command}"
    ).format(
        llvm=args.llvm_project_dir,
        build=args.build_dir,
        clang=args.clang_dir,
        scripts=SCRIPTS_DIR,
        projects=PROJECTS_DIR,
        docker_args=docker_args,
        command=command,
    )
    try:
        return call(run_cmd, shell=True)
    except KeyboardInterrupt:
        # NOTE(review): this path returns None, so sys.exit(docker_run(...))
        # in docker() exits with status 0 after Ctrl-C — confirm intentional.
        docker_cleanup()
def docker_cleanup():
    """Stop the 'satest' container (docker removes it thanks to --rm)."""
    stop_cmd = "docker stop satest"
    print("Please wait for docker to clean up")
    call(stop_cmd, shell=True)
def main():
    """Parse the command line and dispatch to the chosen subcommand.

    Each subparser stores its handler in ``args.func``; the handler is
    invoked with ``(parser, args)`` so it can report argument errors.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    # add subcommand
    add_parser = subparsers.add_parser(
        "add", help="Add a new project for the analyzer testing."
    )
    # TODO: Add an option not to build.
    # TODO: Set the path to the Repository directory.
    add_parser.add_argument("name", nargs=1, help="Name of the new project")
    add_parser.add_argument(
        "--mode",
        action="store",
        default=1,
        type=int,
        choices=[0, 1, 2],
        help="Build mode: 0 for single file project, "
        "1 for scan_build, "
        "2 for single file c++11 project",
    )
    add_parser.add_argument(
        "--source",
        action="store",
        default="script",
        choices=["script", "git", "zip"],
        help="Source type of the new project: "
        "'git' for getting from git "
        "(please provide --origin and --commit), "
        "'zip' for unpacking source from a zip file, "
        "'script' for downloading source by running "
        "a custom script",
    )
    add_parser.add_argument(
        "--origin", action="store", default="", help="Origin link for a git repository"
    )
    add_parser.add_argument(
        "--commit", action="store", default="", help="Git hash for a commit to checkout"
    )
    add_parser.set_defaults(func=add)

    # build subcommand
    build_parser = subparsers.add_parser(
        "build",
        help="Build projects from the project map and compare results with "
        "the reference.",
    )
    build_parser.add_argument(
        "--strictness",
        dest="strictness",
        type=int,
        default=0,
        help="0 to fail on runtime errors, 1 to fail "
        "when the number of found bugs are different "
        "from the reference, 2 to fail on any "
        "difference from the reference. Default is 0.",
    )
    build_parser.add_argument(
        "-r",
        dest="regenerate",
        action="store_true",
        default=False,
        help="Regenerate reference output.",
    )
    build_parser.add_argument(
        "--override-compiler",
        action="store_true",
        default=False,
        help="Call scan-build with --override-compiler option.",
    )
    build_parser.add_argument(
        "-j",
        "--jobs",
        dest="jobs",
        type=int,
        default=0,
        help="Number of projects to test concurrently",
    )
    build_parser.add_argument(
        "--extra-analyzer-config",
        dest="extra_analyzer_config",
        type=str,
        default="",
        # Fixed doubled word in the original help text ("passed to to").
        help="Arguments passed to -analyzer-config",
    )
    build_parser.add_argument(
        "--extra-checkers",
        dest="extra_checkers",
        type=str,
        default="",
        help="Extra checkers to enable",
    )
    build_parser.add_argument(
        "--projects",
        action="store",
        default="",
        help="Comma-separated list of projects to test",
    )
    build_parser.add_argument(
        "--max-size",
        action="store",
        default=None,
        help="Maximum size for the projects to test",
    )
    build_parser.add_argument("-v", "--verbose", action="count", default=0)
    build_parser.set_defaults(func=build)

    # compare subcommand
    cmp_parser = subparsers.add_parser(
        "compare",
        help="Comparing two static analyzer runs in terms of "
        "reported warnings and execution time statistics.",
    )
    cmp_parser.add_argument(
        "--root-old",
        dest="root_old",
        help="Prefix to ignore on source files for OLD directory",
        action="store",
        type=str,
        default="",
    )
    cmp_parser.add_argument(
        "--root-new",
        dest="root_new",
        help="Prefix to ignore on source files for NEW directory",
        action="store",
        type=str,
        default="",
    )
    cmp_parser.add_argument(
        "--verbose-log",
        dest="verbose_log",
        help="Write additional information to LOG [default=None]",
        action="store",
        type=str,
        default=None,
        metavar="LOG",
    )
    cmp_parser.add_argument(
        "--stats-only",
        action="store_true",
        dest="stats_only",
        default=False,
        help="Only show statistics on reports",
    )
    cmp_parser.add_argument(
        "--show-stats",
        action="store_true",
        dest="show_stats",
        default=False,
        help="Show change in statistics",
    )
    cmp_parser.add_argument(
        "--histogram",
        action="store",
        default=None,
        help="Show histogram of paths differences. Requires matplotlib",
    )
    cmp_parser.add_argument("old", nargs=1, help="Directory with old results")
    cmp_parser.add_argument("new", nargs=1, help="Directory with new results")
    cmp_parser.set_defaults(func=compare)

    # update subcommand
    upd_parser = subparsers.add_parser(
        "update",
        help="Update static analyzer reference results based on the previous "
        "run of SATest build. Assumes that SATest build was just run.",
    )
    upd_parser.add_argument(
        "--git", action="store_true", help="Stage updated results using git."
    )
    upd_parser.set_defaults(func=update)

    # docker subcommand
    dock_parser = subparsers.add_parser(
        "docker", help="Run regression system in the docker."
    )
    dock_parser.add_argument(
        "--build-image",
        action="store_true",
        help="Build docker image for running tests.",
    )
    dock_parser.add_argument(
        "--shell", action="store_true", help="Start a shell on docker."
    )
    dock_parser.add_argument(
        "--llvm-project-dir",
        action="store",
        default=DEFAULT_LLVM_DIR,
        help="Path to LLVM source code. Defaults "
        "to the repo where this script is located. ",
    )
    dock_parser.add_argument(
        "--build-dir",
        action="store",
        default="",
        help="Path to a directory where docker should build LLVM code.",
    )
    dock_parser.add_argument(
        "--clang-dir",
        action="store",
        default="",
        help="Path to find/install LLVM installation.",
    )
    dock_parser.add_argument(
        "rest",
        nargs=argparse.REMAINDER,
        default=[],
        help="Additional args that will be forwarded to the docker's entrypoint.",
    )
    dock_parser.set_defaults(func=docker)

    # benchmark subcommand
    bench_parser = subparsers.add_parser(
        "benchmark", help="Run benchmarks by building a set of projects multiple times."
    )
    bench_parser.add_argument(
        "-i",
        "--iterations",
        action="store",
        type=int,
        default=20,
        help="Number of iterations for building each project.",
    )
    bench_parser.add_argument(
        "-o",
        "--output",
        action="store",
        default="benchmark.csv",
        help="Output csv file for the benchmark results",
    )
    bench_parser.add_argument(
        "--projects",
        action="store",
        default="",
        help="Comma-separated list of projects to test",
    )
    bench_parser.add_argument(
        "--max-size",
        action="store",
        default=None,
        help="Maximum size for the projects to test",
    )
    bench_parser.set_defaults(func=benchmark)

    bench_subparsers = bench_parser.add_subparsers()
    bench_compare_parser = bench_subparsers.add_parser(
        "compare", help="Compare benchmark runs."
    )
    bench_compare_parser.add_argument(
        "--old",
        action="store",
        required=True,
        # Fixed typo in the original help text ("agains.").
        help="Benchmark reference results to compare against.",
    )
    bench_compare_parser.add_argument(
        "--new", action="store", required=True, help="New benchmark results to check."
    )
    bench_compare_parser.add_argument(
        "-o", "--output", action="store", required=True, help="Output file for plots."
    )
    bench_compare_parser.set_defaults(func=benchmark_compare)

    args = parser.parse_args()
    # Bug fix: under Python 3, subparsers are optional by default, so running
    # the script with no subcommand left 'func' unset and the dispatch below
    # crashed with AttributeError. Print usage and exit non-zero instead.
    if not hasattr(args, "func"):
        parser.print_help()
        sys.exit(1)
    args.func(parser, args)
if __name__ == "__main__":
    # Run the CLI only when executed directly, not when imported as a module.
    main()