3 A test update script. This script is a utility to update LLVM 'llvm-mc' based test cases with new FileCheck patterns.
6 from __future__
import print_function
10 import os
# Used to advertise this file's name ("autogenerated_note").
12 from UpdateTestChecks
import common
# Matches tool diagnostics such as "<stdin>:3:5: error: invalid operand",
# i.e. a ":<line>:" location followed by "warning:" or "error:".  Used to
# decide whether stderr output should become error-check lines.
ERROR_RE = re.compile(r":\d+: (warning|error): .*")

# Matches "# COM:" comment lines, which must not be treated as test input
# in disassembler (dasm) mode.
ERROR_CHECK_RE = re.compile(r"# COM: .*")

# Lines of tool output matching this pattern are dropped before check-line
# generation.  The dot is escaped so that only the literal ".text" section
# directive matches: the previous r"(.text)" also matched any character
# followed by "text" (e.g. the "ntext" inside "context"), silently skipping
# unrelated output lines.
OUTPUT_SKIPPED_RE = re.compile(r"(\.text)")

# Comment leader per mc mode: assembly (.s) files use "//",
# disassembly (.txt) files use "#".
COMMENT = {"asm": "//", "dasm": "#"}
# Run the given tool over a single test line and return its stdout as text.
# NOTE(review): this region is an incomplete extraction — original statements
# are missing between the visible fragments (e.g. the branch taken when
# `cmd_args` is not a list, a presumable `verbose` guard around the print,
# and the subprocess invocation that binds `out`).  Confirm against the
# upstream file before relying on it.
# NOTE(review): `applySubstitutions` and `substitutions` are not defined in
# the visible source — presumably imported or defined elsewhere; verify.
26 def invoke_tool(exe
, check_rc
, cmd_args
, testline
, verbose
=False):
27 if isinstance(cmd_args
, list):
28 args
= [applySubstitutions(a
, substitutions
) for a
in cmd_args
]
# Build a shell pipeline that echoes the test line into the tool.
32 cmd
= 'echo "' + testline
+ '" | ' + exe
+ " " + args
34 print("Command: ", cmd
)
# Fragments of the (missing) subprocess call: stdout is captured,
# stderr is discarded.
40 stdout
=subprocess
.PIPE
,
41 stderr
=subprocess
.DEVNULL
,
44 # Fix line endings to unix CR style.
# (i.e. normalize CRLF to LF before returning the decoded output)
45 return out
.decode().replace("\r\n", "\n")
48 # create tests line-by-line, here we just filter out the check lines and comments
49 # and treat all others as tests
# Return whether `input_line` is an actual test line (not blank, not a
# comment for the current mc mode, not an existing CHECK line).
# NOTE(review): the return statements of the branches are missing from this
# extraction — presumably False for the skip branches and True otherwise;
# confirm against the upstream file.
50 def isTestLine(input_line
, mc_mode
):
51 line
= input_line
.strip()
52 # Skip empty and comment lines
53 if not line
or line
.startswith(COMMENT
[mc_mode
]):
55 # skip any CHECK lines.
56 elif common
.CHECK_RE
.match(input_line
):
# NOTE(review): the fragment below (original line 62) belongs to a separate
# helper whose `def` line is missing — it tests a line against the shared
# RUN-line regex (apparently an isRunLine-style predicate; verify).
62 return common
.RUN_LINE_RE
.match(l
)
# NOTE(review): the fragment below (original line 66) belongs to another
# def-less helper — it reports whether `err` is non-empty and contains an
# ERROR_RE diagnostic (apparently a hasErr-style predicate; verify).
66 return err
and ERROR_RE
.search(err
) is not None
# Extract the first ERROR_RE diagnostic found in `err`.
# NOTE(review): incomplete extraction — the code that uses the match `s`
# (and the function's return statements) is missing; confirm upstream.
69 def getErrString(err
):
73 # take the first match
74 for line
in err
.splitlines():
75 s
= ERROR_RE
.search(line
)
# Collect the relevant tool output lines: skip lines matching
# OUTPUT_SKIPPED_RE and whitespace-only lines, and accumulate the rest
# with leading tabs/spaces removed.
# NOTE(review): incomplete extraction — the initialization of `output`,
# the `continue` statements for the skip branches, and the return are
# missing; confirm upstream.
81 def getOutputString(out
):
86 for line
in out
.splitlines():
87 if OUTPUT_SKIPPED_RE
.search(line
):
89 if line
.strip("\t ") == "":
91 output
+= line
.lstrip("\t ")
# Decide whether an input line should be copied through to the rewritten
# test file.  In dasm mode, "# COM:" lines get special handling
# (NOTE(review): the body of that branch is missing from this extraction —
# presumably it returns False; confirm upstream).  All other lines defer to
# the shared common.should_add_line_to_output helper, using the
# mode-specific comment marker.
95 def should_add_line_to_output(input_line
, prefix_set
, mc_mode
):
97 if mc_mode
== "dasm" and ERROR_CHECK_RE
.search(input_line
):
100 return common
.should_add_line_to_output(
101 input_line
, prefix_set
, comment_marker
=COMMENT
[mc_mode
]
# Build standard FileCheck lines of the form
# "<comment-marker> <PREFIX>: <output line>" for each line of `output`.
# NOTE(review): incomplete extraction — the initialization of the
# accumulator `o` and the return statement are missing; confirm upstream.
105 def getStdCheckLine(prefix
, output
, mc_mode
):
107 for line
in output
.splitlines():
108 o
+= COMMENT
[mc_mode
] + " " + prefix
+ ": " + line
+ "\n"
# Build an error-check line for diagnostics, anchored to the test line via
# a FileCheck "[[@LINE-<offset>]]" expression.
# NOTE(review): this block is almost entirely missing from the extraction —
# only the signature and one "+ ':[[@LINE-{}]]'" fragment survive; the
# surrounding concatenation and return are absent.  Confirm upstream.
112 def getErrCheckLine(prefix
, output
, mc_mode
, line_offset
=1):
118 + ":[[@LINE-{}]]".format(line_offset
)
# Top-level command-line setup.  NOTE(review): the extraction dropped the
# add_argument calls' option strings — only the help= texts survive for
# the mc-binary, tool, default-march, unique, and sort options; confirm
# the actual flag names upstream.
125 parser
= argparse
.ArgumentParser(description
=__doc__
)
129 help='The "mc" binary to use to generate the test case',
134 help="Treat the given tool name as an mc-like tool for which check lines should be generated",
139 help="Set a default -march for when neither triple nor arch are found in a RUN line",
145 help="remove duplicated test line if found",
151 help="sort testline in alphabetic order (keep run-lines on top), this option could be dangerous as it"
152 "could change the order of lines that are not expected",
# Positional arguments: one or more test files to update.
154 parser
.add_argument("tests", nargs
="+")
155 initial_args
= common
.parse_commandline_args(parser
)
# Script name recorded in the autogenerated note written to updated tests.
157 script_name
= os
.path
.basename(__file__
)
# Iterate over every test file.  NOTE(review): incomplete extraction — the
# bodies that set the mc mode for ".s" vs ".txt" files, the guard around
# the unsupported-sort print, and the `continue`/`m` checks are missing;
# confirm upstream.
159 for ti
in common
.itertests(
160 initial_args
.tests
, parser
, script_name
="utils/" + script_name
# Dispatch on file extension: assembly (.s) vs disassembly (.txt).
162 if ti
.path
.endswith(".s"):
164 elif ti
.path
.endswith(".txt"):
# Sorting is only supported for asm mode.
168 print("sorting with dasm(.txt) file is not supported!")
# Any other extension is skipped with a warning.
172 common
.warn("Expected .s and .txt, Skipping file : ", ti
.path
)
# Scan the input for an IR-style triple declaration.
176 for l
in ti
.input_lines
:
177 m
= common
.TRIPLE_IR_RE
.match(l
)
179 triple_in_ir
= m
.groups()[0]
# Parse each RUN line; unparsable ones are skipped with a warning.
183 for l
in ti
.run_lines
:
185 common
.warn("Skipping unparsable RUN line: " + l
)
# Split the RUN line on "|" into the mc invocation and the trailing
# FileCheck command.
188 commands
= [cmd
.strip() for cmd
in l
.split("|")]
189 assert len(commands
) >= 2
190 mc_cmd
= " | ".join(commands
[:-1])
191 filecheck_cmd
= commands
[-1]
193 # special handling for negating exit status
194 # if not is used in runline, disable rc check, since
195 # the command might or might not
196 # return non-zero code on a single line run
# NOTE(review): the statement that actually disables the rc check under
# "not" is missing from this extraction; confirm upstream.
198 mc_cmd_args
= mc_cmd
.strip().split()
199 if mc_cmd_args
[0] == "not":
201 mc_tool
= mc_cmd_args
[1]
# Strip the leading "not" from the command string.
202 mc_cmd
= mc_cmd
[len(mc_cmd_args
[0]) :].strip()
204 mc_tool
= mc_cmd_args
[0]
# Extract -triple / -march from the mc command line, falling back to the
# configured default march.  (The guards checking `m` are missing here.)
207 m
= common
.TRIPLE_ARG_RE
.search(mc_cmd
)
209 triple_in_cmd
= m
.groups()[0]
211 march_in_cmd
= ti
.args
.default_march
212 m
= common
.MARCH_ARG_RE
.search(mc_cmd
)
214 march_in_cmd
= m
.groups()[0]
216 common
.verify_filecheck_prefixes(filecheck_cmd
)
# Accept only known mc-like tools (plus any user-specified --tool).
# NOTE(review): mc_LIKE_TOOLS is not defined in the visible source —
# presumably a module-level list dropped by the extraction; verify.
218 mc_like_tools
= mc_LIKE_TOOLS
[:]
220 mc_like_tools
.append(ti
.args
.tool
)
221 if mc_tool
not in mc_like_tools
:
222 common
.warn("Skipping non-mc RUN line: " + l
)
225 if not filecheck_cmd
.startswith("FileCheck "):
226 common
.warn("Skipping non-FileChecked RUN line: " + l
)
# Strip the tool name and the %s input placeholders from the mc args.
229 mc_cmd_args
= mc_cmd
[len(mc_tool
) :].strip()
230 mc_cmd_args
= mc_cmd_args
.replace("< %s", "").replace("%s", "").strip()
231 check_prefixes
= common
.get_check_prefixes(filecheck_cmd
)
244 # find all test line from input
245 testlines
= [l
for l
in ti
.input_lines
if isTestLine(l
, mc_mode
)]
246 # remove duplicated lines to save running time
# dict.fromkeys preserves first-seen order while deduplicating.
247 testlines
= list(dict.fromkeys(testlines
))
248 common
.debug("Valid test line found: ", len(testlines
))
# NOTE(review): run_list is built in code missing from this extraction.
250 run_list_size
= len(run_list
)
251 testnum
= len(testlines
)
263 common
.debug("Extracted mc cmd:", mc_tool
, mc_args
)
264 common
.debug("Extracted FileCheck prefixes:", str(prefixes
))
265 common
.debug("Extracted triple :", str(triple_in_cmd
))
266 common
.debug("Extracted march:", str(march_in_cmd
))
# Prefer the RUN-line triple, then the IR triple, then derive from march.
268 triple
= triple_in_cmd
or triple_in_ir
270 triple
= common
.get_triple_from_march(march_in_cmd
)
# Collect raw tool output per run, one inner list per RUN configuration.
272 raw_output
.append([])
273 for line
in testlines
:
274 # get output for each testline
# NOTE(review): fragments of the (missing) invoke_tool call follow —
# the binary defaults to --llvm-mc-binary when given, else the RUN-line
# tool, and verbosity follows the command-line flag.
276 ti
.args
.llvm_mc_binary
or mc_tool
,
280 verbose
=ti
.args
.verbose
,
282 raw_output
[-1].append(out
)
284 common
.debug("Collect raw tool lines:", str(len(raw_output
[-1])))
286 raw_prefixes
.append(prefixes
)
# Map each test line to its generated check lines, and track which
# FileCheck prefixes actually get used.
289 generated_prefixes
= {}
290 used_prefixes
= set()
291 prefix_set
= set([prefix
for p
in run_list
for prefix
in p
[0]])
292 common
.debug("Rewriting FileCheck prefixes:", str(prefix_set
))
294 for test_id
in range(testnum
):
295 input_line
= testlines
[test_id
]
297 # a {prefix : output, [runid] } dict
298 # insert output to a prefix-key dict, and do a max sorting
299 # to select the most-used prefix which share the same output string
# NOTE(review): the initialization of p_dict and several branch guards
# (error vs. standard output selection, same-output matching) are
# missing from this extraction; confirm upstream.
301 for run_id
in range(run_list_size
):
302 out
= raw_output
[run_id
][test_id
]
# Choose the error string when the run produced a diagnostic,
# otherwise the filtered standard output.
305 o
= getErrString(out
)
307 o
= getOutputString(out
)
309 prefixes
= raw_prefixes
[run_id
]
313 p_dict
[p
] = o
, [run_id
]
# (None, []) appears to mark a prefix with conflicting outputs.
315 if p_dict
[p
] == (None, []):
318 prev_o
, run_ids
= p_dict
[p
]
320 run_ids
.append(run_id
)
321 p_dict
[p
] = o
, run_ids
# Sort prefixes so the one shared by the most runs comes first.
326 p_dict_sorted
= dict(
327 sorted(p_dict
.items(), key
=lambda item
: -len(item
[1][1]))
330 # prefix is selected and generated with most shared output lines
331 # each run_id can only be used once
335 # line number diff between generated prefix and testline
337 for prefix
, tup
in p_dict_sorted
.items():
# A prefix whose runs were all consumed already contributes nothing.
340 if len(run_ids
) == 0:
350 used_prefixes
.add(prefix
)
# Emit an error-check line for diagnostics, else standard check lines.
353 newline
= getErrCheckLine(prefix
, o
, mc_mode
, line_offset
)
355 newline
= getStdCheckLine(prefix
, o
, mc_mode
)
358 gen_prefix
+= newline
361 generated_prefixes
[input_line
] = gen_prefix
.rstrip("\n")
# Rebuild the test file: copy each test line followed by its generated
# check lines; pass other lines through should_add_line_to_output.
364 for input_info
in ti
.iterlines(output_lines
):
365 input_line
= input_info
.line
366 if input_line
in testlines
:
367 output_lines
.append(input_line
)
368 output_lines
.append(generated_prefixes
[input_line
])
370 elif should_add_line_to_output(input_line
, prefix_set
, mc_mode
):
371 output_lines
.append(input_line
)
# Optional post-processing: deduplicate (--unique) and/or sort (--sort)
# whole test units (blocks separated by blank lines).
373 if ti
.args
.unique
or ti
.args
.sort
:
374 # split with double newlines
375 test_units
= "\n".join(output_lines
).split("\n\n")
377 # select the key line for each test unit
# NOTE(review): the construction of test_dic (unit -> key line) is
# largely missing from this extraction; confirm upstream.
379 for unit
in test_units
:
380 lines
= unit
.split("\n")
382 # if contains multiple lines, use
383 # the first testline or runline as key
384 if isTestLine(l
, mc_mode
):
# Drop units whose key line was already written (--unique).
394 written_lines
= set()
395 for unit
in test_units
:
396 # if not testline/runline, we just add it
397 if unit
not in test_dic
:
398 new_test_units
.append(unit
)
400 if test_dic
[unit
] in written_lines
:
401 common
.debug("Duplicated test skipped: ", unit
)
404 written_lines
.add(test_dic
[unit
])
405 new_test_units
.append(unit
)
406 test_units
= new_test_units
# Sort key for a unit (fragments of a nested getkey helper whose `def`
# line is missing): RUN lines sort before test lines, then lexicographic.
412 # find key of test unit, otherwise use first line
416 line
= l
.split("\n")[0]
418 # runline placed on the top
419 return (not isRunLine(line
), line
)
421 test_units
= sorted(test_units
, key
=getkey
)
423 # join back to be output string
424 output_lines
= "\n\n".join(test_units
).split("\n")
# Optionally append check bodies for prefixes that were never used.
427 if ti
.args
.gen_unused_prefix_body
:
429 ti
.get_checks_for_unused_prefixes(run_list
, used_prefixes
)
# Write the rewritten test back, binary mode with explicit UTF-8 and LF.
432 common
.debug("Writing %d lines to %s..." % (len(output_lines
), ti
.path
))
433 with
open(ti
.path
, "wb") as f
:
434 f
.writelines(["{}\n".format(l
).encode("utf-8") for l
in output_lines
])
437 if __name__
== "__main__":