third-party/benchmark/tools/gbench/util.py
1 """util.py - General utilities for running, loading, and processing benchmarks
2 """
3 import json
4 import os
5 import tempfile
6 import subprocess
7 import sys
8 import functools
10 # Input file type enumeration
11 IT_Invalid = 0
12 IT_JSON = 1
13 IT_Executable = 2
15 _num_magic_bytes = 2 if sys.platform.startswith("win") else 4
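# (The 2-byte case above is the PE/DOS "MZ" signature on Windows; the ELF and
# Mach-O magic numbers checked below are 4 bytes wide.)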


def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
    """
    if not os.path.isfile(filename):
        return False
    with open(filename, mode="rb") as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == "darwin":
        return magic_bytes in [
            b"\xfe\xed\xfa\xce",  # MH_MAGIC
            b"\xce\xfa\xed\xfe",  # MH_CIGAM
            b"\xfe\xed\xfa\xcf",  # MH_MAGIC_64
            b"\xcf\xfa\xed\xfe",  # MH_CIGAM_64
            b"\xca\xfe\xba\xbe",  # FAT_MAGIC
            b"\xbe\xba\xfe\xca",  # FAT_CIGAM
        ]
    elif sys.platform.startswith("win"):
        return magic_bytes == b"MZ"
    else:
        return magic_bytes == b"\x7FELF"


def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    'False' otherwise.
    """
    try:
        with open(filename, "r") as f:
            json.load(f)
        return True
    except BaseException:
        pass
    return False
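
# Note: the broad BaseException handler above means any open, decode, or parse
# failure simply classifies the file as "not JSON" instead of propagating.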


def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = (
            "'%s' does not name a valid benchmark executable or JSON file" % filename
        )
    return ftype, err_msg
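
# Illustrative use of the (type, msg) contract (hypothetical filename):
#
#   ftype, msg = classify_input_file("results.json")
#   if ftype == IT_Invalid:
#       print(msg)  # 'msg' is only populated in the IT_Invalid case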


def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and exit
    the program.
    """
    ftype, msg = classify_input_file(filename)
    if ftype == IT_Invalid:
        print("Invalid input file: %s" % msg)
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
    if it is found return the arg it specifies. If specified more than once the
    last value is returned. If the flag is not found None is returned.
    """
    assert prefix.startswith("--") and prefix.endswith("=")
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            result = f[len(prefix) :]
    return result
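
# Illustrative check of the "last flag wins" contract documented above:
#
#   flags = ["--benchmark_out=a.json", "--benchmark_out=b.json"]
#   assert find_benchmark_flag("--benchmark_out=", flags) == "b.json"
#   assert find_benchmark_flag("--benchmark_filter=", flags) is None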


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
    with the specified prefix.
    """
    assert prefix.startswith("--") and prefix.endswith("=")
    return [f for f in benchmark_flags if not f.startswith(prefix)]
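
# Illustrative counterpart to find_benchmark_flag:
#
#   flags = ["--benchmark_out=a.json", "--benchmark_filter=BM_.*"]
#   assert remove_benchmark_flags("--benchmark_out=", flags) == [
#       "--benchmark_filter=BM_.*"
#   ]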


def load_benchmark_results(fname):
    """
    Read benchmark output from a file and return the JSON object.
    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """
    with open(fname, "r") as f:
        return json.load(f)


def sort_benchmark_results(result):
    benchmarks = result["benchmarks"]

    # From inner key to the outer key!
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["repetition_index"]
        if "repetition_index" in benchmark
        else -1,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: 1
        if "run_type" in benchmark and benchmark["run_type"] == "aggregate"
        else 0,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["per_family_instance_index"]
        if "per_family_instance_index" in benchmark
        else -1,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["family_index"]
        if "family_index" in benchmark
        else -1,
    )

    result["benchmarks"] = benchmarks
    return result
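
# Note: the four passes above rely on Python's sort being stable; sorting by
# the innermost key first and the outermost key last is equivalent to a single
# sort with a composite key, e.g.:
#
#   benchmarks.sort(key=lambda b: (
#       b.get("family_index", -1),
#       b.get("per_family_instance_index", -1),
#       1 if b.get("run_type") == "aggregate" else 0,
#       b.get("repetition_index", -1),
#   ))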


def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
    real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag("--benchmark_out=", benchmark_flags)
    is_temp_output = False
    if output_name is None:
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + ["--benchmark_out=%s" % output_name]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % " ".join(cmd))
    exitCode = subprocess.call(cmd)
    if exitCode != 0:
        print("TEST FAILED...")
        sys.exit(exitCode)
    json_res = load_benchmark_results(output_name)
    if is_temp_output:
        os.unlink(output_name)
    return json_res
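
# Illustrative call (hypothetical executable name):
#
#   results = run_benchmark("./bm_sort", ["--benchmark_filter=BM_Sort.*"])
#
# With no --benchmark_out= flag given, a temporary file collects the JSON
# output and is removed again after loading.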


def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_JSON:
        return load_benchmark_results(filename)
    if ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    raise ValueError("Unknown file type %s" % ftype)
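
# Illustrative dispatch (hypothetical paths): both calls yield the same kind of
# JSON object, so downstream reporting code need not care which path produced it:
#
#   live = run_or_load_benchmark("./bm_sort", ["--benchmark_repetitions=3"])
#   saved = run_or_load_benchmark("old_results.json", [])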