1 """util.py - General utilities for running, loading, and processing benchmarks

# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2

_num_magic_bytes = 2 if sys.platform.startswith("win") else 4


def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
    """
    if not os.path.isfile(filename):
        return False
    with open(filename, mode="rb") as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == "darwin":
        return magic_bytes in [
            b"\xfe\xed\xfa\xce",  # MH_MAGIC
            b"\xce\xfa\xed\xfe",  # MH_CIGAM
            b"\xfe\xed\xfa\xcf",  # MH_MAGIC_64
            b"\xcf\xfa\xed\xfe",  # MH_CIGAM_64
            b"\xca\xfe\xba\xbe",  # FAT_MAGIC
            b"\xbe\xba\xfe\xca",  # FAT_CIGAM
        ]
    elif sys.platform.startswith("win"):
        return magic_bytes == b"MZ"
    else:
        return magic_bytes == b"\x7FELF"


def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    'False' otherwise.
    """
    try:
        with open(filename, "r") as f:
            json.load(f)
        return True
    except BaseException:
        pass
    return False


def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = (
            "'%s' does not name a valid benchmark executable or JSON file"
            % filename
        )
    return ftype, err_msg


def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and exit
    the program.
    """
    ftype, msg = classify_input_file(filename)
    if ftype == IT_Invalid:
        print("Invalid input file: %s" % msg)
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
    if it is found return the arg it specifies. If specified more than once the
    last value is returned. If the flag is not found None is returned.
    """
    assert prefix.startswith("--") and prefix.endswith("=")
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            result = f[len(prefix) :]
    return result
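# Example (hypothetical flag values): searching ["--benchmark_out=a.json",
# "--benchmark_out=b.json"] for the prefix "--benchmark_out=" returns
# "b.json", because the last occurrence of the flag wins.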


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
    with the specified prefix.
    """
    assert prefix.startswith("--") and prefix.endswith("=")
    return [f for f in benchmark_flags if not f.startswith(prefix)]
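# Example (hypothetical flag values): removing the prefix "--benchmark_out="
# from ["--benchmark_out=a.json", "--benchmark_repetitions=3"] returns
# ["--benchmark_repetitions=3"].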


def load_benchmark_results(fname, benchmark_filter):
    """
    Read benchmark output from a file and return the JSON object.

    Apply benchmark_filter, a regular expression, with nearly the same
    semantics as the --benchmark_filter argument. May be None.
    Note: the Python regular expression engine is used instead of the
    one used by the C++ code, which may produce different results.

    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """

    def benchmark_wanted(benchmark):
        if benchmark_filter is None:
            return True
        name = benchmark.get("run_name", None) or benchmark["name"]
        return re.search(benchmark_filter, name) is not None

    with open(fname, "r") as f:
        results = json.load(f)
        if "context" in results:
            if "json_schema_version" in results["context"]:
                json_schema_version = results["context"]["json_schema_version"]
                if json_schema_version != 1:
                    print(
                        "In %s, got unsupported JSON schema version: %i, expected 1"
                        % (fname, json_schema_version)
                    )
                    sys.exit(1)
        if "benchmarks" in results:
            results["benchmarks"] = list(
                filter(benchmark_wanted, results["benchmarks"])
            )
        return results


def sort_benchmark_results(result):
    benchmarks = result["benchmarks"]

    # From inner key to the outer key!
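    # Python's sort is stable, so applying the sorts below in sequence leaves
    # the benchmarks ordered by family_index, then per_family_instance_index,
    # then run_type (aggregates after plain iterations), then repetition_index.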
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["repetition_index"]
        if "repetition_index" in benchmark
        else -1,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: 1
        if "run_type" in benchmark and benchmark["run_type"] == "aggregate"
        else 0,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["per_family_instance_index"]
        if "per_family_instance_index" in benchmark
        else -1,
    )
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark["family_index"]
        if "family_index" in benchmark
        else -1,
    )

    result["benchmarks"] = benchmarks
    return result


def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
    real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag("--benchmark_out=", benchmark_flags)
    is_temp_output = False
    if output_name is None:
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + [
            "--benchmark_out=%s" % output_name
        ]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % " ".join(cmd))
    exitCode = subprocess.call(cmd)
    if exitCode != 0:
        print("TEST FAILED...")
        sys.exit(exitCode)
    json_res = load_benchmark_results(output_name, None)
    if is_temp_output:
        os.unlink(output_name)
    return json_res


def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_JSON:
        benchmark_filter = find_benchmark_flag(
            "--benchmark_filter=", benchmark_flags
        )
        return load_benchmark_results(filename, benchmark_filter)
    if ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    raise ValueError("Unknown file type %s" % ftype)
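# Typical use (hypothetical arguments): run_or_load_benchmark("./bm_vector",
# ["--benchmark_filter=BM_push_back"]) runs the executable and returns its
# JSON output, while run_or_load_benchmark("old_run.json", []) simply loads
# previously saved results.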