[llvm-complete.git] / utils / benchmark / tools / gbench / report.py
blob 0c090981a833a626711e5e8391f61c0f1d9debbf
1 """report.py - Utilities for reporting statistics about benchmark results
2 """
3 import os
4 import re
5 import copy

class BenchmarkColor(object):
    def __init__(self, name, code):
        self.name = name
        self.code = code

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.code))

    def __format__(self, format):
        return self.code
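
# Note: __format__ is what lets a BenchmarkColor instance be interpolated
# directly by str.format(): '{}text'.format(BC_CYAN) yields the raw escape
# code followed by 'text', with no extra quoting.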

# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
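
# The codes above are standard ANSI SGR escape sequences. BC_NONE carries an
# empty code and is substituted for every color when use_color is False, so
# formatting with it is a no-op.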

def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
    'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
    is False then all color codes in 'args' and 'kwargs' are replaced with
    the empty string.
    """
    assert use_color is True or use_color is False
    if not use_color:
        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                for arg in args]
        kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                  for key, arg in kwargs.items()}
    return fmt_str.format(*args, **kwargs)
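
# Illustrative example (not part of the original module):
#
#   color_format(True, '{}hi{endc}', BC_FAIL, endc=BC_ENDC)
#   # -> '\033[91mhi\033[0m'
#   color_format(False, '{}hi{endc}', BC_FAIL, endc=BC_ENDC)
#   # -> 'hi'  (every BenchmarkColor collapses to BC_NONE)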


def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects.
    """
    longest_name = 1
    for bc in benchmark_list:
        if len(bc['name']) > longest_name:
            longest_name = len(bc['name'])
    return longest_name


def calculate_change(old_val, new_val):
    """
    Return a float representing the decimal change between old_val and new_val.
    """
    if old_val == 0 and new_val == 0:
        return 0.0
    if old_val == 0:
        return float(new_val - old_val) / (float(old_val + new_val) / 2)
    return float(new_val - old_val) / abs(old_val)
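
# Worked examples (illustrative): the change is relative to the old value,
# except when the old value is zero, where the midpoint of the two values is
# used as the denominator to avoid dividing by zero.
#
#   calculate_change(100, 110)  # -> +0.10 (10% slower)
#   calculate_change(100, 50)   # -> -0.50 (2x faster)
#   calculate_change(0, 10)     # -> 2.0   (10 / ((0 + 10) / 2))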


def filter_benchmark(json_orig, family, replacement=""):
    """
    Apply a filter to the JSON, keeping only benchmarks whose names match
    the 'family' regex; the matched portion of each name is rewritten to
    'replacement'.
    """
    regex = re.compile(family)
    filtered = {}
    filtered['benchmarks'] = []
    for be in json_orig['benchmarks']:
        if not regex.search(be['name']):
            continue
        filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
        filtered['benchmarks'].append(filteredbench)
    return filtered
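
# Illustrative sketch (hypothetical names): given benchmarks 'BM_Zero/4' and
# 'BM_One/4', filter_benchmark(data, 'BM_Zero', '.') keeps only the first
# family and renames it to './4', so two families from the same run can be
# diffed against each other name-for-name.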


def generate_difference_report(json1, json2, use_color=True):
    """
    Calculate and report the difference between each test of two benchmark
    runs specified as 'json1' and 'json2'.
    """
    first_col_width = find_longest_name(json1['benchmarks'])
    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None
    first_col_width = max(first_col_width, len('Benchmark'))
    first_line = "{:<{}s}Time             CPU      Time Old      Time New       CPU Old       CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]

    gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
    for bn in gen:
        other_bench = find_test(bn['name'])
        if not other_bench:
            continue

        if bn['time_unit'] != other_bench['time_unit']:
            continue

        def get_color(res):
            if res > 0.05:
                return BC_FAIL
            elif res > -0.07:
                return BC_WHITE
            else:
                return BC_CYAN
        fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
        tres = calculate_change(bn['real_time'], other_bench['real_time'])
        cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
        output_strs += [color_format(use_color, fmt_str,
                                     BC_HEADER, bn['name'], first_col_width,
                                     get_color(tres), tres, get_color(cpures), cpures,
                                     bn['real_time'], other_bench['real_time'],
                                     bn['cpu_time'], other_bench['cpu_time'],
                                     endc=BC_ENDC)]
    return output_strs
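
# Typical use (a sketch, assuming two JSON files produced with
# --benchmark_out=<file> --benchmark_out_format=json):
#
#   import json
#   with open('run1.json') as f1, open('run2.json') as f2:
#       report = generate_difference_report(json.load(f1), json.load(f2),
#                                           use_color=False)
#   print('\n'.join(report))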

###############################################################################
# Unit tests

import unittest

class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_basic(self):
        expect_lines = [
            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])


class TestReportDifferenceBetweenFamilies(unittest.TestCase):
    def load_result(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testOutput = os.path.join(testInputs, 'test2_run.json')
        with open(testOutput, 'r') as f:
            data = json.load(f)
        return data

    def test_basic(self):
        expect_lines = [
            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
        ]
        data = self.load_result()
        json1 = filter_benchmark(data, "BM_Z.ro", ".")
        json2 = filter_benchmark(data, "BM_O.e", ".")
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])


if __name__ == '__main__':
    unittest.main()
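
# Running this file directly (e.g. 'python report.py') executes the unit
# tests above via unittest.main().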

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;