4 # Copyright The SCons Foundation
6 # Permission is hereby granted, free of charge, to any person obtaining
7 # a copy of this software and associated documentation files (the
8 # "Software"), to deal in the Software without restriction, including
9 # without limitation the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the Software, and to
11 # permit persons to whom the Software is furnished to do so, subject to
12 # the following conditions:
14 # The above copyright notice and this permission notice shall be included
15 # in all copies or substantial portions of the Software.
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
18 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
19 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A script for timing snippets of Python code.

By default, this script will execute a single Python file specified on
the command line and time any functions in a list named "FunctionList"
set by the Python file under test, or (by default) time any functions
in the file whose names begin with "Func".

All functions are assumed to get passed the same arguments, and the
inputs are specified in a list named "Data," each element of which
is a list consisting of a tag name, a list of positional arguments,
and a dictionary of keyword arguments.

Each function is expected to test a single, comparable snippet of
Python code.  IMPORTANT: We want to test the timing of the code
itself, not Python function call overhead, so every function should
put its code under test within the following block:

      for i in IterationList:

This will allow (as much as possible) us to time just the code itself,
not Python function call overhead.
"""

from __future__ import division, print_function

import getopt
import os
import sys
import time
import types
from pprint import pprint
Usage = """\
Usage:  bench.py OPTIONS file.py

  --clock                       Use the time.clock function
  --func PREFIX                 Test functions whose names begin with PREFIX
  -h, --help                    Display this help and exit
  -i ITER, --iterations ITER    Run each code snippet ITER times
  --time                        Use the time.time function
  -r RUNS, --runs RUNS          Average times for RUNS invocations of
                                each function
"""

# How many times each snippet of code will be (or should be) run by the
# functions under test to gather the time (the "inner loop").

Iterations = 10000

# How many times we'll run each function to collect its aggregate time
# and try to average out timing differences induced by system performance
# (the "outer loop").

Runs = 10

# The prefix of the functions under test.  This will be used if
# there's no explicit list defined in FunctionList.

FunctionPrefix = 'Func'
# Comment from Python2 timeit module:
# The difference in default timer function is because on Windows,
# clock() has microsecond granularity but time()'s granularity is 1/60th
# of a second; on Unix, clock() has 1/100th of a second granularity and
# time() is much more precise.  On either platform, the default timer
# functions measure wall clock time, not the CPU time.  This means that
# other processes running on the same computer may interfere with the
# timing.  The best thing to do when accurate timing is necessary is to
# repeat the timing a few times and use the best time.  The -i option is
# good for this.
#
# On Python3, a new time.perf_counter function picks the best available
# timer, so we use that if we can, else fall back as per above.

try:
    Now = time.perf_counter
except AttributeError:
    # Pre-3.3 Python: choose the most precise wall-clock timer available
    # for the current platform (see the timeit comment above).
    if sys.platform == 'win32':
        Now = time.clock
    else:
        Now = time.time
# Parse command-line options; getopt raises GetoptError (uncaught, by
# design for this developer tool) on an unknown option.
opts, args = getopt.getopt(sys.argv[1:], 'hi:r:',
                           ['clock', 'func=', 'help',
                            'iterations=', 'time', 'runs='])

for o, a in opts:
    if o in ['--clock']:
        try:
            Now = time.clock
        except AttributeError:
            # time.clock was removed in Python 3.8.
            sys.stderr.write("time.clock unavailable on this Python\n")
            sys.exit(1)
    elif o in ['--func']:
        FunctionPrefix = a
    elif o in ['-h', '--help']:
        sys.stdout.write(Usage)
        sys.exit(0)
    elif o in ['-i', '--iterations']:
        Iterations = int(a)
    elif o in ['--time']:
        Now = time.time
    elif o in ['-r', '--runs']:
        Runs = int(a)

if len(args) != 1:
    sys.stderr.write("bench.py:  only one file argument must be specified\n")
    sys.stderr.write(Usage)
    sys.exit(1)
(bench_name, _) = os.path.splitext(args[0])
print(f"Bench: {bench_name} : {args[0]}")
results_filename = f"{bench_name}_results.csv"

# Execute the file under test in this module's namespace so its
# functions (and optional FunctionList/Data) become visible here.
with open(args[0], 'r') as f:
    exec(f.read())

try:
    FunctionList
except NameError:
    # No explicit FunctionList: collect every function whose name starts
    # with FunctionPrefix.  (startswith, not a fixed-width slice, so a
    # --func prefix of any length works.)
    function_names = sorted(
        name for name in list(locals().keys())
        if name.startswith(FunctionPrefix)
    )
    lvars = locals()
    candidates = [lvars[name] for name in function_names]
    FunctionList = [fn for fn in candidates
                    if isinstance(fn, types.FunctionType)]

# The shared dummy sequence each function under test iterates over; its
# length is the inner-loop iteration count.
IterationList = [None] * Iterations
def timer(func, *args, **kw):
    """Time Runs invocations of func(*args, **kw).

    Returns a list of Runs samples, each the average wall-clock time of
    one inner-loop iteration (total elapsed time divided by Iterations).
    """
    results = []
    for i in range(Runs):
        start = Now()
        func(*args, **kw)
        finish = Now()
        # Per-iteration time: the function body loops Iterations times.
        results.append((finish - start) / Iterations)
    return results
def display(func, result_label, results):
    """Print the average time for one labeled data set and record it.

    results holds per-iteration times in seconds; the printed/recorded
    value is the mean converted to microseconds.  Results are stored in
    the module-level results_dict, keyed by the function's docstring
    (the implementation description) and then by the data-set label.
    """
    test_result = (sum(results) * 1e6) / len(results)
    print("    %8.3f" % (test_result), ':', result_label)
    # One nested dict per implementation; create it on first use.
    if results_dict.get(func.__doc__, False):
        results_dict[func.__doc__][result_label] = test_result
    else:
        results_dict[func.__doc__] = {result_label: test_result}
# Accumulates {function docstring: {data-set label: mean microseconds}}.
results_dict = {}

for func in FunctionList:
    if func.__doc__:
        d = ' (' + func.__doc__ + ')'
    else:
        d = ''
    print(func.__name__ + d + ':')

    # Data is supplied by the file under test: a list of
    # (label, positional-args, keyword-args) entries.
    for label, args, kw in Data:
        r = timer(func, *args, **kw)
        # print(f"{label} {r}")
        display(func, label, r)

py_ver_string = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
# pprint(results_dict)

# Append one CSV row per implementation: Python version, the
# implementation's description, then its mean time for each data set.
tests = [label for label, args, kw in Data]
columns = ['Python Version', 'Implementation'] + tests
with open(results_filename, 'a') as r:
    print(",".join(columns), file=r)
    for implementation in results_dict:
        print(f'{py_ver_string},"{implementation}"', file=r, end='')
        for test in tests:
            print(',%8.3f' % results_dict[implementation][test],
                  file=r, end='')
        # Terminate the CSV row.
        print(file=r)
208 # indent-tabs-mode:nil
210 # vim: set expandtab tabstop=4 shiftwidth=4: