# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#
import argparse
import datetime
import glob
import json
import os
import re
import subprocess

try:
    from urllib.parse import urlencode
    from urllib.request import urlopen, Request
except ImportError:
    # Python 2 fallback.
    from urllib import urlencode
    from urllib2 import urlopen, Request
parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The lnt server url to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()
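
# A benchmark is a directory holding a linker response file; an optional
# variant name comes from the response file's suffix.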
class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant

    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)
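
# Collect every '*/response*.txt' under the current directory as a benchmark.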
def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret
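
# Parse a number as printed by 'perf stat', dropping thousands separators.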
def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)
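
# Parse one 'perf stat' counter line, mapping the event name to its value.
# Anything after '#' on the line is a comment and is ignored.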
def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret
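
# Parse the whole 'perf stat' output: the elapsed wall-clock time plus every
# counter line (the lines containing '#').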
def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret
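
# Run a command, capturing stdout and stderr; print the output on failure.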
def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise
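
# Merge the counters from one run into the accumulator, keeping a list of
# samples per counter.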
def combinePerfRun(acc, d):
    for k, v in d.items():
        acc.setdefault(k, []).append(v)
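
# Run 'cmd' under 'perf stat' args.runs times, optionally through the given
# wrapper, and collect the counters from every run.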
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    return ret
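
# Link one benchmark with ../ld.lld using its response file and return the
# measured counters.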
def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret
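
# Run every benchmark and package the results as an LNT JSON report.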
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version' : 2,
        'machine' : { 'name' : args.machine },
        'run' : {
            'start_time' : start,
            'end_time' : end,
            'llvm_project_revision': args.revision
        },
        'tests' : tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)
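
# POST the JSON report to the LNT server's submitRun endpoint.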
def submitToServer(data):
    data2 = urlencode({ 'input_data' : data }).encode('ascii')
    urlopen(Request(args.url, data2))
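
# Run all the benchmarks in the given directory and submit the results.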
os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)