# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#
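
# Runs every benchmark found in the given benchmark directory under
# "perf stat", aggregates the counters across the requested number of runs,
# and submits the results to an LNT server.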

import argparse
import datetime
import glob
import json
import os
import re
import subprocess

try:
    # Python 3
    from urllib.parse import urlencode
    from urllib.request import urlopen, Request
except ImportError:
    # Python 2 fallback
    from urllib import urlencode
    from urllib2 import urlopen, Request

parser = argparse.ArgumentParser()
parser.add_argument("benchmark_directory")
parser.add_argument("--runs", type=int, default=10)
parser.add_argument("--wrapper", default="")
parser.add_argument("--machine", required=True)
parser.add_argument("--revision", required=True)
parser.add_argument("--threads", action="store_true")
parser.add_argument(
    "--url",
    help="The lnt server url to send the results to",
    default="http://localhost:8000/db_default/v4/link/submitRun",
)
args = parser.parse_args()
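
# Example invocation (all names are illustrative):
#   python benchmark.py <benchmark_directory> --machine my-machine --revision 123456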


class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant

    def __str__(self):
        if not self.variant:
            return self.directory
        return "%s-%s" % (self.directory, self.variant)
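
# Each benchmark is a subdirectory containing one or more linker response
# files: "response.txt" for the default variant and "response-<variant>.txt"
# for named variants.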
def getBenchmarks():
    ret = []
    for i in glob.glob("*/response*.txt"):
        m = re.match(r"response-(.*)\.txt", os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret
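
# Helpers for parsing "perf stat" output. A counter line looks roughly like
# (illustrative):
#   1,234,567      instructions   #  1.23  insn per cycle
# i.e. "<value> <event> # <derived metric>". Values use comma digit grouping,
# the part after '#' is discarded, and the event name becomes the metric key.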

def parsePerfNum(num):
    # Strip the comma separators, then parse as int if possible, else float.
    num = num.replace(b",", b"")
    try:
        return int(num)
    except ValueError:
        return float(num)


def parsePerfLine(line):
    ret = {}
    # Keep only the counter part before perf's '#' comment column.
    line = line.split(b"#")[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode("ascii")] = parsePerfNum(p[0])
    return ret


def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b"\n")]

    # The total wall-clock time reported by "perf stat".
    seconds = [x for x in lines if b"seconds time elapsed" in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret["seconds-elapsed"] = parsePerfNum(seconds)

    # Every line containing a '#' carries an individual counter.
    measurement_lines = [x for x in lines if b"#" in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret
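
# "perf stat" prints its counters to stderr, so stderr is folded into the
# captured output.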
def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e


def combinePerfRun(acc, d):
    # Collect each metric's value from this run into a list of samples.
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a
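
# --wrapper takes a comma-separated prefix command that is prepended to the
# perf invocation, e.g. --wrapper=taskset,-c,0 to pin the link to one core
# (the taskset example is illustrative).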
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(",") if x]
    for i in range(args.runs):
        out = run(wrapper_args + ["perf", "stat"] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    return ret
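
# Each benchmark is linked with ../ld.lld via its response file. Unless
# --threads is passed, --no-threads is added so the link is single-threaded.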
def runBench(bench):
    thread_arg = [] if args.threads else ["--no-threads"]
    os.chdir(bench.directory)
    suffix = "-%s" % bench.variant if bench.variant else ""
    response = "response" + suffix + ".txt"
    ret = perf(["../ld.lld", "@" + response, "-o", "t"] + thread_arg)
    ret["name"] = str(bench)
    # Return to the benchmark root so the next benchmark's chdir works.
    os.chdir("..")
    return ret
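
# Build the JSON document for LNT's submitRun endpoint: a "machine" section,
# a "run" section with the revision and start/end timestamps, and one entry
# per benchmark under "tests" carrying the per-run samples. The layout here
# follows the fields used below; see the LNT documentation for the full schema.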
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        "machine": {"name": args.machine},
        "run": {
            "start_time": start,
            "end_time": end,
            "llvm_project_revision": args.revision,
        },
        "tests": tests,
    }
    return json.dumps(ret, sort_keys=True, indent=4)
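
# LNT's submitRun endpoint accepts the JSON document POSTed as a form-encoded
# "input_data" field.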
def submitToServer(data):
    data2 = urlencode({"input_data": data}).encode("ascii")
    urlopen(Request(args.url, data2))


os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)