2 # -*- coding: utf-8 -*-
import argparse
import datetime
import json
import os
import sys
from urllib import error as urlerror
from urllib import parse as urlparse
from urllib import request
# CMake-style @VAR@ placeholder, substituted at configure time with the
# absolute path to the MLIR source tree — TODO(review): confirm this file
# is processed by configure_file in the build.
mlir_source_root = "@MLIR_SOURCE_DIR@"
# Make the mbr (MLIR benchmark runner) package importable; it presumably
# provides the `main` entry point called below — verify against utils/mbr.
sys.path.insert(0, os.path.join(mlir_source_root, "utils", "mbr", "mbr"))
if __name__ == "__main__":
    # Command-line driver: run the MLIR benchmark suite and submit the
    # results to an LNT server (or print them with --result-stdout).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--machine",
        help="A platform identifier on which the "
        "benchmarks are run. For example"
        " <hardware>-<arch>-<optimization level>-<branch-name>",
    )
    parser.add_argument(
        "--revision",
        help="The key used to identify different runs. "
        "Could be anything as long as it"
        " can be sorted by python's sort function",
    )
    parser.add_argument(
        "--url",
        help="The lnt server url to send the results to",
        default="http://localhost:8000/db_default/v4/nts/submitRun",
    )
    parser.add_argument(
        "--result-stdout",
        help="Print benchmarking results to stdout instead"
        " of sending it to lnt",
        default=False,
        action=argparse.BooleanOptionalAction,
    )
    parser.add_argument(
        "--top-level-path",
        help="The top level path from which to search for benchmarks",
        default=os.getcwd(),
    )
    parser.add_argument(
        "--stop-on-error",
        help="Should we stop the benchmark run on errors? Defaults to false",
        default=False,
        action=argparse.BooleanOptionalAction,
    )
    args = parser.parse_args()

    # Timestamp the whole suite so LNT can record wall-clock duration.
    complete_benchmark_start_time = datetime.datetime.utcnow().isoformat()
    benchmark_function_dicts = main(args.top_level_path, args.stop_on_error)
    complete_benchmark_end_time = datetime.datetime.utcnow().isoformat()

    # LNT "format_version 2" submission payload.
    lnt_dict = {
        "format_version": "2",
        "machine": {"name": args.machine},
        "run": {
            # BUG FIX: start_time/end_time were assigned to each other's
            # variables, reporting a negative/backwards run duration.
            "start_time": complete_benchmark_start_time,
            "end_time": complete_benchmark_end_time,
            "llvm_project_revision": args.revision,
        },
        "tests": benchmark_function_dicts,
        "name": "MLIR benchmark suite",
    }
    lnt_json = json.dumps(lnt_dict, indent=4)
    if args.result_stdout is True:
        print(lnt_json)
    else:
        # urlencode() returns str, but Request's `data` must be bytes in
        # Python 3 — encode before POSTing to the LNT server.
        request_data = urlparse.urlencode({"input_data": lnt_json}).encode(
            "ascii"
        )
        req = request.Request(args.url, request_data)
        try:
            resp = request.urlopen(req)
        except urlerror.HTTPError as e:
            # Best-effort submission: report the server error, don't crash.
            print(e)