[Infra] Fix version-check workflow (#100090)
[llvm-project.git] / mlir / utils / mbr / mlir-mbr.in
blob858c8ca718b9669d11541cb9f1ba5dbd803b3281
1 #!@Python3_EXECUTABLE@
2 # -*- coding: utf-8 -*-
4 import argparse
5 import datetime
6 import json
7 import os
8 import sys
10 from urllib import error as urlerror
11 from urllib import parse as urlparse
12 from urllib import request
15 mlir_source_root = "@MLIR_SOURCE_DIR@"
16 sys.path.insert(0, os.path.join(mlir_source_root, "utils", "mbr", "mbr"))
18 from main import main
if __name__ == "__main__":
    # Driver script: discover and run the MLIR benchmark suite, then either
    # print the results as LNT-format JSON or POST them to an LNT server.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--machine",
        required=True,
        help="A platform identifier on which the "
             "benchmarks are run. For example"
             " <hardware>-<arch>-<optimization level>-<branch-name>"
    )
    parser.add_argument(
        "--revision",
        required=True,
        help="The key used to identify different runs. "
             "Could be anything as long as it"
             " can be sorted by python's sort function"
    )
    parser.add_argument(
        "--url",
        help="The lnt server url to send the results to",
        default="http://localhost:8000/db_default/v4/nts/submitRun"
    )
    parser.add_argument(
        "--result-stdout",
        help="Print benchmarking results to stdout instead"
             " of sending it to lnt",
        default=False,
        action=argparse.BooleanOptionalAction
    )
    parser.add_argument(
        "top_level_path",
        help="The top level path from which to search for benchmarks",
        default=os.getcwd(),
    )
    parser.add_argument(
        "--stop_on_error",
        help="Should we stop the benchmark run on errors? Defaults to false",
        default=False,
        # Parse the value explicitly: with a bare string argument,
        # "--stop_on_error False" would yield the truthy string "False".
        type=lambda s: s.lower() in ("true", "1", "yes"),
    )
    args = parser.parse_args()

    # NOTE: utcnow() produces naive timestamps; kept as-is because the LNT
    # server expects this isoformat shape (no "+00:00" suffix).
    complete_benchmark_start_time = datetime.datetime.utcnow().isoformat()
    benchmark_function_dicts = main(args.top_level_path, args.stop_on_error)
    complete_benchmark_end_time = datetime.datetime.utcnow().isoformat()
    lnt_dict = {
        "format_version": "2",
        "machine": {"name": args.machine},
        "run": {
            # Fixed: start/end were previously swapped, reporting the run as
            # ending before it started.
            "start_time": complete_benchmark_start_time,
            "end_time": complete_benchmark_end_time,
            "llvm_project_revision": args.revision
        },
        "tests": benchmark_function_dicts,
        "name": "MLIR benchmark suite"
    }
    lnt_json = json.dumps(lnt_dict, indent=4)
    if args.result_stdout is True:
        print(lnt_json)
    else:
        # Submit the results to the LNT server as a form-encoded POST.
        request_data = urlparse.urlencode(
            {"input_data": lnt_json}
        ).encode("ascii")
        req = request.Request(args.url, request_data)
        try:
            # Use a context manager so the connection is always closed
            # (the response body itself is not needed).
            with request.urlopen(req):
                pass
        except urlerror.HTTPError as e:
            print(e)