llvm/lib/Analysis/models/interactive_host.py
"""Utility for testing InteractiveModelRunner.

Use it from pass-specific tests by providing a main .py which calls this library's
`run_interactive` with an appropriate callback to provide advice.

From .ll tests, just call the above-mentioned main as a prefix to the opt/llc
invocation (with the appropriate flags enabling the interactive mode).

Examples:
test/Transforms/Inline/ML/interactive-mode.ll
test/CodeGen/MLRegalloc/interactive-mode.ll
"""
import ctypes
import log_reader
import io
import math
import os
import subprocess
from typing import Callable, List, Union


def send(f: io.BufferedWriter, value: Union[int, float], spec: log_reader.TensorSpec):
    """Send the `value` - currently just a scalar - formatted as per `spec`."""

    # just int64 for now
    assert spec.element_type == ctypes.c_int64
    to_send = ctypes.c_int64(int(value))
    # The write must cover the full tensor size implied by the spec.
    assert f.write(bytes(to_send)) == ctypes.sizeof(spec.element_type) * math.prod(
        spec.shape
    )
    f.flush()


def run_interactive(
    temp_rootname: str,
    make_response: Callable[[List[log_reader.TensorValue]], Union[int, float]],
    process_and_args: List[str],
):
    """Host the compiler.

    Args:
      temp_rootname: the base file name from which to construct the 2 pipes for
        communicating with the compiler.
      make_response: a function that, given the current tensor values, provides a
        response.
      process_and_args: the full command line for the compiler. It is assumed it
        contains a flag pointing to `temp_rootname`, so that the InteractiveModelRunner
        would attempt communication on the same pair of pipes this function opens.

    This function sets up the communication with the compiler - via 2 files named
    `temp_rootname`.in and `temp_rootname`.out - prints out the received features,
    and sends advice (obtained from `make_response`) back to the compiler.
    It's used for testing, and also to showcase how to set up communication in an
    interactive ML ("gym") environment.
    """
    to_compiler = temp_rootname + ".in"
    from_compiler = temp_rootname + ".out"
    try:
        # Create the two named pipes used by the interactive protocol: one for
        # advice (to_compiler) and one for observations (from_compiler).
        os.mkfifo(to_compiler, 0o666)
        os.mkfifo(from_compiler, 0o666)
        compiler_proc = subprocess.Popen(
            process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL
        )
        with io.BufferedWriter(io.FileIO(to_compiler, "wb")) as tc:
            with io.BufferedReader(io.FileIO(from_compiler, "rb")) as fc:
                # The compiler first sends a header describing the feature
                # tensors and the expected advice tensor.
                tensor_specs, _, advice_spec = log_reader.read_header(fc)
                context = None
                while compiler_proc.poll() is None:
                    next_event = fc.readline()
                    if not next_event:
                        break
                    (
                        last_context,
                        observation_id,
                        features,
                        _,
                    ) = log_reader.read_one_observation(
                        context, next_event, fc, tensor_specs, None
                    )
                    if last_context != context:
                        print(f"context: {last_context}")
                    context = last_context
                    print(f"observation: {observation_id}")
                    tensor_values = []
                    for fv in features:
                        log_reader.pretty_print_tensor_value(fv)
                        tensor_values.append(fv)
                    # Ask the callback for advice and send it back on the pipe.
                    send(tc, make_response(tensor_values), advice_spec)
        _, err = compiler_proc.communicate()
        print(err.decode("utf-8"))
        compiler_proc.wait()

    finally:
        os.unlink(to_compiler)
        os.unlink(from_compiler)
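

# Illustrative usage sketch (not part of the original library): a minimal driver of
# the kind the module docstring describes - a main .py that calls `run_interactive`
# with a callback producing advice. The rootname "interactive.pipe" and the
# constant-0 callback are made-up placeholders; a real pass-specific test would
# supply its own callback and the full opt/llc command line (with the flags that
# enable the interactive mode, as in the .ll tests referenced in the module
# docstring).
if __name__ == "__main__":
    import sys

    # Always advise 0; a real policy would inspect `tensor_values` to decide.
    run_interactive("interactive.pipe", lambda tensor_values: 0, sys.argv[1:])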