//===- MLModelRunnerTest.cpp - test for MLModelRunner ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"

#include <atomic>
#include <cstring>
#include <thread>

using namespace llvm;

namespace llvm {
// This is a mock of the kind of AOT-generated model evaluator. It has 2
// tensors of shape {1}, and 'evaluation' adds them.
// The interface is the one expected by ReleaseModeModelRunner.
class MockAOTModel final {
  int64_t A = 0;
  int64_t B = 0;
  int64_t R = 0;

public:
  MockAOTModel() = default;
  int LookupArgIndex(const std::string &Name) {
    if (Name == "prefix_a")
      return 0;
    if (Name == "prefix_b")
      return 1;
    return -1;
  }
  int LookupResultIndex(const std::string &) { return 0; }
  void Run() { R = A + B; }
  void *result_data(int RIndex) {
    if (RIndex == 0)
      return &R;
    return nullptr;
  }
  void *arg_data(int Index) {
    switch (Index) {
    case 0:
      return &A;
    case 1:
      return &B;
    default:
      return nullptr;
    }
  }
};
} // namespace llvm
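
// NoInferenceModelRunner only allocates the feature buffers described by the
// TensorSpecs; it never evaluates a model. The test below just checks that
// tensors of different element types can be written and read back through the
// common MLModelRunner interface.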
TEST(NoInferenceModelRunner, AccessTensors) {
  const std::vector<TensorSpec> Inputs{
      TensorSpec::createSpec<int64_t>("F1", {1}),
      TensorSpec::createSpec<int64_t>("F2", {10}),
      TensorSpec::createSpec<float>("F2", {5}),
  };
  LLVMContext Ctx;
  NoInferenceModelRunner NIMR(Ctx, Inputs);
  NIMR.getTensor<int64_t>(0)[0] = 1;
  std::memcpy(NIMR.getTensor<int64_t>(1),
              std::vector<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}.data(),
              10 * sizeof(int64_t));
  std::memcpy(NIMR.getTensor<float>(2),
              std::vector<float>{0.1f, 0.2f, 0.3f, 0.4f, 0.5f}.data(),
              5 * sizeof(float));
  ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
  ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
  ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
}
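
// The "prefix_" argument below makes ReleaseModeModelRunner look up each
// feature name with that prefix prepended, which is how the un-prefixed specs
// "a" and "b" are matched to the mock's prefix_a and prefix_b arguments.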
TEST(ReleaseModeRunner, NormalUse) {
  LLVMContext Ctx;
  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                                 TensorSpec::createSpec<int64_t>("b", {1})};
  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
      Ctx, Inputs, "", "prefix_");
  *Evaluator->getTensor<int64_t>(0) = 1;
  *Evaluator->getTensor<int64_t>(1) = 2;
  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
}
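
// Extra features the compiled model doesn't know about still get buffers, but
// their values never reach the model: LookupArgIndex returns -1 for
// "prefix_c", so the result below stays a + b even though c is set to -3.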
TEST(ReleaseModeRunner, ExtraFeatures) {
  LLVMContext Ctx;
  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                                 TensorSpec::createSpec<int64_t>("b", {1}),
                                 TensorSpec::createSpec<int64_t>("c", {1})};
  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
      Ctx, Inputs, "", "prefix_");
  *Evaluator->getTensor<int64_t>(0) = 1;
  *Evaluator->getTensor<int64_t>(1) = 2;
  *Evaluator->getTensor<int64_t>(2) = -3;
  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
}
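
// Buffers are bound to model arguments by name, not by position: with the
// specs listed as a, c, b, the value written at index 2 still feeds prefix_b,
// so the model computes a + b = 1 + (-3) = -2.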
TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
  LLVMContext Ctx;
  std::vector<TensorSpec> Inputs{
      TensorSpec::createSpec<int64_t>("a", {1}),
      TensorSpec::createSpec<int64_t>("c", {1}),
      TensorSpec::createSpec<int64_t>("b", {1}),
  };
  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
      Ctx, Inputs, "", "prefix_");
  *Evaluator->getTensor<int64_t>(0) = 1;         // a
  *Evaluator->getTensor<int64_t>(1) = 2;         // c
  *Evaluator->getTensor<int64_t>(2) = -3;        // b
  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
}

#if defined(LLVM_ON_UNIX)
TEST(InteractiveModelRunner, Evaluation) {
  LLVMContext Ctx;
  // Test the interaction with an external advisor by asking for advice twice.
  // Use simple values; the Logger underneath is tested more extensively
  // elsewhere.
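  // Wire format (mirrored from TrainingLogger): a JSON header, a JSON context
  // line, then per observation a JSON line, the raw tensor bytes, and a '\n';
  // the advisor answers with the raw bytes of the advice tensor.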
  std::vector<TensorSpec> Inputs{
      TensorSpec::createSpec<int64_t>("a", {1}),
      TensorSpec::createSpec<int64_t>("b", {1}),
      TensorSpec::createSpec<int64_t>("c", {1}),
  };
  TensorSpec AdviceSpec = TensorSpec::createSpec<float>("advice", {1});
  // Create the 2 files. Ideally we'd create them as named pipes, but that's
  // not quite supported by the generic API.
  llvm::unittest::TempDir Tmp("tmpdir", /*Unique=*/true);
  SmallString<128> FromCompilerName(Tmp.path().begin(), Tmp.path().end());
  SmallString<128> ToCompilerName(Tmp.path().begin(), Tmp.path().end());
  sys::path::append(FromCompilerName, "InteractiveModelRunner_Evaluation.out");
  sys::path::append(ToCompilerName, "InteractiveModelRunner_Evaluation.in");
  EXPECT_EQ(::mkfifo(FromCompilerName.c_str(), 0666), 0);
  EXPECT_EQ(::mkfifo(ToCompilerName.c_str(), 0666), 0);

  FileRemover Cleanup1(FromCompilerName);
  FileRemover Cleanup2(ToCompilerName);
  // Since the evaluator sends the features over and then blocks waiting for
  // an answer, we must spawn a thread playing the role of the advisor / host:
  std::atomic<int> SeenObservations = 0;
  // Start the host first to make sure the pipes are being prepared. Otherwise
  // the evaluator will hang.
  std::thread Advisor([&]() {
    // Open the writer first. This is because the evaluator will try opening
    // the "input" pipe first. An alternative that avoids ordering is for the
    // host to open the pipes RW.
    std::error_code EC;
    raw_fd_ostream ToCompiler(ToCompilerName, EC);
    EXPECT_FALSE(EC);
    int FromCompilerHandle = 0;
    EXPECT_FALSE(
        sys::fs::openFileForRead(FromCompilerName, FromCompilerHandle));
    sys::fs::file_t FromCompiler =
        sys::fs::convertFDToNativeFile(FromCompilerHandle);
    EXPECT_EQ(SeenObservations, 0);
    // Helper to read headers and other json lines.
    SmallVector<char, 1024> Buffer;
    auto ReadLn = [&]() {
      Buffer.clear();
      while (true) {
        char Chr = 0;
        auto ReadOrErr = sys::fs::readNativeFile(FromCompiler, {&Chr, 1});
        EXPECT_FALSE(ReadOrErr.takeError());
        if (!*ReadOrErr)
          continue;
        if (Chr == '\n')
          return StringRef(Buffer.data(), Buffer.size());
        Buffer.push_back(Chr);
      }
    };
    // See include/llvm/Analysis/Utils/TrainingLogger.h
    // First comes the header
    auto Header = json::parse(ReadLn());
    EXPECT_FALSE(Header.takeError());
    EXPECT_NE(Header->getAsObject()->getArray("features"), nullptr);
    EXPECT_NE(Header->getAsObject()->getObject("advice"), nullptr);
    // Then comes the context
    EXPECT_FALSE(json::parse(ReadLn()).takeError());
    int64_t Features[3] = {0};
    auto FullyRead = [&]() {
      size_t InsPt = 0;
      const size_t ToRead = 3 * Inputs[0].getTotalTensorBufferSize();
      char *Buff = reinterpret_cast<char *>(Features);
      while (InsPt < ToRead) {
        auto ReadOrErr = sys::fs::readNativeFile(
            FromCompiler, {Buff + InsPt, ToRead - InsPt});
        EXPECT_FALSE(ReadOrErr.takeError());
        InsPt += *ReadOrErr;
      }
    };
    // Observation
    EXPECT_FALSE(json::parse(ReadLn()).takeError());
    // Tensor values
    FullyRead();
    // Then a '\n'
    char Chr = 0;
    auto ReadNL = [&]() {
      do {
        auto ReadOrErr = sys::fs::readNativeFile(FromCompiler, {&Chr, 1});
        EXPECT_FALSE(ReadOrErr.takeError());
        if (Chr == '\n')
          return;
      } while (true);
    };
    ReadNL();
    EXPECT_EQ(Chr, '\n');
    EXPECT_EQ(Features[0], 42);
    EXPECT_EQ(Features[1], 43);
    EXPECT_EQ(Features[2], 100);
    ++SeenObservations;
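
    // Reply with the advice tensor's raw bytes; the evaluator is blocked
    // reading them on the other pipe.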
    float Advice = 42.0012;
    ToCompiler.write(reinterpret_cast<const char *>(&Advice),
                     AdviceSpec.getTotalTensorBufferSize());
    ToCompiler.flush();
    // Second observation, and same idea as above
    EXPECT_FALSE(json::parse(ReadLn()).takeError());
    FullyRead();
    ReadNL();
    EXPECT_EQ(Chr, '\n');
    EXPECT_EQ(Features[0], 10);
    EXPECT_EQ(Features[1], -2);
    EXPECT_EQ(Features[2], 1);
    ++SeenObservations;
    Advice = 50.30;
    ToCompiler.write(reinterpret_cast<const char *>(&Advice),
                     AdviceSpec.getTotalTensorBufferSize());
    ToCompiler.flush();
    sys::fs::closeFile(FromCompiler);
  });
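
  // Constructing the runner opens both pipes and sends the JSON header;
  // switchContext then emits the context line the advisor parses next.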
  InteractiveModelRunner Evaluator(Ctx, Inputs, AdviceSpec, FromCompilerName,
                                   ToCompilerName);

  Evaluator.switchContext("hi");

  EXPECT_EQ(SeenObservations, 0);
  *Evaluator.getTensor<int64_t>(0) = 42;
  *Evaluator.getTensor<int64_t>(1) = 43;
  *Evaluator.getTensor<int64_t>(2) = 100;
  float Ret = Evaluator.evaluate<float>();
  EXPECT_EQ(SeenObservations, 1);
  EXPECT_FLOAT_EQ(Ret, 42.0012);

  *Evaluator.getTensor<int64_t>(0) = 10;
  *Evaluator.getTensor<int64_t>(1) = -2;
  *Evaluator.getTensor<int64_t>(2) = 1;
  Ret = Evaluator.evaluate<float>();
  EXPECT_EQ(SeenObservations, 2);
  EXPECT_FLOAT_EQ(Ret, 50.30);
  Advisor.join();
}
#endif