//===- MLModelRunnerTest.cpp - test for MLModelRunner ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "gtest/gtest.h"

#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>

using namespace llvm;
// This is a mock of the kind of AOT-generated model evaluator. It has 2 tensors
// of shape {1}, and 'evaluation' adds them.
// The interface is the one expected by ReleaseModelRunner.
class MockAOTModel final {
  // Argument buffers (the 2 input tensors) and the result buffer. The runner
  // writes arguments through arg_data() and reads the result via result_data().
  int64_t A = 0;
  int64_t B = 0;
  int64_t R = 0;

public:
  MockAOTModel() = default;
  // Map a (prefixed) feature name to its argument index. Returns -1 for
  // unknown names so the runner can treat such features as absent.
  int LookupArgIndex(const std::string &Name) {
    if (Name == "prefix_a")
      return 0;
    if (Name == "prefix_b")
      return 1;
    return -1;
  }
  // There is a single result tensor, always at index 0.
  int LookupResultIndex(const std::string &) { return 0; }
  // "Evaluation": the result is the sum of the two arguments.
  void Run() { R = A + B; }
  // Buffer of the result tensor at RIndex; nullptr if out of range.
  void *result_data(int RIndex) {
    if (RIndex == 0)
      return &R;
    return nullptr;
  }
  // Buffer of the argument tensor at Index; nullptr if out of range.
  void *arg_data(int Index) {
    switch (Index) {
    case 0:
      return &A;
    case 1:
      return &B;
    default:
      return nullptr;
    }
  }
};
54 TEST(NoInferenceModelRunner
, AccessTensors
) {
55 const std::vector
<TensorSpec
> Inputs
{
56 TensorSpec::createSpec
<int64_t>("F1", {1}),
57 TensorSpec::createSpec
<int64_t>("F2", {10}),
58 TensorSpec::createSpec
<float>("F2", {5}),
61 NoInferenceModelRunner
NIMR(Ctx
, Inputs
);
62 NIMR
.getTensor
<int64_t>(0)[0] = 1;
63 std::memcpy(NIMR
.getTensor
<int64_t>(1),
64 std::vector
<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}.data(),
65 10 * sizeof(int64_t));
66 std::memcpy(NIMR
.getTensor
<float>(2),
67 std::vector
<float>{0.1f
, 0.2f
, 0.3f
, 0.4f
, 0.5f
}.data(),
69 ASSERT_EQ(NIMR
.getTensor
<int64_t>(0)[0], 1);
70 ASSERT_EQ(NIMR
.getTensor
<int64_t>(1)[8], 9);
71 ASSERT_EQ(NIMR
.getTensor
<float>(2)[1], 0.2f
);
74 TEST(ReleaseModeRunner
, NormalUse
) {
76 std::vector
<TensorSpec
> Inputs
{TensorSpec::createSpec
<int64_t>("a", {1}),
77 TensorSpec::createSpec
<int64_t>("b", {1})};
78 auto Evaluator
= std::make_unique
<ReleaseModeModelRunner
<MockAOTModel
>>(
79 Ctx
, Inputs
, "", "prefix_");
80 *Evaluator
->getTensor
<int64_t>(0) = 1;
81 *Evaluator
->getTensor
<int64_t>(1) = 2;
82 EXPECT_EQ(Evaluator
->evaluate
<int64_t>(), 3);
83 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(0), 1);
84 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(1), 2);
87 TEST(ReleaseModeRunner
, ExtraFeatures
) {
89 std::vector
<TensorSpec
> Inputs
{TensorSpec::createSpec
<int64_t>("a", {1}),
90 TensorSpec::createSpec
<int64_t>("b", {1}),
91 TensorSpec::createSpec
<int64_t>("c", {1})};
92 auto Evaluator
= std::make_unique
<ReleaseModeModelRunner
<MockAOTModel
>>(
93 Ctx
, Inputs
, "", "prefix_");
94 *Evaluator
->getTensor
<int64_t>(0) = 1;
95 *Evaluator
->getTensor
<int64_t>(1) = 2;
96 *Evaluator
->getTensor
<int64_t>(2) = -3;
97 EXPECT_EQ(Evaluator
->evaluate
<int64_t>(), 3);
98 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(0), 1);
99 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(1), 2);
100 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(2), -3);
103 TEST(ReleaseModeRunner
, ExtraFeaturesOutOfOrder
) {
105 std::vector
<TensorSpec
> Inputs
{
106 TensorSpec::createSpec
<int64_t>("a", {1}),
107 TensorSpec::createSpec
<int64_t>("c", {1}),
108 TensorSpec::createSpec
<int64_t>("b", {1}),
110 auto Evaluator
= std::make_unique
<ReleaseModeModelRunner
<MockAOTModel
>>(
111 Ctx
, Inputs
, "", "prefix_");
112 *Evaluator
->getTensor
<int64_t>(0) = 1; // a
113 *Evaluator
->getTensor
<int64_t>(1) = 2; // c
114 *Evaluator
->getTensor
<int64_t>(2) = -3; // b
115 EXPECT_EQ(Evaluator
->evaluate
<int64_t>(), -2); // a + b
116 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(0), 1);
117 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(1), 2);
118 EXPECT_EQ(*Evaluator
->getTensor
<int64_t>(2), -3);