// NOTE(review): the three lines below are residue from the git web viewer this
// file was copied from (commit subject, repository path, blob hash). Kept as
// comments for provenance; they are not part of the original source file.
//   [docs] Fix build-docs.sh
//   [llvm-project.git] / llvm / unittests / Analysis / MLModelRunnerTest.cpp
//   blob 79cd77a1f558e4e970cc8e2224673e3c3d3df23c
//===- MLModelRunnerTest.cpp - test for MLModelRunner ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "gtest/gtest.h"

#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
14 using namespace llvm;
16 namespace llvm {
17 // This is a mock of the kind of AOT-generated model evaluator. It has 2 tensors
18 // of shape {1}, and 'evaluation' adds them.
19 // The interface is the one expected by ReleaseModelRunner.
20 class MockAOTModel final {
21 int64_t A = 0;
22 int64_t B = 0;
23 int64_t R = 0;
25 public:
26 MockAOTModel() = default;
27 int LookupArgIndex(const std::string &Name) {
28 if (Name == "prefix_a")
29 return 0;
30 if (Name == "prefix_b")
31 return 1;
32 return -1;
34 int LookupResultIndex(const std::string &) { return 0; }
35 void Run() { R = A + B; }
36 void *result_data(int RIndex) {
37 if (RIndex == 0)
38 return &R;
39 return nullptr;
41 void *arg_data(int Index) {
42 switch (Index) {
43 case 0:
44 return &A;
45 case 1:
46 return &B;
47 default:
48 return nullptr;
52 } // namespace llvm
54 TEST(NoInferenceModelRunner, AccessTensors) {
55 const std::vector<TensorSpec> Inputs{
56 TensorSpec::createSpec<int64_t>("F1", {1}),
57 TensorSpec::createSpec<int64_t>("F2", {10}),
58 TensorSpec::createSpec<float>("F2", {5}),
60 LLVMContext Ctx;
61 NoInferenceModelRunner NIMR(Ctx, Inputs);
62 NIMR.getTensor<int64_t>(0)[0] = 1;
63 std::memcpy(NIMR.getTensor<int64_t>(1),
64 std::vector<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}.data(),
65 10 * sizeof(int64_t));
66 std::memcpy(NIMR.getTensor<float>(2),
67 std::vector<float>{0.1f, 0.2f, 0.3f, 0.4f, 0.5f}.data(),
68 5 * sizeof(float));
69 ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
70 ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
71 ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
74 TEST(ReleaseModeRunner, NormalUse) {
75 LLVMContext Ctx;
76 std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
77 TensorSpec::createSpec<int64_t>("b", {1})};
78 auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
79 Ctx, Inputs, "", "prefix_");
80 *Evaluator->getTensor<int64_t>(0) = 1;
81 *Evaluator->getTensor<int64_t>(1) = 2;
82 EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
83 EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
84 EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
87 TEST(ReleaseModeRunner, ExtraFeatures) {
88 LLVMContext Ctx;
89 std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
90 TensorSpec::createSpec<int64_t>("b", {1}),
91 TensorSpec::createSpec<int64_t>("c", {1})};
92 auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
93 Ctx, Inputs, "", "prefix_");
94 *Evaluator->getTensor<int64_t>(0) = 1;
95 *Evaluator->getTensor<int64_t>(1) = 2;
96 *Evaluator->getTensor<int64_t>(2) = -3;
97 EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
98 EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
99 EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
100 EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
103 TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
104 LLVMContext Ctx;
105 std::vector<TensorSpec> Inputs{
106 TensorSpec::createSpec<int64_t>("a", {1}),
107 TensorSpec::createSpec<int64_t>("c", {1}),
108 TensorSpec::createSpec<int64_t>("b", {1}),
110 auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
111 Ctx, Inputs, "", "prefix_");
112 *Evaluator->getTensor<int64_t>(0) = 1; // a
113 *Evaluator->getTensor<int64_t>(1) = 2; // c
114 *Evaluator->getTensor<int64_t>(2) = -3; // b
115 EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
116 EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
117 EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
118 EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);