//===- TFUtils.cpp - TFLite-based evaluation utilities --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with TFLite.
//
//===----------------------------------------------------------------------===//
12 #include "llvm/Config/config.h"
13 #if defined(LLVM_HAVE_TFLITE)
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/Analysis/Utils/TFUtils.h"
17 #include "llvm/Support/Base64.h"
18 #include "llvm/Support/CommandLine.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/JSON.h"
21 #include "llvm/Support/MemoryBuffer.h"
22 #include "llvm/Support/Path.h"
23 #include "llvm/Support/raw_ostream.h"
25 #include "tensorflow/lite/interpreter.h"
26 #include "tensorflow/lite/kernels/register.h"
27 #include "tensorflow/lite/model.h"
28 #include "tensorflow/lite/model_builder.h"
29 #include "tensorflow/lite/op_resolver.h"
30 #include "tensorflow/lite/logger.h"
class EvaluationResultImpl {
public:
  EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
      : Outputs(Outputs) {}

  const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;

private:
  const std::vector<const TfLiteTensor *> Outputs;
};

class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t outputSize() const { return Output.size(); }

  std::unique_ptr<EvaluationResultImpl> evaluate() {
    Interpreter->Invoke();
    return std::make_unique<EvaluationResultImpl>(Output);
  }

  const std::vector<TfLiteTensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  std::unique_ptr<tflite::FlatBufferModel> Model;

  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  std::unique_ptr<tflite::Interpreter> Interpreter;

  /// The input tensors. We set up the tensors once and just mutate their
  /// scalars before each evaluation. The input tensors keep their value after
  /// an evaluation.
  std::vector<TfLiteTensor *> Input;

  /// The output nodes.
  std::vector<const TfLiteTensor *> Output;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
                                const TensorSpec &Spec);
};

} // namespace llvm

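// Example construction of the public TFModelEvaluator wrapper (an
// illustrative sketch, not part of this file; the "input"/"output" names,
// shapes, and ModelDir are hypothetical, while TensorSpec::createSpec is the
// helper declared in llvm/Analysis/TensorSpec.h; the specs are bound to
// TFLite tensors named "<name>:<port>", e.g. "input:0"):
//
//   std::vector<TensorSpec> InputSpecs{
//       TensorSpec::createSpec<float>("input", {1})};
//   std::vector<TensorSpec> OutputSpecs{
//       TensorSpec::createSpec<float>("output", {1})};
//   TFModelEvaluator Ev(ModelDir, InputSpecs, OutputSpecs);
//   if (!Ev.isValid())
//     return; // e.g. model.tflite was missing, or a spec/tensor mismatch.
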
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags = "serve")
    : Input(InputSpecs.size()), Output(OutputSpecs.size()) {
  // INFO and DEBUG messages could be numerous and not particularly interesting
  tflite::LoggerOptions::SetMinimumLogSeverity(tflite::TFLITE_LOG_WARNING);
  // FIXME: make ErrorReporter a member (may also need subclassing
  // StatefulErrorReporter) to easily get the latest error status, for
  // debugging.
  tflite::StderrReporter ErrorReporter;
  SmallVector<char, 128> TFLitePathBuff;
  llvm::sys::path::append(TFLitePathBuff, SavedModelPath, "model.tflite");
  StringRef TFLitePath(TFLitePathBuff.data(), TFLitePathBuff.size());
  Model = tflite::FlatBufferModel::BuildFromFile(TFLitePath.str().c_str(),
                                                 &ErrorReporter);
  if (!Model) {
    invalidate();
    return;
  }

  tflite::ops::builtin::BuiltinOpResolver Resolver;
  tflite::InterpreterBuilder Builder(*Model, Resolver);
  Builder(&Interpreter);

  if (!Interpreter) {
    invalidate();
    return;
  }

  // We assume the input buffers are valid for the lifetime of the interpreter.
  // By default, tflite allocates memory in an arena and will periodically take
  // away memory and reallocate it in a different location after evaluations in
  // order to improve utilization of the buffers owned in the arena. So, we
  // explicitly mark our input buffers as persistent to avoid this behavior.
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    Interpreter->tensor(I)->allocation_type =
        TfLiteAllocationType::kTfLiteArenaRwPersistent;
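
  // A consequence of the persistent allocation above (a hedged sketch, which
  // assumes a model with a single float input at index 0): a buffer pointer
  // fetched once through getUntypedInput stays valid across evaluations, e.g.
  //
  //   float *Buf = static_cast<float *>(Ev.getUntypedInput(0));
  //   Buf[0] = 1.0f;
  //   Ev.evaluate();
  //   Buf[0] = 2.0f; // Buf need not be re-fetched.
  //   Ev.evaluate();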
  if (Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
    invalidate();
    return;
  }
  // Known inputs and outputs
  StringMap<int> InputsMap;
  StringMap<int> OutputsMap;
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    InputsMap[Interpreter->GetInputName(I)] = I;
  for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
    OutputsMap[Interpreter->GetOutputName(I)] = I;

  size_t NumberFeaturesPassed = 0;
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    // TFLite exposes tensors under "<name>:<port>" keys; bind each spec to
    // its tensor by that name.
    auto MapI = InputsMap.find(InputSpec.name() + ":" +
                               std::to_string(InputSpec.port()));
    if (MapI == InputsMap.end()) {
      Input[I] = nullptr;
      continue;
    }
    Input[I] = Interpreter->tensor(MapI->second);
    if (!checkReportAndInvalidate(Input[I], InputSpec))
      return;
    std::memset(Input[I]->data.data, 0,
                InputSpecs[I].getTotalTensorBufferSize());
    ++NumberFeaturesPassed;
  }

  if (NumberFeaturesPassed < Interpreter->inputs().size()) {
    // We haven't passed all the features the model requires; report the error
    // and invalidate.
    errs() << "Required feature(s) have not been passed to the ML model";
    invalidate();
    return;
  }

  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    const auto &OutputSpec = OutputSpecs[I];
    Output[I] = Interpreter->output_tensor(
        OutputsMap[OutputSpec.name() + ":" +
                   std::to_string(OutputSpec.port())]);
    if (!checkReportAndInvalidate(Output[I], OutputSpec))
      return;
  }
}

TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                    Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
                                                    const TensorSpec &Spec) {
  if (!Tensor) {
    errs() << "Could not find TF_Output named: " + Spec.name();
    IsValid = false;
  } else if (Spec.getTotalTensorBufferSize() != Tensor->bytes)
    // Only check the size when the tensor exists, to avoid dereferencing a
    // null pointer.
    IsValid = false;

  // If the total sizes match, there could still be a mismatch in the shape.
  // We ignore that for now.
  return IsValid;
}

std::optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return std::nullopt;
  return EvaluationResult(Impl->evaluate());
}

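// Typical per-evaluation pattern (a sketch continuing the example above; the
// typed getInput<T>/getTensorValue<T> helpers are thin casts over the untyped
// accessors, declared in TFUtils.h, and the scalar float model is an
// assumption):
//
//   *Ev.getInput<float>(0) = 42.0f;
//   if (auto Res = Ev.evaluate())
//     float Score = *Res->getTensorValue<float>(0);
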
void *TFModelEvaluator::getUntypedInput(size_t Index) {
  TfLiteTensor *T = Impl->getInput()[Index];
  if (!T)
    return nullptr;
  return T->data.data;
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return Impl->getOutput(Index)->data.data;
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return Impl->getOutput(Index)->data.data;
}

TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}

#endif // defined(LLVM_HAVE_TFLITE)