//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
  return MatchExact && MatchSubset;
}

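// Illustrative note (not from the upstream source): if feature F is on the
// whitelist, a caller compiled with {A, F} may inline a callee compiled with
// {A} (everything outside the whitelist matches exactly, and the callee's
// whitelisted bits are a subset of the caller's), but it may not inline a
// callee compiled with {B}, since the non-whitelisted bits A and B differ.
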
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1: any i8 immediate costs 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

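// Worked example (illustrative, not from the upstream source): in ARM mode,
// 0x00AB0000 is an 8-bit value rotated by an even amount, so getSOImmVal
// succeeds and the constant costs 1; a 32-bit constant that fails both the
// SOImm and ~SOImm checks costs 2 when movw/movt is available (v6T2) and 3
// otherwise, reflecting a constant-pool load.
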
// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  return getIntImmCost(Imm, Ty);
}

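// Illustrative example (not from the upstream source): for
// 'and r0, r0, #0xFFFFFF00' the complement 0xFF is a trivially cheap
// immediate, so the std::min above reports the AND immediate as cheap,
// reflecting the free rewrite to 'bic r0, r0, #0xFF'; likewise ADD immediates
// are costed against their negation to account for rewriting to SUB.
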
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() &&
      (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

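// Illustrative reading of the tables above (not from the upstream source): a
// sext from <8 x i8> to <8 x i32> hits { ISD::SIGN_EXTEND, MVT::v8i32,
// MVT::v8i8, 3 } and is costed as the chain of vmovl steps needed to widen,
// while an i64 -> i32 trunc is free via the ARMIntegerConversionTbl.
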
int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

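// Illustrative example (not from the upstream source): extracting a lane of a
// <4 x i32> vector into a core register is treated as a cross-class
// (NEON -> GPR) copy and costed at 3; for <4 x float> lanes the cost is the
// base estimate, but never less than 2, to discourage mixing NEON and VFP
// code.
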
int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

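// Illustrative reading (not from the upstream source): a select with a
// <16 x i1> condition and <16 x i64> operands is deliberately priced at 100
// by NEONVectorSelectTbl because its current lowering is very poor; ordinary
// legal vector selects fall through to LT.first, i.e. roughly one vbsl per
// legalized part.
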
int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

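// Illustrative example (not from the upstream source): a vector access whose
// pointer SCEV is not a small constant stride (unknown, or larger than 64
// bytes) reports a cost of 10 per address computation, which usually stops
// the vectorizer from treating it as profitable; a unit-stride access keeps
// the default cost of 1.
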
int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
  if (!C)
    return LibCallCost;

  const unsigned Size = C->getValue().getZExtValue();
  const unsigned DstAlign = MI->getDestAlignment();
  const unsigned SrcAlign = MI->getSourceAlignment();
  const Function *F = I->getParent()->getParent();
  const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
  std::vector<EVT> MemOps;

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, Size, DstAlign, SrcAlign, false /*IsMemset*/,
          false /*ZeroMemset*/, false /*MemcpyStrSrc*/, false /*AllowOverlap*/,
          MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
          F->getAttributes()))
    return MemOps.size() * 2;

  // If we can't find an optimal memop lowering, return the default cost.
  return LibCallCost;
}

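// Illustrative example (not from the upstream source; the exact lowering
// depends on the subtarget): a memcpy with a non-constant length is modelled
// as a library call of cost 4 (1 for the call plus 3 for argument setup),
// while a 16-byte, word-aligned constant-length copy that
// findOptimalMemOpLowering splits into four i32 operations would be costed as
// four loads plus four stores, i.e. 8.
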
int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (Kind == TTI::SK_Broadcast) {
    static const CostTblEntry NEONDupTbl[] = {
        // VDUP handles these cases.
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffle costs one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Select) {
    static const CostTblEntry NEONSelShuffleTbl[] = {
        // Select shuffle cost table for ARM. Cost is the number of
        // instructions required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

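// Illustrative reading of the tables above (not from the upstream source):
// broadcasting a lane across <4 x i32> is a single vdup (cost 1), reversing
// <16 x i8> needs a vrev plus a vext (cost 2), and an unsupported kind such
// as a general permute falls back to the base implementation.
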
int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
    return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by the ISel and have zero-cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64 those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

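// Illustrative reading (not from the upstream source): an sdiv on <4 x i32>
// maps to { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost } = 80, one
// libcall per scalarized element, while sdiv on <4 x i16> is priced at
// ReciprocalDivCost (10) because it can be lowered without a call.
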
int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

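// Illustrative example (not from the upstream source): an under-aligned load
// of <2 x double> is charged four times the legalized cost because it must be
// expanded into vld1/vst1 sequences instead of a single vldr/vstr.
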
int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}

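// Illustrative example (not from the upstream source): an interleaved load of
// <12 x i32> with Factor == 3 has a <4 x i32> sub-vector type, which is a
// legal 128-bit NEON type, so the access is costed as Factor * 1 = 3,
// roughly one vld3 of quad registers.
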
bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO: Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}

bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops)
    return false;

  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return false;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return false;

  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32)
    return false;

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls as
    // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special case for instance the SDIV of 64bit integers and the
    // use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there's any that we know will turn into a
  // call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I))
          return false;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

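// Illustrative note (not from the upstream source): when this hook returns
// true with PerformEntryTest set, the HardwareLoops pass is expected to
// materialize llvm.test.set.loop.iterations in the preheader and
// llvm.loop.decrement.reg in the latch, which the ARM backend later turns
// into v8.1-M low-overhead loop instructions (WLS/LE).
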
void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force-unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.