//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> DisableLowOverheadLoops(
  "disable-arm-loloops", cl::Hidden, cl::init(false),
  cl::desc("Disable the generation of low-overhead loops"));
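
// Whether inlining Callee into Caller is compatible from a subtarget-feature
// point of view; see the exact/subset feature checks below.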
bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the callers'.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
  return MatchExact && MatchSubset;
}
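
// Rough cost, in instructions, of materializing the immediate Imm of integer
// type Ty on the current subtarget.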
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1, any i8 imm cost 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}
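
// Cost of the immediate Imm when used as operand Idx of an instruction with
// the given Opcode; several foldable cases are reported as free.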
int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  return getIntImmCost(Imm, Ty);
}
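
// Cost of a cast (truncate, extend or FP conversion) from Src to Dst, using
// the NEON/MVE conversion cost tables below where they apply.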
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() &&
      (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // The extend of a load is free
  if (I && isa<LoadInst>(I->getOperand(0))) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return Entry->Cost;

    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost;
    }
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant, i64
  // sexts are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost * ST->getMVEVectorCostFactor();
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src);
}
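
// Cost of inserting an element into, or extracting an element from, a vector.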
int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector, to prevent vectorising where we end up just scalarising the
    // result anyway.
    return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                    ST->getMVEVectorCostFactor()) *
           ValTy->getVectorNumElements() / 2;
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}
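
// Cost of a compare or select on ValTy (with condition type CondTy).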
int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (ST->hasNEON()) {
    if (Ty->isVectorTy() && SE &&
        !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
      return NumVectorInstToHideOverhead;

    // In many cases the address computation is not merged into the instruction
    // addressing mode.
    return 1;
  }
  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
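
// Approximate cost of a memcpy: the number of load/store operations an inline
// expansion would need, or the cost of a call to the library routine.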
int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
  if (!C)
    return LibCallCost;

  const unsigned Size = C->getValue().getZExtValue();
  const unsigned DstAlign = MI->getDestAlignment();
  const unsigned SrcAlign = MI->getSourceAlignment();
  const Function *F = I->getParent()->getParent();
  const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
  std::vector<EVT> MemOps;

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, Size, DstAlign, SrcAlign, false /*IsMemset*/,
          false /*ZeroMemset*/, false /*MemcpyStrSrc*/, false /*AllowOverlap*/,
          MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
          F->getAttributes()))
    return MemOps.size() * 2;

  // If we can't find an optimal memop lowering, return the default cost.
  return LibCallCost;
}
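
// Cost of a vector shuffle of kind Kind on type Tp, using the NEON and MVE
// tables below for the shuffle kinds they cover.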
int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8,  1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8,  1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
    }
  }
  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
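
// Cost of a scalar or vector arithmetic operation. Vector integer division is
// deliberately made to look very expensive, since it is lowered via function
// calls.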
int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double registers types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON()) {
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                             Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so for
    // the vectorized code. Because we have support for v2i64 but not i64 those
    // sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make them
    // seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return BaseT::getScalarizationOverhead(Ty, Args) + Num * Cost;
  }

  return BaseCost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (ST->hasNEON() && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN doesn't support vector types of i64/f64 element.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}
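
// Whether a call to F is expected to be lowered to an actual function call
// rather than expanded inline; used by the hardware-loop checks below.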
bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}
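
// A loop is a candidate for a v8.1-M low-overhead hardware loop if its trip
// count fits in 32 bits and its body contains nothing that lowers to a call.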
bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops)
    return false;

  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return false;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return false;

  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32)
    return false;

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls as
    // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special case for instance the SDIV of 64bit integers and the
    // use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there's any that we know will turn into a
  // call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I))
          return false;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      // Don't unroll vectorised loops. MVE does not benefit from it as much as
      // scalar code.
      if (I.getType()->isVectorTy())
        return;

      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
  assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  if (!ST->hasMVEIntegerOps())
    return false;

  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
  case Instruction::ICmp:
  case Instruction::FCmp:
    return false;
  case Instruction::Add:
    return ScalarBits * Ty->getVectorNumElements() == 128;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}