//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64ExpandImm.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
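  // For example (illustrative), a caller compiled with {+neon,+crc} may
  // inline a callee compiled with {+neon}, but not the other way around.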
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
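  // For example (illustrative), an arbitrary 64-bit constant such as
  // 0x123456789ABCDEF0 typically expands to one MOVZ plus three MOVKs, so
  // expandMOVImm returns four instructions and the cost is 4.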
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
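  // For example (illustrative), a 128-bit constant is costed as two
  // independent 64-bit chunks, each materialized separately.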
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           DstTy->getVectorNumElements());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16 bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  // verify that their extending operands are eliminated during code
  // generation.
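  // For example (illustrative IR, not taken from this file):
  //   %e = zext <4 x i16> %b to <4 x i32>
  //   %r = add <4 x i32> %a, %e
  // can be selected as a single uaddw; if both operands are extends, the
  // "long" form (uaddl) can be used instead.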
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" versions), the
  // second operand must be a sign- or zero extend having a single user. We
  // only consider extends having a single user because they may otherwise not
  // be eliminated.
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  Type *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
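    // For example (illustrative), extending v4i16 to v4i64 takes one shll to
    // v4i32 followed by shll/shll2 into two v2i64 halves, i.e. 3 instructions.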
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  int Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
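      // For example (illustrative), 'sdiv i32 %x, 8' becomes roughly an add of
      // 7, a compare against zero, a conditional select, and an arithmetic
      // shift right by 3; each piece is costed separately below.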
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by constant is expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHU + SUB + SRL + ADD + SRL.
        int MulCost = getArithmeticInstrCost(Instruction::Mul, Ty, Opd1Info,
                                             Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int AddCost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info,
                                             Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int ShrCost = getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info,
                                             Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                          Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, Opd1Info,
                                     Opd2Info, Opd1PropInfo, Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, Opd1Info,
                                     Opd2Info, Opd1PropInfo, Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;

  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well when they are wider than the
  // register width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

AArch64TTIImpl::TTI::MemCmpExpansionOptions
AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.AllowOverlappingLoads = !ST->requiresStrictAlign();
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, on some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
  Options.LoadSizes = {8, 4, 2, 1};
  return Options;
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && Alignment < 16) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has been
    // shown in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
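    // For example (illustrative), a misaligned store of a single v2i64 has
    // LT.first == 1 and is therefore costed 1 * 2 * 6 = 12.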
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8)) {
    unsigned ProfitableNumElements;
    if (Opcode == Instruction::Store)
      // We use a custom trunc store lowering so v.4b should be profitable.
      ProfitableNumElements = 4;
    else
      // We scalarize the loads because there is no v.4b register and we
      // have to promote the elements to v.2.
      ProfitableNumElements = 8;

    if (Ty->getVectorNumElements() < ProfitableNumElements) {
      unsigned NumVecElts = Ty->getVectorNumElements();
      unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
      // We generate 2 instructions per vector element.
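      // For example (illustrative), a <4 x i8> load is costed
      // (4 * 2) * 4 * 2 = 64, discouraging vectorization at this width.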
      return NumVectorizableInstsToAmortize * NumVecElts * 2;
    }
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
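    // For example (illustrative), a v16i32 access with Factor == 2 has a
    // v8i32 sub-vector spanning two 128-bit registers, so it is matched to
    // two ld2/st2 instructions and costed 2 * 2 = 4.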
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
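  // For example (illustrative), with MaxStridedLoads == 7 and two strided
  // loads detected, MaxCount becomes 1 << Log2_32(7 / 2) == 2.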
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // An inner loop is more likely to be hot, and its runtime check can be
  // hoisted out by the LICM pass, so the overhead is lower; use a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
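///
/// For example (illustrative IR, not taken from this file):
///   %idx = sext i32 %i to i64
///   %p = getelementptr [10 x i32], [10 x i32]* %a, i64 0, i64 %idx
/// has three operands and is therefore "complex", so the sext may be promoted
/// without finding a common header.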
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                           TTI::ReductionFlags Flags) const {
  assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
    return false;
  case Instruction::Add:
    return ScalarBits * Ty->getVectorNumElements() >= 128;
  case Instruction::ICmp:
    return (ScalarBits < 64) &&
           (ScalarBits * Ty->getVectorNumElements() >= 128);
  case Instruction::FCmp:
    return Flags.NoNaN;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}

int AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
                                               bool IsPairwiseForm) {

  if (IsPairwiseForm)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as normal vector adds. This is the only arithmetic vector
  // reduction operation for which we have an instruction.
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8,  1},
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v4i16, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };

  if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm);
}

int AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc) {
    static const CostTblEntry ShuffleTbl[] = {
      // Broadcast shuffle kinds can be performed with 'dup'.
      { TTI::SK_Broadcast, MVT::v8i8,  1 },
      { TTI::SK_Broadcast, MVT::v16i8, 1 },
      { TTI::SK_Broadcast, MVT::v4i16, 1 },
      { TTI::SK_Broadcast, MVT::v8i16, 1 },
      { TTI::SK_Broadcast, MVT::v2i32, 1 },
      { TTI::SK_Broadcast, MVT::v4i32, 1 },
      { TTI::SK_Broadcast, MVT::v2i64, 1 },
      { TTI::SK_Broadcast, MVT::v2f32, 1 },
      { TTI::SK_Broadcast, MVT::v4f32, 1 },
      { TTI::SK_Broadcast, MVT::v2f64, 1 },
      // Transpose shuffle kinds can be performed with 'trn1/trn2' and
      // 'zip1/zip2' instructions.
      { TTI::SK_Transpose, MVT::v8i8,  1 },
      { TTI::SK_Transpose, MVT::v16i8, 1 },
      { TTI::SK_Transpose, MVT::v4i16, 1 },
      { TTI::SK_Transpose, MVT::v8i16, 1 },
      { TTI::SK_Transpose, MVT::v2i32, 1 },
      { TTI::SK_Transpose, MVT::v4i32, 1 },
      { TTI::SK_Transpose, MVT::v2i64, 1 },
      { TTI::SK_Transpose, MVT::v2f32, 1 },
      { TTI::SK_Transpose, MVT::v4f32, 1 },
      { TTI::SK_Transpose, MVT::v2f64, 1 },
      // Select shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
      // PermuteSingleSrc shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
    };
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}