//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {

class Function;
class GlobalValue;
class LLVMContext;
class ScalarEvolution;
class SCEV;
class TargetMachine;

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
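///
/// A minimal subclass might look like the sketch below (the MyTTIImpl name
/// and how ST/TLI get initialized are illustrative; the concrete BasicTTIImpl
/// at the end of this file follows the same pattern):
///
/// \code
/// class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///   friend BasicTTIImplBase<MyTTIImpl>;
///
///   const TargetSubtargetInfo *ST;
///   const TargetLoweringBase *TLI;
///
///   const TargetSubtargetInfo *getST() const { return ST; }
///   const TargetLoweringBase *getTLI() const { return TLI; }
/// };
/// \endcode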
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  using BaseT = TargetTransformInfoImplCRTPBase<T>;
  using TTI = TargetTransformInfo;

  /// Estimate a cost of Broadcast as an extract and sequence of insert
  /// operations.
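  ///
  /// For example, broadcasting lane 0 of a <4 x float> is costed as one
  /// extract plus four inserts, roughly mirroring this illustrative IR
  /// expansion (not necessarily what a backend actually emits):
  ///
  /// \code
  ///   %e  = extractelement <4 x float> %v, i32 0
  ///   %b0 = insertelement <4 x float> undef, float %e, i32 0
  ///   %b1 = insertelement <4 x float> %b0, float %e, i32 1
  ///   %b2 = insertelement <4 x float> %b1, float %e, i32 2
  ///   %b3 = insertelement <4 x float> %b2, float %e, i32 3
  /// \endcode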
  unsigned getBroadcastShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Broadcast cost is equal to the cost of extracting the zeroth element
    // plus the cost of inserting it into every element of the result vector.
    Cost += static_cast<T *>(this)->getVectorInstrCost(
        Instruction::ExtractElement, Ty, 0);

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::InsertElement, Ty, i);
    }
    return Cost;
  }

  /// Estimate a cost of shuffle as a sequence of extract and insert
  /// operations.
  unsigned getPermuteShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting elements from the
    // source vectors plus the cost of inserting them into the result vector.

    // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> needs to extract
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector, and finally index 3 of the second vector, and insert
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector extraction as a sequence of extract and
  /// insert operations.
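  ///
  /// For example, extracting the high <2 x i32> half (Index == 2) of a
  /// <4 x i32> is costed as two extracts plus two inserts, mirroring this
  /// illustrative expansion:
  ///
  /// \code
  ///   %e2 = extractelement <4 x i32> %v, i32 2
  ///   %e3 = extractelement <4 x i32> %v, i32 3
  ///   %s0 = insertelement <2 x i32> undef, i32 %e2, i32 0
  ///   %s1 = insertelement <2 x i32> %s0, i32 %e3, i32 1
  /// \endcode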
  unsigned getExtractSubvectorOverhead(Type *Ty, int Index, Type *SubTy) {
    assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() &&
           "Can only extract subvectors from vectors");
    int NumSubElts = SubTy->getVectorNumElements();
    assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() &&
           "SK_ExtractSubvector index out of range");

    unsigned Cost = 0;
    // Subvector extraction cost is equal to the cost of extracting elements
    // from the source type plus the cost of inserting them into the result
    // vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::ExtractElement, Ty, i + Index);
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::InsertElement, SubTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector insertion as a sequence of extract and
  /// insert operations.
  unsigned getInsertSubvectorOverhead(Type *Ty, int Index, Type *SubTy) {
    assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() &&
           "Can only insert subvectors into vectors");
    int NumSubElts = SubTy->getVectorNumElements();
    assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() &&
           "SK_InsertSubvector index out of range");

    unsigned Cost = 0;
    // Subvector insertion cost is equal to the cost of extracting elements
    // from the subvector type plus the cost of inserting them into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::ExtractElement, SubTy, i);
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::InsertElement, Ty, i + Index);
    }
    return Cost;
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
    switch (M) {
    case TTI::MIM_Unindexed:
      return ISD::UNINDEXED;
    case TTI::MIM_PreInc:
      return ISD::PRE_INC;
    case TTI::MIM_PreDec:
      return ISD::PRE_DEC;
    case TTI::MIM_PostInc:
      return ISD::POST_INC;
    case TTI::MIM_PostDec:
      return ISD::POST_DEC;
    }
    llvm_unreachable("Unexpected MemIndexedMode");
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}

  using TargetTransformInfoImplBase::DL;

public:
  /// \name Scalar TTI Implementations
  /// @{
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment,
                                                    Fast);
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    // Return an invalid address space.
    return -1;
  }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
                          const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
                           const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  }

  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool useAA() const { return getST()->useAA(); }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    if (getTLI()->isExtFree(I))
      return TargetTransformInfo::TCC_Free;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I))
      if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
        if (getTLI()->isExtLoad(LI, I, DL))
          return TargetTransformInfo::TCC_Free;

    return TargetTransformInfo::TCC_Basic;
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize) {
    /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the
    /// actual numbers found in lowering. This function ignores switches that
    /// are lowered with a mix of jump table / bit test / BTree. This function
    /// was initially intended to be used when estimating the cost of a switch
    /// in the inline cost heuristic, but it's a generic cost model to be used
    /// in other places (e.g., in loop unrolling).
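    ///
    /// For example, a switch over cases 0..15 that jumps to 16 distinct
    /// blocks typically forms one dense cluster: assuming the target's
    /// density and minimum-entry heuristics accept it, this returns 1 and
    /// sets JumpTableSize to 16.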
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    const DataLayout &DL = this->getDataLayout();

    JumpTableSize = 0;
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    // Early exit if both a jump table and bit test are not allowed.
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test
    if (N <= DL.getIndexSizeInBits(0u)) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());

      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
                                     DL))
        return 1;
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table
      if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
    return true;
  }

  unsigned getFPOpCost(Type *Ty) {
    // Check whether FADD is available, as a proxy for floating-point in
    // general.
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
      return TargetTransformInfo::TCC_Basic;
    return TargetTransformInfo::TCC_Expensive;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    switch (Opcode) {
    default: break;
    case Instruction::Trunc:
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    case Instruction::ZExt:
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(&*J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set number of instructions optimized when "back edge"
    // becomes "fall through" to default value of 2.
    UP.BEInsns = 2;
  }

  int getInstructionLatency(const Instruction *I) {
    if (isa<LoadInst>(I))
      return getST()->getSchedModel().DefaultLoadLatency;

    return BaseT::getInstructionLatency(I);
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
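  ///
  /// For example, scalarizing a <4 x i32> operation with both Insert and
  /// Extract set is costed as four inserts to rebuild the result plus four
  /// extracts of the source lanes, i.e. eight getVectorInstrCost queries
  /// (an illustrative count; targets refine the per-element costs).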
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The types of the arguments are ordinarily
  /// scalar, in which case the costs are multiplied with VF.
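  ///
  /// For example, with VF == 4 and arguments (%a, %b, constant 5), the two
  /// unique non-constant operands %a and %b each contribute the cost of
  /// extracting four lanes, while the constant contributes nothing (an
  /// illustrative application of the loop below).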
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) {
    unsigned Cost = 0;
    SmallPtrSet<const Value*, 4> UniqueOperands;
    for (const Value *A : Args) {
      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        Type *VecTy = nullptr;
        if (A->getType()->isVectorTy()) {
          VecTy = A->getType();
          // If A is a vector operand, VF should be 1 or correspond to A.
          assert((VF == 1 || VF == VecTy->getVectorNumElements()) &&
                 "Vector argument does not match VF");
        } else
          VecTy = VectorType::get(A->getType(), VF);

        Cost += getScalarizationOverhead(VecTy, false, true);
      }
    }

    return Cost;
  }

  unsigned getScalarizationOverhead(Type *VecTy, ArrayRef<const Value *> Args) {
    assert(VecTy->isVectorTy());

    unsigned Cost = 0;

    Cost += getScalarizationOverhead(VecTy, true, false);
    if (!Args.empty())
      Cost += getOperandsScalarizationOverhead(Args,
                                               VecTy->getVectorNumElements());
    else
      // When no information on arguments is provided, we add the cost
      // associated with one argument as a heuristic.
      Cost += getScalarizationOverhead(VecTy, false, true);

    return Cost;
  }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

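  /// Estimate the cost of an arithmetic instruction by legalizing its type
  /// first. As an illustrative example (assuming a per-operation cost of 1):
  /// on a target with 128-bit vector registers, an add on <8 x i32> typically
  /// legalizes to two v4i32 adds, so LT.first below would be 2 and a legal
  /// integer add would be costed as 2 * OpCost.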
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, Args) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    switch (Kind) {
    case TTI::SK_Broadcast:
      return getBroadcastShuffleOverhead(Tp);
    case TTI::SK_Select:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      return getPermuteShuffleOverhead(Tp);
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(Tp, Index, SubTp);
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(Tp, Index, SubTp);
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::AddrSpaceCast &&
        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
                                 Dst->getPointerAddressSpace()))
      return 0;

    // If this is a zext/sext of a load, return 0 if the corresponding
    // extending load exists on the target.
    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        I && isa<LoadInst>(I->getOperand(0))) {
      EVT ExtVT = EVT::getEVT(Dst);
      EVT LoadVT = EVT::getEVT(Src);
      unsigned LType =
          ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
      if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
        return 0;
    }

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {
      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {
      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
           TargetLowering::TypeSplitVector) ||
          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
           TargetLowering::TypeSplitVector)) {
        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
                                         Dst->getVectorNumElements() / 2);
        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
                                         Src->getVectorNumElements() / 2);
        T *TTI = static_cast<T *>(this);
        return TTI->getVectorSplitCost() +
               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I));
      }

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need
    // to assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return static_cast<T *>(this)->getVectorInstrCost(
               Instruction::ExtractElement, VecTy, Index) +
           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
                                                    VecTy->getElementType());
  }

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the operation is scalarized.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());

    return LT.first;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace,
                           const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment, unsigned AddressSpace,
                                      bool UseMaskForCond = false,
                                      bool UseMaskForGaps = false) {
    VectorType *VT = dyn_cast<VectorType>(VecTy);
    assert(VT && "Expect a vector type for interleaved memory op");

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);

    // First, the cost of the load/store operation.
    unsigned Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = static_cast<T *>(this)->getMaskedMemoryOpCost(
          Opcode, VecTy, Alignment, AddressSpace);
    else
      Cost = static_cast<T *>(this)->getMemoryOpCost(Opcode, VecTy, Alignment,
                                                     AddressSpace);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
    unsigned VecTySize =
        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Return the ceiling of dividing A by B.
    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., an interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
    //
    // We only scale the cost of loads since interleaved store groups aren't
    // allowed to have gaps.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used.
      Cost *= UsedInsts.count() / NumLegalInsts;
    }

    // Then add the cost of the interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extracting the sub-vectors' elements
      // from the wide vector and inserting them into the sub-vectors.
      //
      // E.g. An interleaved load of factor 2 (with one member of index 0):
      //      %vec = load <8 x i32>, <8 x i32>* %ptr
      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>         ; Index 0
      // The cost is estimated as extract elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and insert them into a <4 x i32> vector.
      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from loaded vector for each sub vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += static_cast<T *>(this)->getVectorInstrCost(
              Instruction::ExtractElement, VT, Index + i * Factor);
      }

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is to extract all elements from the sub-vectors
      // and insert them into the wide vector.
      //
      // E.g. An interleaved store of factor 2:
      //      %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //      store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
      // The cost is estimated as extract all elements from both <4 x i32>
      // vectors and insert into the <8 x i32> vector.
      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());
    VectorType *MaskVT = VectorType::get(I8Type, NumElts);
    SubVT = VectorType::get(I8Type, NumSubElts);

    // The Mask shuffling cost is extract all the elements of the Mask
    // and insert each of them Factor times into the wide vector:
    //
    // E.g. an interleaved group with factor 3:
    //    %mask = icmp ult <8 x i32> %vec1, %vec2
    //    %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
    //        <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
    // The cost is estimated as extract all mask elements from the <8xi1> mask
    // vector and insert them factor times into the <24xi1> shuffled mask
    // vector.
    for (unsigned i = 0; i < NumSubElts; i++)
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::ExtractElement, SubVT, i);

    for (unsigned i = 0; i < NumElts; i++)
      Cost += static_cast<T *>(this)->getVectorInstrCost(
          Instruction::InsertElement, MaskVT, i);

    // The Gaps mask is invariant and created outside the loop, therefore the
    // cost of creating it is not accounted for here. However, if we have both
    // a MaskForGaps and some other mask that guards the execution of the
    // memory access, we need to account for the cost of And-ing the two masks
    // inside the loop.
    if (UseMaskForGaps)
      Cost += static_cast<T *>(this)->getArithmeticInstrCost(
          BinaryOperator::And, MaskVT);

    return Cost;
  }

  /// Get intrinsic cost based on arguments.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF = 1) {
    unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
    auto *ConcreteTTI = static_cast<T *>(this);

    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      SmallVector<Type *, 4> Types;
      for (Value *Op : Args) {
        Type *OpTy = Op->getType();
        assert(VF == 1 || !OpTy->isVectorTy());
        Types.push_back(VF == 1 ? OpTy : VectorType::get(OpTy, VF));
      }

      if (VF > 1 && !RetTy->isVoidTy())
        RetTy = VectorType::get(RetTy, VF);

      // Compute the scalarization overhead based on Args for a vector
      // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
      // CostModel will pass a vector RetTy and VF is 1.
      unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
      if (RetVF > 1 || VF > 1) {
        ScalarizationCost = 0;
        if (!RetTy->isVoidTy())
          ScalarizationCost += getScalarizationOverhead(RetTy, true, false);
        ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
      }

      return ConcreteTTI->getIntrinsicInstrCost(IID, RetTy, Types, FMF,
                                                ScalarizationCost);
    }
    case Intrinsic::masked_scatter: {
      assert(VF == 1 && "Can't vectorize types here.");
      Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
      return ConcreteTTI->getGatherScatterOpCost(
          Instruction::Store, Args[0]->getType(), Args[1], VarMask, Alignment);
    }
    case Intrinsic::masked_gather: {
      assert(VF == 1 && "Can't vectorize types here.");
      Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
      return ConcreteTTI->getGatherScatterOpCost(Instruction::Load, RetTy,
                                                 Args[0], VarMask, Alignment);
    }
    case Intrinsic::experimental_vector_reduce_add:
    case Intrinsic::experimental_vector_reduce_mul:
    case Intrinsic::experimental_vector_reduce_and:
    case Intrinsic::experimental_vector_reduce_or:
    case Intrinsic::experimental_vector_reduce_xor:
    case Intrinsic::experimental_vector_reduce_fadd:
    case Intrinsic::experimental_vector_reduce_fmul:
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_fmax:
    case Intrinsic::experimental_vector_reduce_fmin:
    case Intrinsic::experimental_vector_reduce_umax:
    case Intrinsic::experimental_vector_reduce_umin:
      return getIntrinsicInstrCost(IID, RetTy, Args[0]->getType(), FMF);
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      Value *X = Args[0];
      Value *Y = Args[1];
      Value *Z = Args[2];
      TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
      TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
      TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);
      TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ);
      TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue;
      OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
                                                              : TTI::OP_None;
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      unsigned Cost = 0;
      Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Or, RetTy);
      Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Sub, RetTy);
      Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
                                                  OpKindX, OpKindZ, OpPropsX);
      Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                                  OpKindY, OpKindZ, OpPropsY);
      // Non-constant shift amounts require a modulo.
      if (OpKindZ != TTI::OK_UniformConstantValue &&
          OpKindZ != TTI::OK_NonUniformConstantValue)
        Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                                    OpKindZ, OpKindBW, OpPropsZ,
                                                    OpPropsBW);
      // For non-rotates (X != Y) we must add shift-by-zero handling costs.
      if (X != Y) {
        Type *CondTy = Type::getInt1Ty(RetTy->getContext());
        if (RetVF > 1)
          CondTy = VectorType::get(CondTy, RetVF);
        Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy,
                                                CondTy, nullptr);
        Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                                CondTy, nullptr);
      }
      return Cost;
    }
    }
  }

  /// Get intrinsic cost based on argument types.
  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
  /// cost of scalarizing the arguments and the return value will be computed
  /// based on types.
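  ///
  /// For example, llvm.sqrt.v4f32 maps to ISD::FSQRT below: if that node is
  /// legal (or promoted) for the legalized type, the cost is roughly the
  /// legalization factor LT.first; if the intrinsic must be scalarized
  /// instead, it is costed as four scalar calls plus the insert/extract
  /// overhead.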
  unsigned getIntrinsicInstrCost(
      Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
      unsigned ScalarizationCostPassed = std::numeric_limits<unsigned>::max()) {
    unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
    auto *ConcreteTTI = static_cast<T *>(this);

    SmallVector<unsigned, 2> ISDs;
    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = ScalarizationCostPassed;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
          ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
            ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost =
          ConcreteTTI->getIntrinsicInstrCost(IID, ScalarRetTy, ScalarTys, FMF);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      break;
    case Intrinsic::sin:
      ISDs.push_back(ISD::FSIN);
      break;
    case Intrinsic::cos:
      ISDs.push_back(ISD::FCOS);
      break;
    case Intrinsic::exp:
      ISDs.push_back(ISD::FEXP);
      break;
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      break;
    case Intrinsic::log:
      ISDs.push_back(ISD::FLOG);
      break;
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
      break;
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
      break;
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
      break;
    case Intrinsic::canonicalize:
      ISDs.push_back(ISD::FCANONICALIZE);
      break;
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMINIMUM);
      break;
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMAXIMUM);
      break;
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
      break;
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
      break;
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
      break;
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
      break;
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
      break;
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
      break;
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      break;
    case Intrinsic::pow:
      ISDs.push_back(ISD::FPOW);
      break;
    case Intrinsic::fma:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
      return 0;
    case Intrinsic::masked_store:
      return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0,
                                                0);
    case Intrinsic::masked_load:
      return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    case Intrinsic::experimental_vector_reduce_add:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::Add, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_mul:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::Mul, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_and:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::And, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_or:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::Or, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_xor:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::Xor, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_fadd:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::FAdd, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_fmul:
      return ConcreteTTI->getArithmeticReductionCost(Instruction::FMul, Tys[0],
                                                     /*IsPairwiseForm=*/false);
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_fmax:
    case Intrinsic::experimental_vector_reduce_fmin:
      return ConcreteTTI->getMinMaxReductionCost(
          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
          /*IsSigned=*/true);
    case Intrinsic::experimental_vector_reduce_umax:
    case Intrinsic::experimental_vector_reduce_umin:
      return ConcreteTTI->getMinMaxReductionCost(
          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
          /*IsSigned=*/false);
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      Type *CondTy = Type::getInt1Ty(RetTy->getContext());
      if (RetVF > 1)
        CondTy = VectorType::get(CondTy, RetVF);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      unsigned Cost = 0;
      Cost += ConcreteTTI->getIntrinsicInstrCost(
          OverflowOp, OpTy, {RetTy, RetTy}, FMF, ScalarizationCostPassed);
      Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy,
                                              CondTy, nullptr);
      Cost += 2 * ConcreteTTI->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                                  CondTy, nullptr);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = Type::getInt1Ty(RetTy->getContext());
      if (RetVF > 1)
        CondTy = VectorType::get(CondTy, RetVF);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      unsigned Cost = 0;
      Cost += ConcreteTTI->getIntrinsicInstrCost(
          OverflowOp, OpTy, {RetTy, RetTy}, FMF, ScalarizationCostPassed);
      Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, nullptr);
      return Cost;
    }
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      // LHSSign -> LHS >= 0
      // RHSSign -> RHS >= 0
      // SumSign -> Sum >= 0
      //
      // Add:
      // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
      // Sub:
      // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
      unsigned Cost = 0;
      Cost += ConcreteTTI->getArithmeticInstrCost(Opcode, SumTy);
      Cost += 3 * ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                                  OverflowTy, nullptr);
      Cost += 2 * ConcreteTTI->getCmpSelInstrCost(
                      BinaryOperator::ICmp, OverflowTy, OverflowTy, nullptr);
      Cost +=
          ConcreteTTI->getArithmeticInstrCost(BinaryOperator::And, OverflowTy);
      return Cost;
    }
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      unsigned Cost = 0;
      Cost += ConcreteTTI->getArithmeticInstrCost(Opcode, SumTy);
      Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                              OverflowTy, nullptr);
      return Cost;
    }
    case Intrinsic::ctpop:
      ISDs.push_back(ISD::CTPOP);
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    // FIXME: ctlz, cttz, ...
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    SmallVector<unsigned, 2> LegalCost;
    SmallVector<unsigned, 2> CustomCost;
    for (unsigned ISD : ISDs) {
      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
        if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
            TLI->isFAbsFree(LT.second)) {
          return 0;
        }

        // The operation is legal. Assume it costs 1.
        // If the type is split to multiple registers, assume that there is
        // some overhead to this.
        // TODO: Once we have extract/insert subvector cost we need to use them.
        if (LT.first > 1)
          LegalCost.push_back(LT.first * 2);
        else
          LegalCost.push_back(LT.first * 1);
      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
        // If the operation is custom lowered then assume
        // that the code is twice as expensive.
        CustomCost.push_back(LT.first * 2);
      }
    }

    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
    if (MinLegalCostI != LegalCost.end())
      return *MinLegalCostI;

    auto MinCustomCostI =
        std::min_element(CustomCost.begin(), CustomCost.end());
    if (MinCustomCostI != CustomCost.end())
      return *MinCustomCostI;

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return ConcreteTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             ConcreteTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost =
          ((ScalarizationCostPassed != std::numeric_limits<unsigned>::max())
               ? ScalarizationCostPassed
               : getScalarizationOverhead(RetTy, true, false));
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = ConcreteTTI->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys, FMF);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
            ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  }

  /// Compute the cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in which case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value type.
  /// \param Tys Argument types.
  /// \returns The cost of the Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  /// Try to calculate arithmetic and shuffle op costs for reduction operations.
  /// We're assuming that reduction operations are performed in the following
  /// way:
  /// 1. Non-pairwise reduction
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/2 elements               n/2 elements
  /// %red1 = op <n x t> %val, <n x t> val1
  /// After this operation we have a vector %red1 where only the first n/2
  /// elements are meaningful, the second n/2 elements are undefined and can be
  /// dropped. All other operations are actually working with the vector of
  /// length n/2, not n, though the real vector length is still n.
  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/4 elements               3*n/4 elements
  /// %red2 = op <n x t> %red1, <n x t> val2  - working with the vector of
  /// length n/2, the resulting vector has length n/4 etc.
  /// 2. Pairwise reduction:
  /// Everything is the same except for an additional shuffle operation which
  /// is used to produce operands for pairwise kind of reductions.
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
  ///            \-------------v----------/  \----------v------------/
  ///                   n/2 elements               n/2 elements
  /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
  ///            \-------------v----------/  \----------v------------/
  ///                   n/2 elements               n/2 elements
  /// %red1 = op <n x t> %val1, <n x t> val2
  /// Again, the operation is performed on <n x t> vector, but the resulting
  /// vector %red1 is <n/2 x t> vector.
  ///
  /// The cost model should take into account that the actual length of the
  /// vector is reduced on each iteration.
  unsigned getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                      bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    Type *ScalarTy = Ty->getVectorElementType();
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost = 0;
    unsigned ShuffleCost = 0;
    auto *ConcreteTTI = static_cast<T *>(this);
    std::pair<unsigned, MVT> LT =
        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      Type *SubTy = VectorType::get(ScalarTy, NumVecElts);
      // Assume the pairwise shuffles add a cost.
      ShuffleCost += (IsPairwise + 1) *
                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
                                                 NumVecElts, SubTy);
      ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, SubTy);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on the vectors with the same
    // architecture-dependent length.

    // Non pairwise reductions need one shuffle per reduction level. Pairwise
    // reductions need two shuffles on every level, but the last one. On that
    // level one of the shuffles is <0, u, u, ...> which is identity.
    unsigned NumShuffles = NumReduxLevels;
    if (IsPairwise && NumReduxLevels >= 1)
      NumShuffles += NumReduxLevels - 1;
    ShuffleCost += NumShuffles *
                   ConcreteTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                               0, Ty);
    ArithCost += NumReduxLevels *
                 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
    return ShuffleCost + ArithCost +
           ConcreteTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
  }

  /// Try to calculate op costs for min/max reduction operations.
  /// \param CondTy Conditional type for the Select instruction.
  unsigned getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise,
                                  bool) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    Type *ScalarTy = Ty->getVectorElementType();
    Type *ScalarCondTy = CondTy->getVectorElementType();
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned CmpOpcode;
    if (Ty->isFPOrFPVectorTy()) {
      CmpOpcode = Instruction::FCmp;
    } else {
      assert(Ty->isIntOrIntVectorTy() &&
             "expecting floating point or integer type for min/max reduction");
      CmpOpcode = Instruction::ICmp;
    }
    unsigned MinMaxCost = 0;
    unsigned ShuffleCost = 0;
    auto *ConcreteTTI = static_cast<T *>(this);
    std::pair<unsigned, MVT> LT =
        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      Type *SubTy = VectorType::get(ScalarTy, NumVecElts);
      CondTy = VectorType::get(ScalarCondTy, NumVecElts);

      // Assume the pairwise shuffles add a cost.
      ShuffleCost += (IsPairwise + 1) *
                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
                                                 NumVecElts, SubTy);
      MinMaxCost +=
          ConcreteTTI->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy, nullptr) +
          ConcreteTTI->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
                                          nullptr);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on the vectors with the same
    // architecture-dependent length.

    // Non pairwise reductions need one shuffle per reduction level. Pairwise
    // reductions need two shuffles on every level, but the last one. On that
    // level one of the shuffles is <0, u, u, ...> which is identity.
    unsigned NumShuffles = NumReduxLevels;
    if (IsPairwise && NumReduxLevels >= 1)
      NumShuffles += NumReduxLevels - 1;
    ShuffleCost += NumShuffles *
                   ConcreteTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                               0, Ty);
    MinMaxCost +=
        NumReduxLevels *
        (ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
         ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
                                         nullptr));
    // The last min/max should be in vector registers and we counted it above.
    // So just need a single extractelement.
    return ShuffleCost + MinMaxCost +
           ConcreteTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
  }

  unsigned getVectorSplitCost() { return 1; }

  /// @}
};

/// Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  using BaseT = BasicTTIImplBase<BasicTTIImpl>;

  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_BASICTTIIMPL_H