//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::desc("Recognize reduction patterns."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace
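
// HardwareLoopInfo describes a loop that may be convertible to a
// counter-based hardware loop: canAnalyze() rejects loops whose control flow
// is too irregular to reason about, and isHardwareLoopCandidate() below does
// the detailed legality checks.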
bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it can not be converted to
  // hardware loops.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}
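
// The IntrinsicCostAttributes constructors bundle everything the cost model
// needs to price an intrinsic call: the intrinsic ID, return and parameter
// types, the argument values when available, fast-math flags, and an
// optional pre-computed scalarization cost.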
IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}
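
// A loop qualifies as a hardware-loop candidate when some exiting block
// yields a loop-invariant, non-zero exit count that fits in the counter
// type, the block executes on every iteration, and it terminates in a
// conditional branch that can be rewritten to branch-and-decrement form.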
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this blocks ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    TripCount = SE.getAddExpr(EC, SE.getOne(EC->getType()));

    if (!EC->getType()->isPointerTy() && EC->getType() != CountType)
      TripCount = SE.getZeroExtendExpr(TripCount, CountType);

    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}
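
// Everything below is a thin forwarding layer: each public query delegates
// to the polymorphic TTIImpl, which is either a target's implementation or
// the NoTTIImpl fallback defined above. Wrappers that return a cost also
// assert that targets never report negative costs.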
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() {}

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost
TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                ArrayRef<const Value *> Operands,
                                TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands,
                                 enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getUserCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT,
    const LoopAccessInfo *LAI) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}

bool TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}

Optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(LSRCost &C1, LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

InstructionCost TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost
TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         Align Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

Optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth() const {
  return TTIImpl->shouldMaximizeVectorBandwidth();
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}
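
// Classify an operand for the vector cost model. Uniform and constant
// operands are often cheaper for targets than arbitrary values, and
// power-of-two constants can enable strength reduction, so both properties
// are reported separately.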
TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(const Value *V,
                                    OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue)
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return OpInfo;
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                      Opd1PropInfo, Opd2PropInfo, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(ShuffleKind Kind,
                                                    VectorType *Ty,
                                                    ArrayRef<int> Mask,
                                                    int Index,
                                                    VectorType *SubTp) const {
  InstructionCost Cost = TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
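
// Derive a CastContextHint from the code around a cast: an extend fed by a
// load, or a truncate feeding a store, can frequently be folded into the
// memory operation, so the hint records whether that access is a normal,
// masked, or gather/scatter one.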
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(unsigned Opcode,
                                                        Type *Val,
                                                        unsigned Index) const {
  InstructionCost Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment,
                                                  AddressSpace, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedAddReductionCost(
    bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
                                              CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                             SrcAddrSpace, DestAddrSpace,
                                             SrcAlign, DestAlign);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::hasActiveVectorLength() const {
  return TTIImpl->hasActiveVectorLength();
}

InstructionCost
TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}
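
// Reciprocal-throughput estimate for a single instruction: opcodes the cost
// model understands are routed through getUserCost() with the
// TCK_RecipThroughput cost kind; anything else reports an unknown cost.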
InstructionCost
TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg:
  case Instruction::Select:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case Instruction::Load:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ExtractValue:
  case Instruction::ShuffleVector:
  case Instruction::Call:
  case Instruction::Switch:
    return getUserCost(I, CostKind);
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}

TargetTransformInfo::Concept::~Concept() {}
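
// TargetIRAnalysis produces the TargetTransformInfo for a function. The
// default callback builds the conservative, DataLayout-only implementation;
// targets register their own callback to supply real cost information.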
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}
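
// The legacy wrapper pass computes its result eagerly on each getTTI() call,
// keeping only the most recent TargetTransformInfo alive.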
TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}