//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a TargetTransformInfo::Concept conforming object
/// specific to the X86 target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target-independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

#include <optional>

namespace llvm {

class InstCombiner;
class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64 bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };
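
  // How the list above is consumed, as a rough sketch (the precise logic
  // lives in X86TTIImpl::areInlineCompatible in X86TargetTransformInfo.cpp):
  // these feature bits are masked off both sides before comparing caller and
  // callee features, approximately:
  //
  //   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  //   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  //   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;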

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
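
  // A minimal usage sketch, for illustration only: passes normally reach this
  // class through the TargetTransformInfo facade (e.g. via
  // TargetMachine::getTargetTransformInfo) rather than constructing it
  // directly. Ctx is assumed to be an LLVMContext in scope:
  //
  //   X86TTIImpl Impl(TM, F);
  //   TargetTransformInfo TTI(std::move(Impl));
  //   InstructionCost Cost = TTI.getArithmeticInstrCost(
  //       Instruction::Add, FixedVectorType::get(Type::getInt32Ty(Ctx), 8),
  //       TargetTransformInfo::TCK_RecipThroughput);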

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(ElementCount VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                     Type *CondTy, CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);
  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind);
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue,
                                                  TTI::OP_None},
                  const Instruction *I = nullptr);
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  unsigned getAtomicMemIntrinsicMaxElementSize() const;
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);
  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF);
  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);
  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const;
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isExpensiveToSpeculativelyExecute(const Instruction *I);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool prefersVectorizedAddressing() const;
  bool supportsEfficientVectorElementLoadStore() const;
  bool enableInterleavedAccessVectorization();

private:
  bool supportsGather() const;
  InstructionCost getGSScalarCost(unsigned Opcode, Type *DataTy,
                                  bool VariableMask, Align Alignment,
                                  unsigned AddressSpace);
  InstructionCost getGSVectorCost(unsigned Opcode, Type *DataTy,
                                  const Value *Ptr, Align Alignment,
                                  unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H