//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file describes a TargetTransformInfo::Concept conforming object
/// specific to the X86 target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target-independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//
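
// How the X86 cost model is typically reached (a sketch; the exact wiring
// lives in X86TargetMachine.cpp and may differ in detail): the target machine
// wraps an X86TTIImpl in the generic TargetTransformInfo value object, e.g.
//
//   TargetTransformInfo
//   X86TargetMachine::getTargetTransformInfo(const Function &F) {
//     return TargetTransformInfo(X86TTIImpl(this, F));
//   }
//
// Passes query the generic TTI interface, which dispatches to the overrides
// declared below and falls back to BasicTTIImplBase for everything else.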

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64 bit capable, not that we are in 64-bit
      // mode.
      X86::Feature64Bit,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureCMPXCHG16B,

      // Codegen control options.
      X86::FeatureFast11ByteNOP,
      X86::FeatureFast15ByteNOP,
      X86::FeatureFastBEXTR,
      X86::FeatureFastHorizontalOps,
      X86::FeatureFastLZCNT,
      X86::FeatureFastPartialYMMorZMMWrite,
      X86::FeatureFastScalarFSQRT,
      X86::FeatureFastSHLDRotate,
      X86::FeatureFastScalarShiftMasks,
      X86::FeatureFastVectorShiftMasks,
      X86::FeatureFastVariableShuffle,
      X86::FeatureFastVectorFSQRT,
      X86::FeatureLEAUsesAG,
      X86::FeatureLZCNTFalseDeps,
      X86::FeatureBranchFusion,
      X86::FeatureMacroFusion,
      X86::FeatureMergeToThreeWayBranch,
      X86::FeaturePadShortFunctions,
      X86::FeaturePOPCNTFalseDeps,
      X86::FeatureSSEUnalignedMem,
      X86::FeatureSlow3OpsLEA,
      X86::FeatureSlowDivide32,
      X86::FeatureSlowDivide64,
      X86::FeatureSlowIncDec,
      X86::FeatureSlowPMADDWD,
      X86::FeatureSlowPMULLD,
      X86::FeatureSlowTwoMemOps,
      X86::FeatureSlowUAMem16,

      X86::FeatureHasFastGather,
      X86::FeatureSlowUAMem32,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::FeaturePrefer256Bit,

      // CPU name enums. These just follow CPU string.
  };
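
  // The list above feeds the inlining compatibility check: features that only
  // affect tuning (not intrinsics or the ABI) are masked off before the
  // caller's and callee's feature bits are compared, so they never block
  // inlining. Roughly (a sketch; CallerFeatures/CalleeFeatures stand for the
  // two functions' feature bitsets, and the real logic lives in
  // X86TargetTransformInfo.cpp):
  //
  //   FeatureBitset CallerBits = CallerFeatures & ~InlineFeatureIgnoreList;
  //   FeatureBitset CalleeBits = CalleeFeatures & ~InlineFeatureIgnoreList;
  //   bool InlineOK = (CallerBits & CalleeBits) == CalleeBits;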

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
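
  // Note that the subtarget is resolved per function, so the answers below
  // reflect each function's own target-cpu / target-features attributes rather
  // than a single module-wide configuration.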

  /// \name Scalar TTI Implementations
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// \name Cache TTI Implementation
  llvm::Optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const;
  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const;

  /// \name Vector TTI Implementations
  unsigned getNumberOfRegisters(bool Vector);
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());
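
  // Illustrative use only (Ctx and TTI are placeholders, not names from this
  // file): a vectorizer comparing a scalar multiply against a 4-wide vector
  // multiply might ask
  //
  //   Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
  //   int VecCost = TTI.getArithmeticInstrCost(Instruction::Mul, VecTy);
  //
  // and weigh VecCost against four times the scalar cost.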
  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace);
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment);
  int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                const SCEV *Ptr);

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed = UINT_MAX);
  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF = 1);

  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm);

  int getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwiseForm,
                             bool IsUnsigned);
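
  // IsPairwiseForm distinguishes the pairwise-shuffle reduction pattern from
  // the split (tree) reduction pattern the vectorizers may emit; the min/max
  // variant also needs the signedness of the comparison to choose between the
  // signed and unsigned instruction sequences.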

  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                 unsigned Factor, ArrayRef<unsigned> Indices,
                                 unsigned Alignment, unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                 unsigned Factor, ArrayRef<unsigned> Indices,
                                 unsigned Alignment, unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                 unsigned Factor, ArrayRef<unsigned> Indices,
                                 unsigned Alignment, unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);

  int getIntImmCost(int64_t);

  int getIntImmCost(const APInt &Imm, Type *Ty);

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands);

  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty);
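
  // Broadly, the immediate-cost hooks above tell passes such as
  // ConstantHoisting how expensive a constant is to materialize and whether a
  // given operand position can fold it as an immediate, so cheap immediates
  // are rematerialized rather than hoisted.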
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
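
  // The predicates below tell the vectorizers (and related passes) which
  // masked, non-temporal, gather/scatter, and expand/compress memory
  // operations are supported and worth forming for a given data type on the
  // current subtarget; when they return false, scalarized or unmasked code is
  // used instead.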
  bool isLegalMaskedLoad(Type *DataType);
  bool isLegalMaskedStore(Type *DataType);
  bool isLegalNTLoad(Type *DataType, unsigned Alignment);
  bool isLegalNTStore(Type *DataType, unsigned Alignment);
  bool isLegalMaskedGather(Type *DataType);
  bool isLegalMaskedScatter(Type *DataType);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();

private:
  // Internal helpers for the gather/scatter cost model: the scalarized
  // emulation case and the native gather/scatter instruction case.
  int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask,
                      unsigned Alignment, unsigned AddressSpace);
  int getGSVectorCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                      unsigned Alignment, unsigned AddressSpace);
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H