//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}
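
// A type qualifies for HVX if it is a fixed-width integer vector wider than
// 64 bits whose legalized type is either a native HVX vector or one that the
// target would widen into an HVX vector.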
bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  assert(VecTy->isVectorTy());
  if (cast<VectorType>(VecTy)->isScalable())
    return false;
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
  auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
  return Action == TargetLoweringBase::TypeWidenVector;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (Ty->isVectorTy())
    return Ty->getVectorNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->empty() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    UP.PeelCount = 2;
    UP.AllowPeeling = true;
  }
}

bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---
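
// Hexagon has 32 general purpose registers; HVX adds 32 vector registers
// (v0-v31). Without HVX there are no vector registers to report.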
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 0;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 0;
}
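
// The minimum vectorization factor is one full vector register's worth of
// lanes: e.g. with 64-byte HVX registers and 16-bit elements this is
// (8 * 64) / 16 = 32 lanes.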
unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}

unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
      bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys) {
  return BaseT::getCallInstrCost(F, RetTy, Tys);
}

unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}
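
// Intrinsics follow the default cost model, except that bswap is treated as
// cheap: its cost is the type legalization cost plus a small constant.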
unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Type*> Tys, FastMathFlags FMF,
      unsigned ScalarizationCostPassed) {
  if (ID == Intrinsic::bswap) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}
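
// Memory cost model: stores use the default cost; full HVX registers cost one
// load each; partially filled HVX registers are assembled from scalar loads;
// other vectors are priced by the number of naturally aligned chunks needed
// to cover them.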
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
      unsigned Alignment, unsigned AddressSpace, const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getBitWidth();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      Alignment = std::min(Alignment, RegWidth / 8);
      unsigned AlignWidth = 8 * std::max(1u, Alignment);
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }
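
    // The fallback below prices a non-HVX vector load by naturally aligned
    // chunks. For example, an <8 x i16> load (128 bits) with 2-byte alignment
    // splits into 128/16 = 8 chunks, and since the alignment is below 32 bits
    // the final cost is (3 - Log2_32(2)) * 1 * 8 = 16.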
    // Add extra cost for floating point types.
    unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? FloatFactor
                                                                 : 1;
    Alignment = std::min(Alignment, 8u);
    unsigned AlignWidth = 8 * std::max(1u, Alignment);
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == 4 || Alignment == 8)
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    unsigned LogA = Log2_32(Alignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
}

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}

unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
      Value *Ptr, bool VariableMask, unsigned Alignment) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
}
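
// An interleaved group that uses every member and needs no masking is costed
// as one wide memory operation; anything else falls back to the default
// model.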
unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
      Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond,
      bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, nullptr);
}

unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
      Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
      TTI::OperandValueProperties Opd1PropInfo,
      TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) {
  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}
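
// Casts involving floating point pay FloatFactor per FP element on either
// side of the conversion, on top of the larger of the two legalization costs;
// all other casts keep a unit cost.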
unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
      Type *SrcTy, const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
  }
  return 1;
}
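
// Inserting into a non-zero lane is modeled as two register rotations, and
// elements that are not 32-bit integers additionally pay for the extract
// needed to position them.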
unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}
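
// A sign- or zero-extension from a sub-32-bit integer type to i32 that is the
// sole user of a load folds into the load (Hexagon has extending loads), so
// such casts are free; everything else uses the default user cost.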
int HexagonTTIImpl::getUserCost(const User *U,
                                ArrayRef<const Value *> Operands) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}