//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

15 #include "HexagonTargetTransformInfo.h"
16 #include "HexagonSubtarget.h"
17 #include "llvm/Analysis/TargetTransformInfo.h"
18 #include "llvm/CodeGen/ValueTypes.h"
19 #include "llvm/IR/InstrTypes.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/User.h"
22 #include "llvm/Support/Casting.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Transforms/Utils/LoopPeel.h"
25 #include "llvm/Transforms/Utils/UnrollLoop.h"
#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

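// Loop vectorization for HVX is strictly opt-in: it requires both HVX support
// in the subtarget and the -hexagon-autohvx flag.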
bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

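// HVX provides vector register pairs, so an interleave factor of 2 is
// allowed when vectorizing with HVX.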
unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 1;
}

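// Scalar registers are 32 bits wide. HVX vector registers are 512 or 1024
// bits, depending on the 64- vs 128-byte vector length mode; Hexagon has no
// scalable vectors.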
TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

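// The minimum VF fills a single HVX register: for example, with 128-byte
// vectors and 32-bit elements this is (8 * 128) / 32 = 32 lanes.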
ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}

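// Scalarization and call costs are not specialized on Hexagon; defer to the
// default implementations.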
InstructionCost HexagonTTIImpl::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost
HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

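// bswap gets a small fixed cost on top of its type legalization cost; all
// other intrinsics use the default cost.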
InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

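// Hexagon has post-increment addressing modes (see getPreferredAddressingMode
// above), so address computations are considered free.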
InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                          ScalarEvolution *SE,
                                                          const SCEV *S) {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               Type *SubTp) {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

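// An interleaved access that uses every member of the group and needs no mask
// is costed as a single wide memory access; anything else falls back to the
// default expansion cost.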
InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  if (Ty->isVectorTy()) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT =
        TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<InstructionCost, MVT> DstLT =
        TLI.getTypeLegalizationCost(DL, DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

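// Masked vector memory operations are only legal for HVX vector types, and
// can be turned off with -hexagon-masked-vmem.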
bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

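// Hexagon has sign- and zero-extending loads, so an integer extension whose
// only input is a single-use load can be folded into the load and is free.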
InstructionCost HexagonTTIImpl::getUserCost(const User *U,
                                            ArrayRef<const Value *> Operands,
                                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}