//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

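// HVX is used for vectorization only when the subtarget supports HVX
// operations and the -hexagon-autohvx flag is enabled.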
bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

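// Returns true if VecTy is an integer vector type that is either a native
// HVX vector type, or one that legalization would widen to an HVX type.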
bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  assert(VecTy->isVectorTy());
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
  auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
  return Action == TargetLoweringBase::TypeWidenVector;
}

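// Number of elements in a vector type, or 1 for a scalar type.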
unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (Ty->isVectorTy())
    return Ty->getVectorNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->empty() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    UP.PeelCount = 2;
  }
}

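// Hexagon supports post-increment addressing on loads and stores, so prefer
// post-incremented address expressions when optimizing address computations.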
bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

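// Hexagon has 32 general-purpose registers; vector registers (32 of them)
// are only available when vectorizing with HVX.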
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

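// Allow the vectorizer to interleave by up to 2 when targeting HVX.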
unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 0;
}

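// Scalar registers are 32 bits wide; vector register width depends on the
// HVX vector length.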
unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

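// The HVX vector length is given in bytes, so the register width in bits is
// getVectorLength()*8 (e.g. 512 or 1024 bits, depending on the HVX mode).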
unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 0;
}

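// Minimum vectorization factor: the number of ElemWidth-bit lanes that fill
// a single HVX vector register.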
unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}

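// Scalarization overhead, call costs, and generic intrinsic costs are all
// deferred to the default (base) implementation.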
unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
                                                  bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                          ArrayRef<Type*> Tys) {
  return BaseT::getCallInstrCost(F, RetTy, Tys);
}

unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

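// Give bswap a small fixed cost on top of the type legalization cost; all
// other intrinsics use the default cost.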
unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Type*> Tys, FastMathFlags FMF,
      unsigned ScalarizationCostPassed) {
  if (ID == Intrinsic::bswap) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

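// Assume address computations can be folded into the addressing modes and
// are therefore free.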
unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}

unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
      unsigned Alignment, unsigned AddressSpace, const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getBitWidth();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      Alignment = std::min(Alignment, RegWidth / 8);
      unsigned AlignWidth = 8 * std::max(1u, Alignment);
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? FloatFactor
                                                                 : 1;
    Alignment = std::min(Alignment, 8u);
    unsigned AlignWidth = 8 * std::max(1u, Alignment);
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == 4 || Alignment == 8)
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    unsigned LogA = Log2_32(Alignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
}

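// Masked loads and stores fall back to the default cost model.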
unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}

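// Assume any shuffle can be done with a single permute-type instruction,
// i.e. at unit cost.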
unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}

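// Gathers and scatters likewise use the default costs.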
unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
      Value *Ptr, bool VariableMask, unsigned Alignment) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
}

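// A fully-used interleaved access (all indices present, no masking) is
// costed as a single wide memory operation.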
unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
      Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond,
      bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, nullptr);
}

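// Vector floating-point compares pay an extra FloatFactor per element on
// top of the legalization cost.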
unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
      Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

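// Vector floating-point arithmetic is scaled the same way as FP compares.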
unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
      TTI::OperandValueProperties Opd1PropInfo,
      TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) {
  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}

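// Casts involving floating point add FloatFactor per FP element to the
// larger of the two legalization costs; all other casts are assumed to
// cost 1.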
unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
      Type *SrcTy, const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
  }
  return 1;
}

unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, an extract will also be needed.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

/// --- Vector TTI end ---

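// Prefetch distance and cache line size are taken from the subtarget.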
unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

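// Integer extensions that can be folded into a load are treated as free.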
int HexagonTTIImpl::getUserCost(const User *U,
                                ArrayRef<const Value *> Operands) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands);
}

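// Lookup table emission is controlled by the -hexagon-emit-lookup-tables
// flag (on by default).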
bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}