//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);
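
// These thresholds are ordinary cl::opt flags: although hidden from -help,
// they can still be overridden on the command line, e.g. (the value here is
// purely illustrative, not a recommendation):
//   llc -mtriple=amdgcn -amdgpu-unroll-threshold-private=4000 kernel.ll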

static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                 return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
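  // That is (256 - 16) * 4 = 960 bytes: 256 VGPRs available to a wave, minus
  // 16 held in reserve, at 4 bytes per 32-bit register.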
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing not
        // to a variable; most likely we will be unable to combine it.
        // Do not unroll too deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
               return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}
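
// Note: getNumberOfRegisters() below deliberately reports only a fraction of
// these to the vectorizer; with 256 hardware registers, 256 >> 3 = 32.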

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}
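
// Example for the clamp below: a chain of 32 contiguous i8 loads has VF = 32
// and LoadSize = 8, i.e. 256 bits; that exceeds 128 bits with sub-32-bit
// elements, so the factor is reduced to 128 / 8 = 16.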

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element sizes less than 32 bits?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
      AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 128;

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  llvm_unreachable("unhandled address space");
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal >
        static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}
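
// A note on the cost helpers used below: getFullRateInstrCost(),
// getHalfRateInstrCost(), getQuarterRateInstrCost() and get64BitInstrCost()
// are small static helpers in AMDGPUTargetTransformInfo.h that scale
// TargetTransformInfo::TCC_Basic by the instruction's relative issue rate,
// so the values computed here are throughput estimates rather than latencies.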

int GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only the legal types,
  // we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
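  // (Back-of-envelope for the i64 case above: four quarter-rate multiplies
  // plus 2*2 full-rate ops per legalized element; assuming the header's rate
  // constants of TCC_Basic and 3 * TCC_Basic, that is 4*3 + 4*1 = 16 units
  // before scaling by LT.first and NElts.)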
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                           bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                       bool IsPairwise,
                                       bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}
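
// For example, every argument of an amdgpu_kernel function is passed in
// SGPRs and is therefore uniform, while an amdgpu_ps argument is only in
// SGPRs (and uniform) when it carries the inreg or byval attribute.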

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}
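
// For example, llvm.amdgcn.workitem.id.x is a source of divergence (each
// lane sees its own id), while llvm.amdgcn.readfirstlane is uniform by
// construction; see isAlwaysUniform() below.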

bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
      return true;
    }
  }
  return false;
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low or high
      // half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
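
// Inlining is permitted only when the callee's required subtarget features
// (after dropping InlineFeatureIgnoreList) form a subset of the caller's;
// the bit test below checks exactly (Caller & Callee) == Callee.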

bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
       AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
       (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
        AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}