//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

17 #include "AMDGPUTargetTransformInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "Utils/AMDGPUBaseInfo.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/Analysis/LoopInfo.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/CodeGen/ISDOpcodes.h"
25 #include "llvm/CodeGen/ValueTypes.h"
26 #include "llvm/IR/Argument.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/CallingConv.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Module.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/IR/Type.h"
39 #include "llvm/IR/Value.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/MachineValueType.h"
46 #include "llvm/Support/raw_ostream.h"
47 #include "llvm/Target/TargetMachine.h"
48 #include <algorithm>
49 #include <cassert>
50 #include <limits>
51 #include <utility>
53 using namespace llvm;
55 #define DEBUG_TYPE "AMDGPUtti"
static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

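// These thresholds are ordinary cl::opt flags, so they can typically be tuned
// per invocation, e.g. (illustrative values only):
//   opt -loop-unroll -amdgpu-unroll-threshold-private=3000 ...
//   clang -mllvm -amdgpu-unroll-threshold-private=3000 ...
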
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                         return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

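// Illustrative case (a hypothetical input, not an exhaustive description): in
// a loop whose induction variable i is a PHI of that loop and which contains
//   if (i & 1) { ... }
// the branch condition is computed from the loop's own PHI, so the helper
// above returns true and getUnrollingPreferences below grants the
// UnrollThresholdIf bonus for that "if".
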
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing not to
        // a variable; most likely we will be unable to combine it.
        // Do not unroll too-deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher-than-normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

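// Rough worked example of the numbers above, assuming the default flag values:
// MaxAlloca is (256 - 16) * 4 = 960 bytes, so a loop addressing a static
// alloca of at most 960 bytes can have its threshold raised to
// UnrollThresholdPrivate (2500), a loop addressing LDS through a global or
// argument can be raised to UnrollThresholdLocal (1000), and each qualifying
// divergent "if" adds 150 on top of the base threshold of 300, all capped at
// MaxBoost = max(2500, 1000) = 2500.
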
unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

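// In other words, the 256 hardware registers reported above are deliberately
// understated to 256 >> 3 = 32 for the vectorizer and interleaver.
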
unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

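// Worked example of the clamping above (sizes are in bits, values chosen for
// illustration only): eight 16-bit loads give VF * LoadSize = 8 * 16 = 128 and
// keep VF = 8, sixteen 16-bit loads (256 bits) are clamped to 128 / 16 = 8,
// and stores are clamped to 128 bits regardless of element size, e.g. eight
// 32-bit stores become 128 / 32 = 4.
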
unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
      AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 128;

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  llvm_unreachable("unhandled address space");
}

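// For private (scratch) accesses the width tracks the subtarget's maximum
// private element size, e.g. a 4-byte limit would yield 8 * 4 = 32 bits.
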
bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

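// These intrinsics share the operand layout assumed above, e.g.
// llvm.amdgcn.ds.fadd(ptr, value, ordering, scope, isVolatile), which is why
// operand 2 is read as the ordering and operand 4 as the volatile flag.
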
int GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // And, or, and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

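// Illustrative readings of the table above (the *InstrCost rate helpers are
// declared alongside this class): a 64-bit integer add is modeled as two
// full-rate 32-bit VALU ops, a 64-bit multiply as 4 quarter-rate plus 4
// full-rate ops, and an f32 divide without denormal support as 7 full-rate +
// 1 quarter-rate ops plus 2 full-rate FP-mode switches, each result then
// scaled by LT.first and NElts.
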
unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                           bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                       bool IsPairwise,
                                       bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

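// For example, under this model an insert or extract of lane 0 of a <2 x half>
// vector is free on subtargets with 16-bit instructions, any extract or insert
// of a 32-bit (or wider) element at a known lane is free, and a dynamically
// indexed lane (Index == ~0u, i.e. unknown here) costs 2.
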
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}

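// As a concrete reading of the rules above: a value loaded through a flat or
// private pointer is treated as divergent, a load from global, constant or
// local memory is not, and any atomic or plain call is conservatively treated
// as divergent.
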
bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
      return true;
    }
  }
  return false;
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

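// For instance, on a subtarget with packed (VOP3P) math, broadcasting or
// reversing a <2 x i16> or <2 x half> value is modeled as free, since op_sel
// can select either half of the 32-bit register directly.
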
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
       AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
       (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
        AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}