//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
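//
// As a rough illustrative sketch (the IR names below are made up), a private
// array such as
//
//   %tmp = alloca [4 x i32], align 4
//
// whose only users are simple loads and stores through in-bounds GEPs can be
// rewritten to operate on a <4 x i32> vector value, while an alloca that
// cannot be vectorized this way may instead be replaced by a per-workitem
// slice of an LDS (addrspace(3)) global sized for the maximum work-group size.
//
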
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::ReallyHidden,
  cl::init(false));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  //
  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
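  //
  // Viewed as an array of i32, the dword at index 1 (byte offset 4) packs
  // workgroup_size_x in its low 16 bits and workgroup_size_y in its high 16
  // bits, while the dword at index 2 (byte offset 8) packs workgroup_size_z
  // together with reserved0; the two loads below rely on this layout.
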
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}
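
// Convert an [N x T] array type into the corresponding <N x T> vector type,
// e.g. [4 x float] becomes <4 x float>; the element type and element count
// carry over unchanged.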
static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}
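
// Map a GEP of the (illustrative) form
//   getelementptr [N x T], [N x T]* %alloca, i32 0, i32 %idx
// to the element index %idx; any other shape is rejected below.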
static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: we only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // Also we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion alloca to vector is disabled\n");
    return false;
  }

  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  std::vector<Value *> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (OtherOp == Val)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the point of the comparison.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // order of uses in the function.

  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.
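
  // Every work item gets its own copy of the promoted alloca in LDS, so the
  // additional usage is WorkGroupSize * sizeof(AllocaTy), appended after the
  // current usage rounded up to this alloca's alignment.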
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
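
  // Linearize the three work item ids into a single index,
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ,
  // so each work item addresses a distinct element of the LDS array created
  // above.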
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}