//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
15 #include "AMDGPUSubtarget.h"
16 #include "Utils/AMDGPUBaseInfo.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/CodeGen/TargetPassConfig.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/Constant.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Metadata.h"
42 #include "llvm/IR/Module.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/User.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/MathExtras.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::ReallyHidden,
  cl::init(false));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
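  // Dword 1 of the packet (bytes 4-7) holds workgroup_size_x in its low 16
  // bits and workgroup_size_y in its high 16 bits; dword 2 (bytes 8-11) holds
  // workgroup_size_z in its low 16 bits with reserved0 (zero) above it, which
  // is why indices 1 and 2 are loaded below.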
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}
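
// For example, for a GEP of the form
//   getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
// GEPToVectorIndex returns %idx (illustrative IR; address space annotations
// omitted).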

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // Also we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && SI->isSimple();
  }
  default:
    return false;
  }
}
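
// Illustrative sketch of the rewrite tryPromoteAllocaToVector performs
// (address space annotations omitted; names are made up):
//
//   %alloca = alloca [4 x i32]
//   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %i
//   %val = load i32, i32* %gep
//
// becomes a whole-vector load plus an extractelement:
//
//   %cast = bitcast [4 x i32]* %alloca to <4 x i32>*
//   %vec = load <4 x i32>, <4 x i32>* %cast
//   %val = extractelement <4 x i32> %vec, i32 %i
//
// Stores are handled analogously with insertelement and a store of the
// updated vector.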

static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion alloca to vector is disabled\n");
    return false;
  }

  Type *AT = Alloca->getAllocatedType();
  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
  if (!VectorTy)
    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
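
// Any other call conservatively blocks promotion; the intrinsics accepted
// above are later rewritten for the new address space (or simply erased, for
// the lifetime/invariant markers) in handleAlloca().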

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (OtherOp == Val)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << " Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // order of uses in the function.
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(MaybeAlign(I.getAlignment()));

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
*Tmp0
= Builder
.CreateMul(TCntY
, TCntZ
, "", true, true);
814 Tmp0
= Builder
.CreateMul(Tmp0
, TIdX
);
815 Value
*Tmp1
= Builder
.CreateMul(TIdY
, TCntZ
, "", true, true);
816 Value
*TID
= Builder
.CreateAdd(Tmp0
, Tmp1
);
817 TID
= Builder
.CreateAdd(TID
, TIdZ
);
820 Constant::getNullValue(Type::getInt32Ty(Mod
->getContext())),
824 Value
*Offset
= Builder
.CreateInBoundsGEP(GVTy
, GV
, Indices
);
825 I
.mutateType(Offset
->getType());
826 I
.replaceAllUsesWith(Offset
);
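
  // Rewrite the collected users for the local address space: mutate
  // pointer-typed values in place, patch constant-null operands of compares,
  // selects and phis, and re-emit or erase the handled memory intrinsics.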
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to change its type here.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}