//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
15 #include "AMDGPUSubtarget.h"
16 #include "Utils/AMDGPUBaseInfo.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/CodeGen/TargetPassConfig.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/Constant.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Metadata.h"
42 #include "llvm/IR/Module.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/User.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/MathExtras.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include "llvm/Target/TargetMachine.h"
61 #define DEBUG_TYPE "amdgpu-promote-alloca"

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
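  // Only scan the entry block: handleAlloca only considers static allocas,
  // and those are by definition located in the function's entry block.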
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
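  //
  // Note: workgroup_size_y and workgroup_size_z occupy bytes 6-7 and 8-9 of
  // the (little-endian) packet, so when the packet is reinterpreted as an
  // array of i32, dword 1 is workgroup_size_x | (workgroup_size_y << 16) and
  // dword 2 is workgroup_size_z | (reserved0 << 16). That is why the GEPs
  // below use the constant indices 1 and 2.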
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}
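
// GEPToVectorIndex accepts only the canonical two-index GEP into the alloca,
// e.g. (illustrative IR, not taken from this file):
//   %p = getelementptr [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
// and returns the second index (%idx) for use as a vector lane; any other
// shape is rejected.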
static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: we only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Returns true if the instruction can be turned into a vector access of the
// promoted alloca; anything not handled below is rejected.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // Also we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AT = Alloca->getAllocatedType();
  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
  if (!VectorTy)
    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');
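
  // Rewrite every collected use: an element load becomes a load of the whole
  // vector followed by an extractelement at the GEP's lane, and an element
  // store becomes a load of the whole vector, an insertelement of the stored
  // value, and a store of the whole vector back through the bitcast alloca.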
  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (OtherOp == Val)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other icmp operand is derived
    // from the same alloca, so it will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // ordering of uses in the function.

  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.
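
  // Each workitem gets its own copy of the promoted alloca in LDS, so the
  // additional LDS needed is the per-thread allocation size multiplied by the
  // maximum flat workgroup size, plus any padding needed to align it.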
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << " " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);
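
  // TID is the flat (linearized) workitem index within the workgroup:
  //   TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ
  // It selects this workitem's private slot in the per-workgroup LDS array.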
  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to fix the users here.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // the instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
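
    // The remaining worklist entries are promotable intrinsic calls. They are
    // either dropped (lifetime/invariant markers) or recreated below so their
    // declarations match the operands' new LDS (addrspace(3)) pointer types.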
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}