//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
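
// As an illustrative sketch (not part of the original sources): given a
// kernel with a small static alloca such as
//
//   %stack = alloca [4 x i32], align 4
//
// the pass either rewrites its users as operations on a <4 x i32> vector
// value, or, if enough local memory is available, replaces it with a
// per-workitem slice of an LDS array along the lines of
//
//   @kernel.stack = internal unnamed_addr addrspace(3)
//                     global [256 x [4 x i32]] undef, align 4
//
// where 256 stands for the kernel's maximum flat workgroup size.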
14 #include "AMDGPU.h"
15 #include "AMDGPUSubtarget.h"
16 #include "Utils/AMDGPUBaseInfo.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/CodeGen/TargetPassConfig.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/Constant.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Metadata.h"
42 #include "llvm/IR/Module.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/User.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/MathExtras.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include "llvm/Target/TargetMachine.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <cstdint>
56 #include <map>
57 #include <tuple>
58 #include <utility>
59 #include <vector>
61 #define DEBUG_TYPE "amdgpu-promote-alloca"
63 using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));
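
// Both knobs can be toggled from the command line of any tool that parses
// cl::opt flags, e.g. (illustrative invocation, not from the original
// sources):
//
//   llc -mtriple=amdgcn-amd-amdhsa -disable-promote-alloca-to-vector foo.ll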

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently we only handle the case where the pointer operand is a GEP.
    // Also, we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // The alloca-derived pointer must be the stored pointer operand, not a
    // stored value, and since the IR should be in canonical form, the User
    // should be a GEP. Also, we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}
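
// Try to rewrite a small, statically sized alloca of array or vector type as
// operations on a single SSA vector value: each qualifying GEP contributes an
// element index, loads become extractelement, and stores become insertelement
// on a load/store of the whole vector.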
static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AT = Alloca->getAllocatedType();
  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
  if (!VectorTy)
    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');
  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the stored value rather than
      // the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other icmp operand is derived
    // from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.
  //
  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
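
  // Replace the private alloca with a per-workitem slice of a new LDS array:
  // the array has one element per workitem in the maximum workgroup, and each
  // workitem indexes it with its linearized workitem ID.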
  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
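
  // Linearize the 3D workitem ID, with X as the outermost dimension:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ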
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();
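
  // Fix up everything that still refers to the old private pointer: pointer
  // users are retyped to addrspace(3), null pointer constants are rebuilt in
  // the new address space, and promotable intrinsics are rewritten or dropped.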
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}