//===- AMDGPUPerfHintAnalysis.cpp - analysis of functions memory traffic --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Analyzes if a function is potentially memory bound and if a kernel
/// may benefit from limiting the number of waves to reduce cache thrashing.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-perf-hint"
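
// Tuning knobs for the heuristics below. With the default weights, a single
// indirect-access or large-stride memory instruction counts roughly 1000
// times as much as a plain memory instruction in the wave-limiting heuristic
// (needLimitWave).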
static cl::opt<unsigned>
    MemBoundThresh("amdgpu-membound-threshold", cl::init(50), cl::Hidden,
                   cl::desc("Function mem bound threshold in %"));

static cl::opt<unsigned>
    LimitWaveThresh("amdgpu-limit-wave-threshold", cl::init(50), cl::Hidden,
                    cl::desc("Kernel limit wave threshold in %"));

static cl::opt<unsigned>
    IAWeight("amdgpu-indirect-access-weight", cl::init(1000), cl::Hidden,
             cl::desc("Indirect access memory instruction weight"));

static cl::opt<unsigned>
    LSWeight("amdgpu-large-stride-weight", cl::init(1000), cl::Hidden,
             cl::desc("Large stride memory access weight"));

static cl::opt<unsigned>
    LargeStrideThresh("amdgpu-large-stride-threshold", cl::init(64),
                      cl::Hidden,
                      cl::desc("Large stride memory access threshold"));

STATISTIC(NumMemBound, "Number of functions marked as memory bound");
STATISTIC(NumLimitWave, "Number of functions marked as needing limit wave");

char llvm::AMDGPUPerfHintAnalysis::ID = 0;
char &llvm::AMDGPUPerfHintAnalysisID = AMDGPUPerfHintAnalysis::ID;

INITIALIZE_PASS(AMDGPUPerfHintAnalysis, DEBUG_TYPE,
                "Analysis if a function is memory bound", true, true)

namespace {

struct AMDGPUPerfHint {
  friend AMDGPUPerfHintAnalysis;

public:
  AMDGPUPerfHint(AMDGPUPerfHintAnalysis::FuncInfoMap &FIM_,
                 const TargetLowering *TLI_)
      : FIM(FIM_), DL(nullptr), TLI(TLI_) {}

  bool runOnFunction(Function &F);

private:
  struct MemAccessInfo {
    const Value *V;
    const Value *Base;
    int64_t Offset;
    MemAccessInfo() : V(nullptr), Base(nullptr), Offset(0) {}
    bool isLargeStride(MemAccessInfo &Reference) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    Printable print() const {
      return Printable([this](raw_ostream &OS) {
        OS << "Value: " << *V << '\n'
           << "Base: " << *Base << " Offset: " << Offset << '\n';
      });
    }
#endif
  };

  MemAccessInfo makeMemAccessInfo(Instruction *) const;

  MemAccessInfo LastAccess; // Last memory access info

  AMDGPUPerfHintAnalysis::FuncInfoMap &FIM;

  const DataLayout *DL;

  const TargetLowering *TLI;

  AMDGPUPerfHintAnalysis::FuncInfo *visit(const Function &F);
  static bool isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &F);
  static bool needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &F);

  bool isIndirectAccess(const Instruction *Inst) const;

  /// Check if the instruction is a large stride memory access.
  /// The purpose is to identify a memory access pattern like:
  ///   x = a[i];
  ///   y = a[i + 1000];
  ///   z = a[i + 2000];
  /// In the above example, the second and third memory accesses will be
  /// marked as large stride memory accesses.
  bool isLargeStride(const Instruction *Inst);

  bool isGlobalAddr(const Value *V) const;
  bool isLocalAddr(const Value *V) const;
  bool isConstantAddr(const Value *V) const;
};
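
// Return the pointer operand of the instruction if it accesses memory (load,
// store, atomic, or memory intrinsic), or nullptr otherwise.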
static const Value *getMemoryInstrPtr(const Instruction *Inst) {
  if (auto LI = dyn_cast<LoadInst>(Inst)) {
    return LI->getPointerOperand();
  }
  if (auto SI = dyn_cast<StoreInst>(Inst)) {
    return SI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicRMWInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    return MI->getRawDest();
  }

  return nullptr;
}

bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
  LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
  SmallSet<const Value *, 32> WorkSet;
  SmallSet<const Value *, 32> Visited;
  if (const Value *MO = getMemoryInstrPtr(Inst)) {
    if (isGlobalAddr(MO))
      WorkSet.insert(MO);
  }
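
  // Walk the address computation backwards. If any value feeding the address
  // is itself loaded from global, local, or constant memory, the original
  // access is indirect.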
  while (!WorkSet.empty()) {
    const Value *V = *WorkSet.begin();
    WorkSet.erase(*WorkSet.begin());
    if (!Visited.insert(V).second)
      continue;
    LLVM_DEBUG(dbgs() << "  check: " << *V << '\n');

    if (auto LD = dyn_cast<LoadInst>(V)) {
      auto M = LD->getPointerOperand();
      if (isGlobalAddr(M) || isLocalAddr(M) || isConstantAddr(M)) {
        LLVM_DEBUG(dbgs() << "    is IA\n");
        return true;
      }
      continue;
    }

    if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
      auto P = GEP->getPointerOperand();
      WorkSet.insert(P);
      for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
        WorkSet.insert(GEP->getOperand(I));
      continue;
    }

    if (auto U = dyn_cast<UnaryInstruction>(V)) {
      WorkSet.insert(U->getOperand(0));
      continue;
    }

    if (auto BO = dyn_cast<BinaryOperator>(V)) {
      WorkSet.insert(BO->getOperand(0));
      WorkSet.insert(BO->getOperand(1));
      continue;
    }

    if (auto S = dyn_cast<SelectInst>(V)) {
      WorkSet.insert(S->getFalseValue());
      WorkSet.insert(S->getTrueValue());
      continue;
    }

    if (auto E = dyn_cast<ExtractElementInst>(V)) {
      WorkSet.insert(E->getVectorOperand());
      continue;
    }

    LLVM_DEBUG(dbgs() << "    dropped\n");
  }

  LLVM_DEBUG(dbgs() << "  is not IA\n");
  return false;
}
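
// Count memory instructions in F, classifying indirect and large-stride
// accesses, against the total instruction count. Counts from already-visited
// callees are folded into the caller (see runOnSCC for the visit order).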
AMDGPUPerfHintAnalysis::FuncInfo *AMDGPUPerfHint::visit(const Function &F) {
  AMDGPUPerfHintAnalysis::FuncInfo &FI = FIM[&F];

  LLVM_DEBUG(dbgs() << "[AMDGPUPerfHint] process " << F.getName() << '\n');

  for (auto &B : F) {
    LastAccess = MemAccessInfo();
    for (auto &I : B) {
      if (getMemoryInstrPtr(&I)) {
        if (isIndirectAccess(&I))
          ++FI.IAMInstCount;
        if (isLargeStride(&I))
          ++FI.LSMInstCount;
        ++FI.MemInstCount;
        ++FI.InstCount;
        continue;
      }
      CallSite CS(const_cast<Instruction *>(&I));
      if (CS) {
        Function *Callee = CS.getCalledFunction();
        if (!Callee || Callee->isDeclaration()) {
          ++FI.InstCount;
          continue;
        }
        if (&F == Callee) // Handle immediate recursion
          continue;

        auto Loc = FIM.find(Callee);
        if (Loc == FIM.end())
          continue;

        // Fold the callee's previously computed counts into the caller.
        FI.MemInstCount += Loc->second.MemInstCount;
        FI.InstCount += Loc->second.InstCount;
        FI.IAMInstCount += Loc->second.IAMInstCount;
        FI.LSMInstCount += Loc->second.LSMInstCount;
      } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        TargetLoweringBase::AddrMode AM;
        auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
        AM.BaseGV = dyn_cast_or_null<GlobalValue>(const_cast<Value *>(Ptr));
        AM.HasBaseReg = !AM.BaseGV;
        if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
                                       GEP->getPointerAddressSpace()))
          // Offset will likely be folded into the load or store.
          continue;
        ++FI.InstCount;
      } else {
        ++FI.InstCount;
      }
    }
  }

  return &FI;
}
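
// Analyze F and record the verdict as function attributes so that later
// passes can query the result without re-running the analysis.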
bool AMDGPUPerfHint::runOnFunction(Function &F) {
  const Module &M = *F.getParent();
  DL = &M.getDataLayout();

  if (F.hasFnAttribute("amdgpu-wave-limiter") &&
      F.hasFnAttribute("amdgpu-memory-bound"))
    return false;

  const AMDGPUPerfHintAnalysis::FuncInfo *Info = visit(F);

  LLVM_DEBUG(dbgs() << F.getName() << " MemInst: " << Info->MemInstCount
                    << '\n'
                    << " IAMInst: " << Info->IAMInstCount << '\n'
                    << " LSMInst: " << Info->LSMInstCount << '\n'
                    << " TotalInst: " << Info->InstCount << '\n');

  if (isMemBound(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " is memory bound\n");
    NumMemBound++;
    F.addFnAttr("amdgpu-memory-bound", "true");
  }

  if (AMDGPU::isEntryFunctionCC(F.getCallingConv()) && needLimitWave(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " needs limit wave\n");
    NumLimitWave++;
    F.addFnAttr("amdgpu-wave-limiter", "true");
  }

  return true;
}
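
// A function is memory bound if its memory instructions make up more than
// MemBoundThresh percent of all its instructions.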
bool AMDGPUPerfHint::isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return FI.MemInstCount * 100 / FI.InstCount > MemBoundThresh;
}
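
// Wave limiting pays off when the weighted share of memory instructions
// (indirect accesses scaled by IAWeight, large strides by LSWeight) exceeds
// LimitWaveThresh percent of all instructions.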
bool AMDGPUPerfHint::needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return ((FI.MemInstCount + FI.IAMInstCount * IAWeight +
           FI.LSMInstCount * LSWeight) *
          100 / FI.InstCount) > LimitWaveThresh;
}

bool AMDGPUPerfHint::isGlobalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    // Flat likely points to global too.
    return As == AMDGPUAS::GLOBAL_ADDRESS || As == AMDGPUAS::FLAT_ADDRESS;
  }
  return false;
}

bool AMDGPUPerfHint::isLocalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType()))
    return PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  return false;
}
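
// Compare this access against the previous one seen in the same basic block;
// LastAccess is reset at every basic block boundary in visit().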
bool AMDGPUPerfHint::isLargeStride(const Instruction *Inst) {
  LLVM_DEBUG(dbgs() << "[isLargeStride] " << *Inst << '\n');

  MemAccessInfo MAI = makeMemAccessInfo(const_cast<Instruction *>(Inst));
  bool IsLargeStride = MAI.isLargeStride(LastAccess);
  if (MAI.Base)
    LastAccess = std::move(MAI);

  return IsLargeStride;
}
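
// Split the pointer operand of a memory instruction into a base pointer plus
// a constant byte offset. Local (LDS) accesses are never treated as large
// stride, so they yield an empty MemAccessInfo.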
AMDGPUPerfHint::MemAccessInfo
AMDGPUPerfHint::makeMemAccessInfo(Instruction *Inst) const {
  MemAccessInfo MAI;
  const Value *MO = getMemoryInstrPtr(Inst);

  LLVM_DEBUG(dbgs() << "[isLargeStride] MO: " << *MO << '\n');
  // Do not treat local-addr memory access as large stride.
  if (isLocalAddr(MO))
    return MAI;

  MAI.V = MO;
  MAI.Base = GetPointerBaseWithConstantOffset(MO, MAI.Offset, *DL);
  return MAI;
}

bool AMDGPUPerfHint::isConstantAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    return As == AMDGPUAS::CONSTANT_ADDRESS ||
           As == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
  }
  return false;
}

bool AMDGPUPerfHint::MemAccessInfo::isLargeStride(
    MemAccessInfo &Reference) const {

  if (!Base || !Reference.Base || Base != Reference.Base)
    return false;

  uint64_t Diff = Offset > Reference.Offset ? Offset - Reference.Offset
                                            : Reference.Offset - Offset;
  bool Result = Diff > LargeStrideThresh;
  LLVM_DEBUG(dbgs() << "[isLargeStride compare]\n"
                    << print() << "<=>\n"
                    << Reference.print() << "Result:" << Result << '\n');
  return Result;
}

} // namespace
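
// SCCs are visited bottom-up (callees before callers), so the FuncInfo of a
// callee is already in FIM when visit() processes a caller.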
bool AMDGPUPerfHintAnalysis::runOnSCC(CallGraphSCC &SCC) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();

  bool Changed = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->isDeclaration())
      continue;

    const TargetSubtargetInfo *ST = TM.getSubtargetImpl(*F);
    AMDGPUPerfHint Analyzer(FIM, ST->getTargetLowering());

    if (Analyzer.runOnFunction(*F))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPerfHintAnalysis::isMemoryBound(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::isMemBound(FI->second);
}

bool AMDGPUPerfHintAnalysis::needsWaveLimiter(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::needLimitWave(FI->second);
}