//===- LegacyDivergenceAnalysis.cpp --------- Legacy Divergence Analysis -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements divergence analysis which determines whether a branch
// in a GPU program is divergent. It can help branch optimizations such as jump
// threading and loop unswitching to make better decisions.
//
// GPU programs typically use the SIMD execution model, where multiple threads
// in the same execution group have to execute in lock-step. Therefore, if the
// code contains divergent branches (i.e., threads in a group do not agree on
// which path of the branch to take), the group of threads has to execute all
// the paths from that branch with different subsets of threads enabled until
// they converge at the immediately post-dominating BB of the paths.
//
// Due to this execution model, some optimizations such as jump threading and
// loop unswitching can unfortunately be harmful when performed on divergent
// branches. Therefore, an analysis that computes which branches in a GPU
// program are divergent can help the compiler to run these optimizations
// selectively.
//
// This file defines divergence analysis which computes a conservative but
// non-trivial approximation of all divergent branches in a GPU program. It
// partially implements the approach described in
//
//   Divergence Analysis
//   Sampaio, Souza, Collange, Pereira
//   TOPLAS '13
//
// The divergence analysis identifies the sources of divergence (e.g., special
// variables that hold the thread ID), and recursively marks variables that are
// data or sync dependent on a source of divergence as divergent.
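//
// To make that propagation concrete, consider a small hypothetical IR
// fragment (assuming a target, such as AMDGPU, whose thread-ID intrinsic is
// reported by TTI::isSourceOfDivergence; the exact intrinsic is illustrative):
//
//   %tid  = call i32 @llvm.amdgcn.workitem.id.x() ; source of divergence
//   %idx  = add i32 %tid, 1                       ; data dependent -> divergent
//   %cond = icmp slt i32 %idx, 64                 ; data dependent -> divergent
//   br i1 %cond, label %then, label %else         ; divergent branch
//
// Everything reachable from %tid through def-use chains is marked divergent;
// the divergent branch then feeds the sync-dependence rules described below.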
//
// While data dependency is a well-known concept, the notion of sync dependency
// is worth more explanation. Sync dependence characterizes the control flow
// aspect of the propagation of branch divergence. For example,
//
//   %cond = icmp slt i32 %tid, 10
//   br i1 %cond, label %then, label %else
// then:
//   br label %merge
// else:
//   br label %merge
// merge:
//   %a = phi i32 [ 0, %then ], [ 1, %else ]
//
// Suppose %tid holds the thread ID. Although %a is not data dependent on %tid
// because %tid is not on its use-def chains, %a is sync dependent on %tid
// because the branch "br i1 %cond" depends on %tid and affects which value %a
// is assigned to.
//
// The current implementation has the following limitations:
// 1. intra-procedural. It conservatively considers the arguments of a
//    non-kernel-entry function and the return value of a function call as
//    divergent.
// 2. memory as black box. It conservatively considers values loaded from
//    generic or local address space as divergent. This can be improved by
//    leveraging pointer analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "divergence"
87 // transparently use the GPUDivergenceAnalysis
88 static cl::opt
<bool> UseGPUDA("use-gpu-divergence-analysis", cl::init(false),
90 cl::desc("turn the LegacyDivergenceAnalysis into "
91 "a wrapper for GPUDivergenceAnalysis"));

namespace {

class DivergencePropagator {
public:
  DivergencePropagator(Function &F, TargetTransformInfo &TTI,
                       DominatorTree &DT, PostDominatorTree &PDT,
                       DenseSet<const Value *> &DV, DenseSet<const Use *> &DU)
      : F(F), TTI(TTI), DT(DT), PDT(PDT), DV(DV), DU(DU) {}
  void populateWithSourcesOfDivergence();
  void propagate();

private:
  // A helper function that explores data dependents of V.
  void exploreDataDependency(Value *V);
  // A helper function that explores sync dependents of TI.
  void exploreSyncDependency(Instruction *TI);
  // Computes the influence region from Start to End. This region includes all
  // basic blocks on any simple path from Start to End.
  void computeInfluenceRegion(BasicBlock *Start, BasicBlock *End,
                              DenseSet<BasicBlock *> &InfluenceRegion);
  // Finds all users of I that are outside the influence region, and adds these
  // users to Worklist.
  void findUsersOutsideInfluenceRegion(
      Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion);

  Function &F;
  TargetTransformInfo &TTI;
  DominatorTree &DT;
  PostDominatorTree &PDT;
  std::vector<Value *> Worklist; // Stack for DFS.
  DenseSet<const Value *> &DV;   // Stores all divergent values.
  DenseSet<const Use *> &DU;     // Stores divergent uses of possibly uniform
                                 // values.
};

} // end anonymous namespace

void DivergencePropagator::populateWithSourcesOfDivergence() {
  Worklist.clear();
  DV.clear();
  DU.clear();
  for (auto &I : instructions(F)) {
    if (TTI.isSourceOfDivergence(&I)) {
      Worklist.push_back(&I);
      DV.insert(&I);
    }
  }
  for (auto &Arg : F.args()) {
    if (TTI.isSourceOfDivergence(&Arg)) {
      Worklist.push_back(&Arg);
      DV.insert(&Arg);
    }
  }
}

void DivergencePropagator::exploreSyncDependency(Instruction *TI) {
  // Propagation rule 1: if branch TI is divergent, all PHINodes in TI's
  // immediate post dominator are divergent. This rule handles if-then-else
  // patterns. For example,
  //
  //   if (tid < 5)
  //     a1 = 1;
  //   else
  //     a2 = 2;
  //   a = phi(a1, a2); // sync dependent on (tid < 5)
  BasicBlock *ThisBB = TI->getParent();

  // Unreachable blocks may not be in the dominator tree.
  if (!DT.isReachableFromEntry(ThisBB))
    return;

  // If the function has no exit blocks or doesn't reach any exit blocks, the
  // post dominator may be null.
  DomTreeNode *ThisNode = PDT.getNode(ThisBB);
  if (ThisNode == nullptr)
    return;

  BasicBlock *IPostDom = ThisNode->getIDom()->getBlock();
  if (IPostDom == nullptr)
    return;

  for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
    // A PHINode is uniform if it returns the same value no matter which path
    // is taken.
    if (!cast<PHINode>(I)->hasConstantOrUndefValue() && DV.insert(&*I).second)
      Worklist.push_back(&*I);
  }

  // Propagation rule 2: if a value defined in a loop is used outside, the user
  // is sync dependent on the condition of the loop exits that dominate the
  // user. For example,
  //
  //   int i = 0;
  //   do {
  //     i++;
  //     if (foo(i)) ... // uniform
  //   } while (i < tid);
  //   if (bar(i)) ...   // divergent
  //
  // A program may contain unstructured loops. Therefore, we cannot leverage
  // LoopInfo, which only recognizes natural loops.
  //
  // The algorithm used here handles both natural and unstructured loops. Given
  // a branch TI, we first compute its influence region, the union of all simple
  // paths from TI to its immediate post dominator (IPostDom). Then, we search
  // for all the values defined in the influence region but used outside. All
  // these users are sync dependent on TI.
  DenseSet<BasicBlock *> InfluenceRegion;
  computeInfluenceRegion(ThisBB, IPostDom, InfluenceRegion);
  // An insight that can speed up the search process is that all the in-region
  // values that are used outside must dominate TI. Therefore, instead of
  // searching every basic block in the influence region, we search all the
  // dominators of TI until we leave the influence region.
  BasicBlock *InfluencedBB = ThisBB;
  while (InfluenceRegion.count(InfluencedBB)) {
    for (auto &I : *InfluencedBB) {
      if (!DV.count(&I))
        findUsersOutsideInfluenceRegion(I, InfluenceRegion);
    }
    DomTreeNode *IDomNode = DT.getNode(InfluencedBB)->getIDom();
    if (IDomNode == nullptr)
      break;
    InfluencedBB = IDomNode->getBlock();
  }
}

void DivergencePropagator::findUsersOutsideInfluenceRegion(
    Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion) {
  for (Use &Use : I.uses()) {
    Instruction *UserInst = cast<Instruction>(Use.getUser());
    if (!InfluenceRegion.count(UserInst->getParent())) {
      DU.insert(&Use);
      if (DV.insert(UserInst).second)
        Worklist.push_back(UserInst);
    }
  }
}

// A helper function for computeInfluenceRegion that adds successors of "ThisBB"
// to the influence region.
static void
addSuccessorsToInfluenceRegion(BasicBlock *ThisBB, BasicBlock *End,
                               DenseSet<BasicBlock *> &InfluenceRegion,
                               std::vector<BasicBlock *> &InfluenceStack) {
  for (BasicBlock *Succ : successors(ThisBB)) {
    if (Succ != End && InfluenceRegion.insert(Succ).second)
      InfluenceStack.push_back(Succ);
  }
}

void DivergencePropagator::computeInfluenceRegion(
    BasicBlock *Start, BasicBlock *End,
    DenseSet<BasicBlock *> &InfluenceRegion) {
  assert(PDT.properlyDominates(End, Start) &&
         "End does not properly dominate Start");

  // The influence region starts from the end of "Start" to the beginning of
  // "End". Therefore, "Start" should not be in the region unless "Start" is in
  // a loop that doesn't contain "End".
  std::vector<BasicBlock *> InfluenceStack;
  addSuccessorsToInfluenceRegion(Start, End, InfluenceRegion, InfluenceStack);
  while (!InfluenceStack.empty()) {
    BasicBlock *BB = InfluenceStack.back();
    InfluenceStack.pop_back();
    addSuccessorsToInfluenceRegion(BB, End, InfluenceRegion, InfluenceStack);
  }
}
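
// For illustration (a hypothetical CFG, not taken from any test), consider a
// simple diamond whose join block is the immediate post dominator of Start:
//
//          Start
//          /   \
//         A     B
//          \   /
//           End
//
// computeInfluenceRegion(Start, End, R) first pushes A and B (successors of
// Start other than End), then visits them without ever inserting End, so
// R = {A, B}. If A also had a back edge to Start, Start itself would be added
// to R, matching the comment above about loops that do not contain End.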

void DivergencePropagator::exploreDataDependency(Value *V) {
  // Follow def-use chains of V.
  for (User *U : V->users()) {
    if (!TTI.isAlwaysUniform(U) && DV.insert(U).second)
      Worklist.push_back(U);
  }
}

void DivergencePropagator::propagate() {
  // Traverse the dependency graph using DFS.
  while (!Worklist.empty()) {
    Value *V = Worklist.back();
    Worklist.pop_back();
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // Terminators with less than two successors won't introduce sync
      // dependency. Ignore them.
      if (I->isTerminator() && I->getNumSuccessors() > 1)
        exploreSyncDependency(I);
    }
    exploreDataDependency(V);
  }
}

// Register this pass.
char LegacyDivergenceAnalysis::ID = 0;
LegacyDivergenceAnalysis::LegacyDivergenceAnalysis() : FunctionPass(ID) {
  initializeLegacyDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
}
INITIALIZE_PASS_BEGIN(LegacyDivergenceAnalysis, "divergence",
                      "Legacy Divergence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LegacyDivergenceAnalysis, "divergence",
                    "Legacy Divergence Analysis", false, true)

FunctionPass *llvm::createLegacyDivergenceAnalysisPass() {
  return new LegacyDivergenceAnalysis();
}

void LegacyDivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<PostDominatorTreeWrapperPass>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.setPreservesAll();
}

bool LegacyDivergenceAnalysis::shouldUseGPUDivergenceAnalysis(
    const Function &F, const TargetTransformInfo &TTI) const {
  if (!(UseGPUDA || TTI.useGPUDivergenceAnalysis()))
    return false;

  // GPUDivergenceAnalysis requires a reducible CFG.
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  using RPOTraversal = ReversePostOrderTraversal<const Function *>;
  RPOTraversal FuncRPOT(&F);
  return !containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
                                 const LoopInfo>(FuncRPOT, LI);
}
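
// For reference, the simplest irreducible CFG (hypothetical sketch) is a
// cycle that can be entered at two different blocks:
//
//   entry -> A,  entry -> B,  A -> B,  B -> A
//
// Such a cycle has no unique header, so it is not a natural loop;
// containsIrreducibleCFG detects this case, shouldUseGPUDivergenceAnalysis
// then returns false, and runOnFunction falls back to DivergencePropagator.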

bool LegacyDivergenceAnalysis::runOnFunction(Function &F) {
  auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
  if (TTIWP == nullptr)
    return false;

  TargetTransformInfo &TTI = TTIWP->getTTI(F);
  // Fast path: if the target does not have branch divergence, we do not mark
  // any branch as divergent.
  if (!TTI.hasBranchDivergence())
    return false;

  DivergentValues.clear();
  DivergentUses.clear();
  gpuDA = nullptr;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

  if (shouldUseGPUDivergenceAnalysis(F, TTI)) {
    // run the new GPU divergence analysis
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    gpuDA = std::make_unique<DivergenceInfo>(F, DT, PDT, LI, TTI,
                                             /* KnownReducible = */ true);
  } else {
    // run LLVM's existing DivergenceAnalysis
    DivergencePropagator DP(F, TTI, DT, PDT, DivergentValues, DivergentUses);
    DP.populateWithSourcesOfDivergence();
    DP.propagate();
  }

  LLVM_DEBUG(dbgs() << "\nAfter divergence analysis on " << F.getName()
                    << ":\n";
             print(dbgs(), F.getParent()));

  return false;
}

bool LegacyDivergenceAnalysis::isDivergent(const Value *V) const {
  if (gpuDA) {
    return gpuDA->isDivergent(*V);
  }
  return DivergentValues.count(V);
}

bool LegacyDivergenceAnalysis::isDivergentUse(const Use *U) const {
  if (gpuDA) {
    return gpuDA->isDivergentUse(*U);
  }
  return DivergentValues.count(U->get()) || DivergentUses.count(U);
}
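
// A minimal sketch of how a client pass might consume these queries (the
// client pass "MyGPUOpt" and its boilerplate are hypothetical; a real client
// would also have to require LegacyDivergenceAnalysis in getAnalysisUsage):
//
//   bool MyGPUOpt::runOnFunction(Function &F) {
//     auto &DA = getAnalysis<LegacyDivergenceAnalysis>();
//     for (Instruction &I : instructions(F))
//       if (auto *BI = dyn_cast<BranchInst>(&I))
//         if (BI->isConditional() && DA.isDivergent(BI->getCondition()))
//           ; // e.g., refrain from jump threading across this branch
//     return false;
//   }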

void LegacyDivergenceAnalysis::print(raw_ostream &OS, const Module *) const {
  if ((!gpuDA || !gpuDA->hasDivergence()) && DivergentValues.empty())
    return;

  const Function *F = nullptr;
  if (!DivergentValues.empty()) {
    const Value *FirstDivergentValue = *DivergentValues.begin();
    if (const Argument *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
      F = Arg->getParent();
    } else if (const Instruction *I =
                   dyn_cast<Instruction>(FirstDivergentValue)) {
      F = I->getParent()->getParent();
    } else {
      llvm_unreachable("Only arguments and instructions can be divergent");
    }
  } else if (gpuDA) {
    F = &gpuDA->getFunction();
  }
  if (!F)
    return;

  // Dumps all divergent values in F, arguments and then instructions.
  for (auto &Arg : F->args()) {
    OS << (isDivergent(&Arg) ? "DIVERGENT: " : "           ");
    OS << Arg << "\n";
  }
  // Iterate instructions using instructions() to ensure a deterministic order.
  for (const BasicBlock &BB : *F) {
    OS << "\n           " << BB.getName() << ":\n";
    for (auto &I : BB.instructionsWithoutDebug()) {
      OS << (isDivergent(&I) ? "DIVERGENT: " : "           ");
      OS << I << "\n";
    }
  }
  OS << "\n";
}