//===- LegacyDivergenceAnalysis.cpp - Legacy Divergence Analysis Impl ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements divergence analysis which determines whether a branch
// in a GPU program is divergent. It can help branch optimizations such as jump
// threading and loop unswitching to make better decisions.
//
// GPU programs typically use the SIMD execution model, where multiple threads
// in the same execution group have to execute in lock-step. Therefore, if the
// code contains divergent branches (i.e., threads in a group do not agree on
// which path of the branch to take), the group of threads has to execute all
// the paths from that branch with different subsets of threads enabled until
// they converge at the immediately post-dominating BB of the paths.
//
// Due to this execution model, some optimizations such as jump
// threading and loop unswitching can be unfortunately harmful when performed on
// divergent branches. Therefore, an analysis that computes which branches in a
// GPU program are divergent can help the compiler to selectively run these
// optimizations.
//
// This file defines divergence analysis which computes a conservative but
// non-trivial approximation of all divergent branches in a GPU program. It
// partially implements the approach described in
//
//   Divergence Analysis
//   Sampaio, Souza, Collange, Pereira
//   TOPLAS '13
//
// The divergence analysis identifies the sources of divergence (e.g., special
// variables that hold the thread ID), and recursively marks variables that are
// data or sync dependent on a source of divergence as divergent.
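//
// As an illustrative sketch (not part of the original comment): a typical
// source of divergence reported by the target is a thread-ID read, e.g. on
// NVPTX
//
//   %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
//   %cond = icmp slt i32 %tid, 10   ; data dependent on %tid -> divergent
//
// whereas a value computed only from thread-uniform inputs stays uniform.
//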
// While data dependency is a well-known concept, the notion of sync dependency
// is worth more explanation. Sync dependence characterizes the control flow
// aspect of the propagation of branch divergence. For example,
//
//   %cond = icmp slt i32 %tid, 10
//   br i1 %cond, label %then, label %else
// then:
//   br label %merge
// else:
//   br label %merge
// merge:
//   %a = phi i32 [ 0, %then ], [ 1, %else ]
//
// Suppose %tid holds the thread ID. Although %a is not data dependent on %tid
// because %tid is not on its use-def chains, %a is sync dependent on %tid
// because the branch "br i1 %cond" depends on %tid and affects which value %a
// is assigned to.
//
// The current implementation has the following limitations:
// 1. intra-procedural. It conservatively considers the arguments of a
//    non-kernel-entry function and the return value of a function call as
//    divergent.
// 2. memory as black box. It conservatively considers values loaded from
//    generic or local address as divergent. This can be improved by leveraging
//    pointer analysis.
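//
// For instance (an illustrative sketch, not part of the original comment),
// given
//
//   %v = load i32, i32* %p        ; %p in the generic address space
//
// the analysis treats %v as divergent even if %p itself is uniform, because it
// does not reason about what the load may observe in memory.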
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h" // For the cl::opt below.
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>
using namespace llvm;

#define DEBUG_TYPE "divergence"

// transparently use the GPUDivergenceAnalysis
static cl::opt<bool> UseGPUDA("use-gpu-divergence-analysis", cl::init(false),
                              cl::Hidden,
                              cl::desc("turn the LegacyDivergenceAnalysis into "
                                       "a wrapper for GPUDivergenceAnalysis"));

namespace {

class DivergencePropagator {
public:
  DivergencePropagator(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
                       PostDominatorTree &PDT, DenseSet<const Value *> &DV,
                       DenseSet<const Use *> &DU)
      : F(F), TTI(TTI), DT(DT), PDT(PDT), DV(DV), DU(DU) {}
  void populateWithSourcesOfDivergence();
  void propagate();

private:
  // A helper function that explores data dependents of V.
  void exploreDataDependency(Value *V);
  // A helper function that explores sync dependents of TI.
  void exploreSyncDependency(Instruction *TI);
  // Computes the influence region from Start to End. This region includes all
  // basic blocks on any simple path from Start to End.
  void computeInfluenceRegion(BasicBlock *Start, BasicBlock *End,
                              DenseSet<BasicBlock *> &InfluenceRegion);
  // Finds all users of I that are outside the influence region, and adds these
  // users to Worklist.
  void findUsersOutsideInfluenceRegion(
      Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion);

  Function &F;
  TargetTransformInfo &TTI;
  DominatorTree &DT;
  PostDominatorTree &PDT;
  std::vector<Value *> Worklist; // Stack for DFS.
  DenseSet<const Value *> &DV;   // Stores all divergent values.
  DenseSet<const Use *> &DU;     // Stores divergent uses of possibly uniform
                                 // values.
};

void DivergencePropagator::populateWithSourcesOfDivergence() {
  Worklist.clear();
  DV.clear();
  DU.clear();
  for (auto &I : instructions(F)) {
    if (TTI.isSourceOfDivergence(&I)) {
      Worklist.push_back(&I);
      DV.insert(&I);
    }
  }
  for (auto &Arg : F.args()) {
    if (TTI.isSourceOfDivergence(&Arg)) {
      Worklist.push_back(&Arg);
      DV.insert(&Arg);
    }
  }
}

void DivergencePropagator::exploreSyncDependency(Instruction *TI) {
  // Propagation rule 1: if branch TI is divergent, all PHINodes in TI's
  // immediate post dominator are divergent. This rule handles if-then-else
  // patterns. For example,
  //
  //   if (tid < 5)
  //     a1 = 1;
  //   else
  //     a2 = 2;
  //   a = phi(a1, a2); // sync dependent on (tid < 5)
  BasicBlock *ThisBB = TI->getParent();

  // Unreachable blocks may not be in the dominator tree.
  if (!DT.isReachableFromEntry(ThisBB))
    return;

  // If the function has no exit blocks or doesn't reach any exit blocks, the
  // post dominator may be null.
  DomTreeNode *ThisNode = PDT.getNode(ThisBB);
  if (!ThisNode)
    return;

  BasicBlock *IPostDom = ThisNode->getIDom()->getBlock();
  if (IPostDom == nullptr)
    return;

  for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
    // A PHINode is uniform if it returns the same value no matter which path is
    // taken.
    if (!cast<PHINode>(I)->hasConstantOrUndefValue() && DV.insert(&*I).second)
      Worklist.push_back(&*I);
  }

  // Propagation rule 2: if a value defined in a loop is used outside, the user
  // is sync dependent on the condition of the loop exits that dominate the
  // user. For example,
  //
  //   int i = 0;
  //   do {
  //     i++;
  //     if (foo(i)) ... // uniform
  //   } while (i < tid);
  //   if (bar(i)) ...   // divergent
  //
  // A program may contain unstructured loops. Therefore, we cannot leverage
  // LoopInfo, which only recognizes natural loops.
  //
  // The algorithm used here handles both natural and unstructured loops. Given
  // a branch TI, we first compute its influence region, the union of all simple
  // paths from TI to its immediate post dominator (IPostDom). Then, we search
  // for all the values defined in the influence region but used outside. All
  // these users are sync dependent on TI.
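  //
  // As an illustrative sketch (not part of the original comment), the example
  // above roughly corresponds to IR of the form
  //
  //   loop:
  //     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  //     %i.next = add i32 %i, 1
  //     %exit.cond = icmp slt i32 %i.next, %tid
  //     br i1 %exit.cond, label %loop, label %after   ; divergent exit
  //   after:
  //     ; uses of %i.next here are sync dependent on the exit branch
  //
  // The exit branch depends on %tid, so a value defined in the loop, e.g.
  // %i.next, becomes divergent in every user outside the loop.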
  DenseSet<BasicBlock *> InfluenceRegion;
  computeInfluenceRegion(ThisBB, IPostDom, InfluenceRegion);
  // An insight that can speed up the search process is that all the in-region
  // values that are used outside must dominate TI. Therefore, instead of
  // searching every basic block in the influence region, we search all the
  // dominators of TI until it is outside the influence region.
  BasicBlock *InfluencedBB = ThisBB;
  while (InfluenceRegion.count(InfluencedBB)) {
    for (auto &I : *InfluencedBB) {
      if (!DV.count(&I))
        findUsersOutsideInfluenceRegion(I, InfluenceRegion);
    }
    DomTreeNode *IDomNode = DT.getNode(InfluencedBB)->getIDom();
    if (IDomNode == nullptr)
      break;
    InfluencedBB = IDomNode->getBlock();
  }
}

void DivergencePropagator::findUsersOutsideInfluenceRegion(
    Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion) {
  for (Use &Use : I.uses()) {
    Instruction *UserInst = cast<Instruction>(Use.getUser());
    if (!InfluenceRegion.count(UserInst->getParent())) {
      DU.insert(&Use);
      if (DV.insert(UserInst).second)
        Worklist.push_back(UserInst);
    }
  }
}

// A helper function for computeInfluenceRegion that adds successors of "ThisBB"
// to the influence region.
static void
addSuccessorsToInfluenceRegion(BasicBlock *ThisBB, BasicBlock *End,
                               DenseSet<BasicBlock *> &InfluenceRegion,
                               std::vector<BasicBlock *> &InfluenceStack) {
  for (BasicBlock *Succ : successors(ThisBB)) {
    if (Succ != End && InfluenceRegion.insert(Succ).second)
      InfluenceStack.push_back(Succ);
  }
}

void DivergencePropagator::computeInfluenceRegion(
    BasicBlock *Start, BasicBlock *End,
    DenseSet<BasicBlock *> &InfluenceRegion) {
  assert(PDT.properlyDominates(End, Start) &&
         "End does not properly dominate Start");

  // The influence region starts from the end of "Start" to the beginning of
  // "End". Therefore, "Start" should not be in the region unless "Start" is in
  // a loop that doesn't contain "End".
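  //
  // Illustrative sketch (not part of the original comment): with a CFG such as
  //
  //   Start -> Body -> Start   (back edge of a loop not containing End)
  //   Start -> End
  //
  // the worklist below walks Start's successors, follows the back edge, and
  // eventually re-inserts Start itself into the region, which is exactly the
  // "loop that doesn't contain End" case mentioned above.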
  std::vector<BasicBlock *> InfluenceStack;
  addSuccessorsToInfluenceRegion(Start, End, InfluenceRegion, InfluenceStack);
  while (!InfluenceStack.empty()) {
    BasicBlock *BB = InfluenceStack.back();
    InfluenceStack.pop_back();
    addSuccessorsToInfluenceRegion(BB, End, InfluenceRegion, InfluenceStack);
  }
}

void DivergencePropagator::exploreDataDependency(Value *V) {
  // Follow def-use chains of V.
  for (User *U : V->users()) {
    Instruction *UserInst = cast<Instruction>(U);
    if (!TTI.isAlwaysUniform(U) && DV.insert(UserInst).second)
      Worklist.push_back(UserInst);
  }
}

void DivergencePropagator::propagate() {
  // Traverse the dependency graph using DFS.
  while (!Worklist.empty()) {
    Value *V = Worklist.back();
    Worklist.pop_back();
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // Terminators with less than two successors won't introduce sync
      // dependency. Ignore them.
      if (I->isTerminator() && I->getNumSuccessors() > 1)
        exploreSyncDependency(I);
    }
    exploreDataDependency(V);
  }
}

} // namespace

// Register this pass.
char LegacyDivergenceAnalysis::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyDivergenceAnalysis, "divergence",
                      "Legacy Divergence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LegacyDivergenceAnalysis, "divergence",
                    "Legacy Divergence Analysis", false, true)

FunctionPass *llvm::createLegacyDivergenceAnalysisPass() {
  return new LegacyDivergenceAnalysis();
}

void LegacyDivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PostDominatorTreeWrapperPass>();
  if (UseGPUDA)
    AU.addRequired<LoopInfoWrapperPass>();
  AU.setPreservesAll();
}

bool LegacyDivergenceAnalysis::shouldUseGPUDivergenceAnalysis(
    const Function &F) const {
  if (!UseGPUDA)
    return false;

  // GPUDivergenceAnalysis requires a reducible CFG.
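  // (Illustrative note, not from the original comment: a CFG is irreducible
  // when some cycle can be entered at more than one block, e.g.
  //
  //   entry -> A, entry -> B, A -> B, B -> A
  //
  // Neither A nor B dominates the other, so LoopInfo cannot model the cycle as
  // a natural loop.)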
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  using RPOTraversal = ReversePostOrderTraversal<const Function *>;
  RPOTraversal FuncRPOT(&F);
  return !containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
                                 const LoopInfo>(FuncRPOT, LI);
}

bool LegacyDivergenceAnalysis::runOnFunction(Function &F) {
  auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
  if (TTIWP == nullptr)
    return false;

  TargetTransformInfo &TTI = TTIWP->getTTI(F);
  // Fast path: if the target does not have branch divergence, we do not mark
  // any branch as divergent.
  if (!TTI.hasBranchDivergence())
    return false;

  DivergentValues.clear();
  DivergentUses.clear();
  gpuDA = nullptr;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

  if (shouldUseGPUDivergenceAnalysis(F)) {
    // run the new GPU divergence analysis
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    gpuDA = std::make_unique<GPUDivergenceAnalysis>(F, DT, PDT, LI, TTI);
  } else {
    // run LLVM's existing DivergenceAnalysis
    DivergencePropagator DP(F, TTI, DT, PDT, DivergentValues, DivergentUses);
    DP.populateWithSourcesOfDivergence();
    DP.propagate();
  }

  LLVM_DEBUG(dbgs() << "\nAfter divergence analysis on " << F.getName()
                    << ":\n";
             print(dbgs(), F.getParent()));

  return false;
}

bool LegacyDivergenceAnalysis::isDivergent(const Value *V) const {
  if (gpuDA) {
    return gpuDA->isDivergent(*V);
  }
  return DivergentValues.count(V);
}

bool LegacyDivergenceAnalysis::isDivergentUse(const Use *U) const {
  if (gpuDA) {
    return gpuDA->isDivergentUse(*U);
  }
  return DivergentValues.count(U->get()) || DivergentUses.count(U);
}
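
// Usage sketch (not part of the original source): a legacy transform pass that
// requires LegacyDivergenceAnalysis in its getAnalysisUsage() could query it
// like this, where BranchCond is a hypothetical Value* in the client pass:
//
//   const auto &DA = getAnalysis<LegacyDivergenceAnalysis>();
//   if (DA.isDivergent(BranchCond))
//     ...; // e.g. avoid jump threading across this branch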

void LegacyDivergenceAnalysis::print(raw_ostream &OS, const Module *) const {
  if ((!gpuDA || !gpuDA->hasDivergence()) && DivergentValues.empty())
    return;

  const Function *F = nullptr;
  if (!DivergentValues.empty()) {
    const Value *FirstDivergentValue = *DivergentValues.begin();
    if (const Argument *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
      F = Arg->getParent();
    } else if (const Instruction *I =
                   dyn_cast<Instruction>(FirstDivergentValue)) {
      F = I->getParent()->getParent();
    } else {
      llvm_unreachable("Only arguments and instructions can be divergent");
    }
  } else if (gpuDA) {
    F = &gpuDA->getFunction();
  }
  if (!F)
    return;

  // Dumps all divergent values in F, arguments and then instructions.
  for (auto &Arg : F->args()) {
    OS << (isDivergent(&Arg) ? "DIVERGENT: " : "           ");
    OS << Arg << "\n";
  }
  // Iterate basic blocks in order and print each instruction, so the output is
  // deterministic.
  for (auto BI = F->begin(), BE = F->end(); BI != BE; ++BI) {
    auto &BB = *BI;
    OS << "\n           " << BB.getName() << ":\n";
    for (auto &I : BB.instructionsWithoutDebug()) {
      OS << (isDivergent(&I) ? "DIVERGENT: " : "           ");
      OS << I << "\n";
    }
  }
  OS << "\n";
}