llvm/lib/CodeGen/CallBrPrepare.cpp
//===-- CallBrPrepare - Prepare callbr for code generation ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers callbrs in LLVM IR in order to assist SelectionDAG's
// codegen.
//
// In particular, this pass assists in inserting register copies for the output
// values of a callbr along the edges leading to the indirect target blocks.
// Though the output SSA value is defined by the callbr instruction itself in
// the IR representation, the value cannot be copied to the appropriate virtual
// registers prior to jumping to an indirect label, since the jump occurs
// within the user-provided assembly blob.
//
// Instead, those copies must occur separately at the beginning of each
// indirect target. That requires that we create a separate SSA definition in
// each of them (via llvm.callbr.landingpad), and may require splitting
// critical edges so we have a location to place the intrinsic. Finally, we
// remap users of the original callbr output SSA value to instead point to the
// appropriate llvm.callbr.landingpad value.
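//
// As a rough illustration (the names %ret, %fallthrough, and %indirect below
// are invented for this comment, not taken from any test), given IR along the
// lines of
//
//   %ret = callbr i32 asm "...", "=r,!i"()
//             to label %fallthrough [label %indirect]
//
// the pass inserts a landing pad intrinsic at the start of the indirect
// target,
//
//   indirect:
//     %ret.lp = call i32 @llvm.callbr.landingpad.i32(i32 %ret)
//
// and uses of %ret that are reached via %indirect are rewritten to use
// %ret.lp instead.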
//
// Ideally, this could be done inside SelectionDAG, or in the
// MachineInstruction representation, without the use of an IR-level intrinsic.
// But, within the current framework, it's simpler to implement as an IR pass.
// (If support for callbr in GlobalISel is implemented, it's worth considering
// whether this is still required.)
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

using namespace llvm;

#define DEBUG_TYPE "callbrprepare"

namespace {

class CallBrPrepare : public FunctionPass {
  bool SplitCriticalEdges(ArrayRef<CallBrInst *> CBRs, DominatorTree &DT);
  bool InsertIntrinsicCalls(ArrayRef<CallBrInst *> CBRs,
                            DominatorTree &DT) const;
  void UpdateSSA(DominatorTree &DT, CallBrInst *CBR, CallInst *Intrinsic,
                 SSAUpdater &SSAUpdate) const;

public:
  CallBrPrepare() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &Fn) override;
  static char ID;
};

} // end anonymous namespace

char CallBrPrepare::ID = 0;
INITIALIZE_PASS_BEGIN(CallBrPrepare, DEBUG_TYPE, "Prepare callbr", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(CallBrPrepare, DEBUG_TYPE, "Prepare callbr", false, false)

FunctionPass *llvm::createCallBrPass() { return new CallBrPrepare(); }
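
// A minimal usage sketch (hypothetical driver code, not part of this file):
// the pass is normally scheduled as part of the codegen pipeline, but the
// factory above also allows adding it to a legacy pass manager by hand, e.g.
//
//   legacy::PassManager PM;   // needs llvm/IR/LegacyPassManager.h
//   PM.add(createCallBrPass());
//   PM.run(M);                // M is an llvm::Module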

void CallBrPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<DominatorTreeWrapperPass>();
}
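
// Collect the callbrs in this function whose results are non-void and
// actually used; callbrs without used outputs need no landing pads.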
static SmallVector<CallBrInst *, 2> FindCallBrs(Function &Fn) {
  SmallVector<CallBrInst *, 2> CBRs;
  for (BasicBlock &BB : Fn)
    if (auto *CBR = dyn_cast<CallBrInst>(BB.getTerminator()))
      if (!CBR->getType()->isVoidTy() && !CBR->use_empty())
        CBRs.push_back(CBR);
  return CBRs;
}

bool CallBrPrepare::SplitCriticalEdges(ArrayRef<CallBrInst *> CBRs,
                                       DominatorTree &DT) {
  bool Changed = false;
  CriticalEdgeSplittingOptions Options(&DT);
  Options.setMergeIdenticalEdges();

  // The indirect destination might be duplicated between another parameter...
  //   %0 = callbr ... [label %x, label %x]
  // ...hence MergeIdenticalEdges and AllowIdenticalEdges, but we don't need
  // to split the default destination if it's duplicated between an indirect
  // destination...
  //   %1 = callbr ... to label %x [label %x]
  // ...hence starting at 1 and checking against successor 0 (aka the default
  // destination).
  for (CallBrInst *CBR : CBRs)
    for (unsigned i = 1, e = CBR->getNumSuccessors(); i != e; ++i)
      if (CBR->getSuccessor(i) == CBR->getSuccessor(0) ||
          isCriticalEdge(CBR, i, /*AllowIdenticalEdges*/ true))
        if (SplitKnownCriticalEdge(CBR, i, Options))
          Changed = true;
  return Changed;
}

bool CallBrPrepare::InsertIntrinsicCalls(ArrayRef<CallBrInst *> CBRs,
                                         DominatorTree &DT) const {
  bool Changed = false;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  IRBuilder<> Builder(CBRs[0]->getContext());
  for (CallBrInst *CBR : CBRs) {
    if (!CBR->getNumIndirectDests())
      continue;

    SSAUpdater SSAUpdate;
    SSAUpdate.Initialize(CBR->getType(), CBR->getName());
    SSAUpdate.AddAvailableValue(CBR->getParent(), CBR);
    SSAUpdate.AddAvailableValue(CBR->getDefaultDest(), CBR);

    for (BasicBlock *IndDest : CBR->getIndirectDests()) {
      if (!Visited.insert(IndDest).second)
        continue;
      Builder.SetInsertPoint(&*IndDest->begin());
      CallInst *Intrinsic = Builder.CreateIntrinsic(
          CBR->getType(), Intrinsic::callbr_landingpad, {CBR});
      SSAUpdate.AddAvailableValue(IndDest, Intrinsic);
      UpdateSSA(DT, CBR, Intrinsic, SSAUpdate);
      Changed = true;
    }
  }
  return Changed;
}

static bool IsInSameBasicBlock(const Use &U, const BasicBlock *BB) {
  const auto *I = dyn_cast<Instruction>(U.getUser());
  return I && I->getParent() == BB;
}

#ifndef NDEBUG
static void PrintDebugDomInfo(const DominatorTree &DT, const Use &U,
                              const BasicBlock *BB, bool IsDefaultDest) {
  if (!isa<Instruction>(U.getUser()))
    return;
  LLVM_DEBUG(dbgs() << "Use: " << *U.getUser() << ", in block "
                    << cast<Instruction>(U.getUser())->getParent()->getName()
                    << ", is " << (DT.dominates(BB, U) ? "" : "NOT ")
                    << "dominated by " << BB->getName() << " ("
                    << (IsDefaultDest ? "in" : "") << "direct)\n");
}
#endif
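
// Rewrite the uses of CBR's result: a use in the landing pad block itself is
// pointed directly at the new intrinsic, a use dominated by the default
// destination keeps the original callbr value, and every other use is handed
// to the SSAUpdater, which may introduce PHI nodes as needed.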
void CallBrPrepare::UpdateSSA(DominatorTree &DT, CallBrInst *CBR,
                              CallInst *Intrinsic,
                              SSAUpdater &SSAUpdate) const {

  SmallPtrSet<Use *, 4> Visited;
  BasicBlock *DefaultDest = CBR->getDefaultDest();
  BasicBlock *LandingPad = Intrinsic->getParent();

  SmallVector<Use *, 4> Uses(make_pointer_range(CBR->uses()));
  for (Use *U : Uses) {
    if (!Visited.insert(U).second)
      continue;

#ifndef NDEBUG
    PrintDebugDomInfo(DT, *U, LandingPad, /*IsDefaultDest*/ false);
    PrintDebugDomInfo(DT, *U, DefaultDest, /*IsDefaultDest*/ true);
#endif

    // Don't rewrite the use in the newly inserted intrinsic.
    if (const auto *II = dyn_cast<IntrinsicInst>(U->getUser()))
      if (II->getIntrinsicID() == Intrinsic::callbr_landingpad)
        continue;

    // If the Use is in the same BasicBlock as the Intrinsic call, replace
    // the Use with the value of the Intrinsic call.
    if (IsInSameBasicBlock(*U, LandingPad)) {
      U->set(Intrinsic);
      continue;
    }

    // If the Use is dominated by the default dest, do not touch it.
    if (DT.dominates(DefaultDest, *U))
      continue;

    SSAUpdate.RewriteUse(*U);
  }
}

bool CallBrPrepare::runOnFunction(Function &Fn) {
  bool Changed = false;
  SmallVector<CallBrInst *, 2> CBRs = FindCallBrs(Fn);

  if (CBRs.empty())
    return Changed;

  // It's highly likely that most programs do not contain CallBrInsts. Follow a
  // similar pattern from SafeStackLegacyPass::runOnFunction to reuse previous
  // domtree analysis if available, otherwise compute it lazily. This avoids
  // forcing Dominator Tree Construction at -O0 for programs that likely do not
  // contain CallBrInsts. It does pessimize programs with callbr at higher
  // optimization levels, as the DominatorTree created here is not reused by
  // subsequent passes.
  DominatorTree *DT;
  std::optional<DominatorTree> LazilyComputedDomTree;
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &DTWP->getDomTree();
  else {
    LazilyComputedDomTree.emplace(Fn);
    DT = &*LazilyComputedDomTree;
  }

  if (SplitCriticalEdges(CBRs, *DT))
    Changed = true;

  if (InsertIntrinsicCalls(CBRs, *DT))
    Changed = true;

  return Changed;
}