//===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a CFG sorting pass.
///
/// This pass reorders the blocks in a function to put them into topological
/// order, ignoring loop backedges, and without any loop or exception being
/// interrupted by a block not dominated by its header, with special care
/// to keep the order as similar as possible to the original order.
///
//===----------------------------------------------------------------------===//

#include "WebAssembly.h"
#include "WebAssemblyExceptionInfo.h"
#include "WebAssemblySortRegion.h"
#include "WebAssemblyUtilities.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using WebAssembly::SortRegion;
using WebAssembly::SortRegionInfo;

#define DEBUG_TYPE "wasm-cfg-sort"

// Option to disable EH pad first sorting. Only for testing unwind destination
// mismatches in CFGStackify.
static cl::opt<bool> WasmDisableEHPadSort(
    "wasm-disable-ehpad-sort", cl::ReallyHidden,
    cl::desc(
        "WebAssembly: Disable EH pad-first sort order. Testing purpose only."),
    cl::init(false));

namespace {

class WebAssemblyCFGSort final : public MachineFunctionPass {
  StringRef getPassName() const override { return "WebAssembly CFG Sort"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addPreserved<MachineDominatorTreeWrapperPass>();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addPreserved<MachineLoopInfoWrapperPass>();
    AU.addRequired<WebAssemblyExceptionInfo>();
    AU.addPreserved<WebAssemblyExceptionInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

public:
  static char ID; // Pass identification, replacement for typeid
  WebAssemblyCFGSort() : MachineFunctionPass(ID) {}
};
} // end anonymous namespace

char WebAssemblyCFGSort::ID = 0;
INITIALIZE_PASS(WebAssemblyCFGSort, DEBUG_TYPE,
                "Reorders blocks in topological order", false, false)

FunctionPass *llvm::createWebAssemblyCFGSort() {
  return new WebAssemblyCFGSort();
}

static void maybeUpdateTerminator(MachineBasicBlock *MBB) {
#ifndef NDEBUG
  bool AnyBarrier = false;
#endif
  bool AllAnalyzable = true;
  for (const MachineInstr &Term : MBB->terminators()) {
#ifndef NDEBUG
    AnyBarrier |= Term.isBarrier();
#endif
    AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
  }
  assert((AnyBarrier || AllAnalyzable) &&
         "analyzeBranch needs to analyze any block with a fallthrough");

  // Find the layout successor from the original block order.
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *OriginalSuccessor =
      unsigned(MBB->getNumber() + 1) < MF->getNumBlockIDs()
          ? MF->getBlockNumbered(MBB->getNumber() + 1)
          : nullptr;

  if (AllAnalyzable)
    MBB->updateTerminator(OriginalSuccessor);
}

namespace {
// EH pads are selected first regardless of the block comparison order.
// When only one of the BBs is an EH pad, we give a higher priority to it, to
// prevent common mismatches between possibly throwing calls and the EH pads
// they unwind to, as in the example below:
//
// bb0:
//   call @foo      // If this throws, unwind to bb2
// bb1:
//   call @bar      // If this throws, unwind to bb3
// bb2 (ehpad):
//   handler_bb2
// bb3 (ehpad):
//   handler_bb3
// continuing code
//
// Because this pass tries to preserve the original BB order, that order will
// not change. But it produces the following try-catch structure in
// CFGStackify, resulting in a mismatch:
// try
//   try
//     call @foo
//     call @bar    // This should unwind to bb3, not bb2!
//   catch
//     handler_bb2
//   end
// catch
//   handler_bb3
// end
// continuing code
//
// If we instead give a higher priority to an EH pad whenever it is ready,
// then in this example, when both bb1 and bb2 are ready, we pick bb2 first.

/// Sort blocks by their number.
struct CompareBlockNumbers {
  bool operator()(const MachineBasicBlock *A,
                  const MachineBasicBlock *B) const {
    if (!WasmDisableEHPadSort) {
      if (A->isEHPad() && !B->isEHPad())
        return false;
      if (!A->isEHPad() && B->isEHPad())
        return true;
    }

    return A->getNumber() > B->getNumber();
  }
};
/// Sort blocks by their number in the opposite order.
struct CompareBlockNumbersBackwards {
  bool operator()(const MachineBasicBlock *A,
                  const MachineBasicBlock *B) const {
    if (!WasmDisableEHPadSort) {
      if (A->isEHPad() && !B->isEHPad())
        return false;
      if (!A->isEHPad() && B->isEHPad())
        return true;
    }

    return A->getNumber() < B->getNumber();
  }
};
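
// Illustrative sketch (not part of this pass): a minimal, self-contained toy
// showing the effect of a CompareBlockNumbers-style ordering when used with a
// max-heap priority queue. ToyBlock, ToyCompare, and ehPadFirstDemo are
// hypothetical names introduced only for illustration; the queue and vector
// types are the ones already pulled in via llvm/ADT/PriorityQueue.h above.
// With this ordering an EH pad is popped before any ordinary block, and
// otherwise lower-numbered blocks are popped first.
struct ToyBlock {
  int Number;
  bool IsEHPad;
};

struct ToyCompare {
  // Returns true when A should sit below B in the heap, mirroring
  // CompareBlockNumbers: EH pads float to the top, then lower block numbers.
  bool operator()(const ToyBlock *A, const ToyBlock *B) const {
    if (A->IsEHPad != B->IsEHPad)
      return !A->IsEHPad;
    return A->Number > B->Number;
  }
};

// Pushing {#1 plain, #3 EH pad, #2 plain} pops #3 (the EH pad) first, even
// though it has the largest number, and then #1 before #2.
[[maybe_unused]] static bool ehPadFirstDemo() {
  ToyBlock B1{1, false}, B2{2, false}, B3{3, true};
  PriorityQueue<ToyBlock *, std::vector<ToyBlock *>, ToyCompare> Q;
  Q.push(&B1);
  Q.push(&B3);
  Q.push(&B2);
  bool EHPadFirst = Q.top() == &B3;
  Q.pop();
  bool LowestNumberNext = Q.top() == &B1;
  return EHPadFirst && LowestNumberNext;
}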

/// Bookkeeping for a region to help ensure that we don't mix blocks not
/// dominated by its header among its blocks.
struct Entry {
  const SortRegion *TheRegion;
  unsigned NumBlocksLeft;

  /// List of blocks not dominated by the region's header that are deferred
  /// until after all of the region's blocks have been seen.
  std::vector<MachineBasicBlock *> Deferred;

  explicit Entry(const SortRegion *R)
      : TheRegion(R), NumBlocksLeft(R->getNumBlocks()) {}
};
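
// Illustrative sketch (not part of this pass): a toy walk-through of the
// deferral bookkeeping that Entry supports, using only plain integers.
// ToyEntry and regionDeferralDemo are hypothetical names for illustration.
// A block parked on a region's Deferred list becomes schedulable only once
// the region has placed all of its blocks, i.e. NumBlocksLeft reaches zero.
struct ToyEntry {
  unsigned NumBlocksLeft;
  std::vector<int> Deferred; // Block numbers parked until the region is done.
};

// A region with three blocks; block #7, which the region's header does not
// dominate, is deferred and is released to the ready list only after the
// region's last block has been placed.
[[maybe_unused]] static bool regionDeferralDemo() {
  ToyEntry Region{/*NumBlocksLeft=*/3, /*Deferred=*/{}};
  Region.Deferred.push_back(7);
  std::vector<int> Ready;
  for (int Placed = 0; Placed < 3; ++Placed) {
    // Place one of the region's blocks; when the last one lands, flush the
    // deferred blocks into the ready list, as sortBlocks does below.
    if (--Region.NumBlocksLeft == 0)
      Ready.insert(Ready.end(), Region.Deferred.begin(),
                   Region.Deferred.end());
  }
  return Ready.size() == 1 && Ready.front() == 7;
}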

} // end anonymous namespace

/// Sort the blocks, taking special care to make sure that regions are not
/// interrupted by blocks not dominated by their header.
/// TODO: There are many opportunities for improving the heuristics here.
/// Explore them.
static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
                       const WebAssemblyExceptionInfo &WEI,
                       MachineDominatorTree &MDT) {
  // Remember original layout ordering, so we can update terminators after
  // reordering to point to the original layout successor.
  MF.RenumberBlocks();
  MDT.updateBlockNumbers();

  // Prepare for a topological sort: Record the number of predecessors each
  // block has, ignoring loop backedges.
  SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
  for (MachineBasicBlock &MBB : MF) {
    unsigned N = MBB.pred_size();
    if (MachineLoop *L = MLI.getLoopFor(&MBB))
      if (L->getHeader() == &MBB)
        for (const MachineBasicBlock *Pred : MBB.predecessors())
          if (L->contains(Pred))
            --N;
    NumPredsLeft[MBB.getNumber()] = N;
  }

  // Topological sort the CFG, with additional constraints:
  //  - Between a region header and the last block in the region, there can be
  //    no blocks not dominated by its header.
  //  - It's desirable to preserve the original block order when possible.
  // We use two ready lists; Preferred and Ready. Preferred has recently
  // processed successors, to help preserve block sequences from the original
  // order. Ready has the remaining ready blocks. EH blocks are picked first
  // from both queues. (A heavily simplified standalone sketch of this scheme
  // appears after this function.)
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbers>
      Preferred;
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbersBackwards>
      Ready;

  const auto *EHInfo = MF.getWasmEHFuncInfo();
  SortRegionInfo SRI(MLI, WEI);
  SmallVector<Entry, 4> Entries;
  for (MachineBasicBlock *MBB = &MF.front();;) {
    const SortRegion *R = SRI.getRegionFor(MBB);
    if (R) {
      // If MBB is a region header, add it to the active region list. We can't
      // place any blocks that it doesn't dominate until we see the end of the
      // region.
      if (R->getHeader() == MBB)
        Entries.push_back(Entry(R));
      // For each active region the block is in, decrement the count. If MBB is
      // the last block in an active region, take it off the list and pick up
      // any blocks deferred because the header didn't dominate them.
      for (Entry &E : Entries)
        if (E.TheRegion->contains(MBB) && --E.NumBlocksLeft == 0)
          for (auto *DeferredBlock : E.Deferred)
            Ready.push(DeferredBlock);
      while (!Entries.empty() && Entries.back().NumBlocksLeft == 0)
        Entries.pop_back();
    }
    // The main topological sort logic.
    for (MachineBasicBlock *Succ : MBB->successors()) {
      // Ignore backedges.
      if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
        if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
          continue;
      // Decrement the predecessor count. If it's now zero, it's ready.
      if (--NumPredsLeft[Succ->getNumber()] == 0) {
        // When we are in a SortRegion, we allow sorting of not only BBs that
        // belong to the current (innermost) region but also BBs that are
        // dominated by the current region header. But we should not do this
        // for exceptions, because there can be cases such as the following:
        // EHPad A's unwind destination (where the exception lands when it is
        // not caught by EHPad A) is EHPad B, so EHPad B does not belong to the
        // exception dominated by EHPad A. But EHPad B is dominated by EHPad A,
        // so EHPad B can be sorted within EHPad A's exception. This is
        // incorrect because we may end up delegating/rethrowing to an inner
        // scope in CFGStackify. So here we make sure those unwind destinations
        // are deferred until their unwind source's exception is sorted.
        if (EHInfo && EHInfo->hasUnwindSrcs(Succ)) {
          SmallPtrSet<MachineBasicBlock *, 4> UnwindSrcs =
              EHInfo->getUnwindSrcs(Succ);
          bool IsDeferred = false;
          for (Entry &E : Entries) {
            if (UnwindSrcs.count(E.TheRegion->getHeader())) {
              E.Deferred.push_back(Succ);
              IsDeferred = true;
              break;
            }
          }
          if (IsDeferred)
            continue;
        }
        Preferred.push(Succ);
      }
    }
    // Determine the block to follow MBB. First try to find a preferred block,
    // to preserve the original block order when possible.
    MachineBasicBlock *Next = nullptr;
    while (!Preferred.empty()) {
      Next = Preferred.top();
      Preferred.pop();
      // If Next isn't dominated by the top active region header, defer it
      // until that region is done.
      if (!Entries.empty() &&
          !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
        Entries.back().Deferred.push_back(Next);
        Next = nullptr;
        continue;
      }
      // If Next was originally ordered before MBB, and it isn't because it was
      // loop-rotated above the header, it's not preferred.
      if (Next->getNumber() < MBB->getNumber() &&
          (WasmDisableEHPadSort || !Next->isEHPad()) &&
          (!R || !R->contains(Next) ||
           R->getHeader()->getNumber() < Next->getNumber())) {
        Ready.push(Next);
        Next = nullptr;
        continue;
      }
      break;
    }
    // If we didn't find a suitable block in the Preferred list, check the
    // general Ready list.
    if (!Next) {
      // If there are no more blocks to process, we're done.
      if (Ready.empty()) {
        maybeUpdateTerminator(MBB);
        break;
      }
      for (;;) {
        Next = Ready.top();
        Ready.pop();
        // If Next isn't dominated by the top active region header, defer it
        // until that region is done.
        if (!Entries.empty() &&
            !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
          Entries.back().Deferred.push_back(Next);
          continue;
        }
        break;
      }
    }
    // Move the next block into place and iterate.
    Next->moveAfter(MBB);
    maybeUpdateTerminator(MBB);
    MBB = Next;
  }
  assert(Entries.empty() && "Active sort region list not finished");
  MF.RenumberBlocks();
  MDT.updateBlockNumbers();

#ifndef NDEBUG
  SmallSetVector<const SortRegion *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
    const SortRegion *Region = SRI.getRegionFor(&MBB);

    if (Region && &MBB == Region->getHeader()) {
      // Region header.
      if (Region->isLoop()) {
        // Loop header. The loop predecessor should be sorted above, and the
        // other predecessors should be backedges below.
        for (auto *Pred : MBB.predecessors())
          assert(
              (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) &&
              "Loop header predecessors must be loop predecessors or "
              "backedges");
      } else {
        // Exception header. All predecessors should be sorted above.
        for (auto *Pred : MBB.predecessors())
          assert(Pred->getNumber() < MBB.getNumber() &&
                 "Non-loop-header predecessors should be topologically sorted");
      }
      assert(OnStack.insert(Region) &&
             "Regions should be declared at most once.");

    } else {
      // Not a region header. All predecessors should be sorted above.
      for (auto *Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(SRI.getRegionFor(&MBB)) &&
             "Blocks must be nested in their regions");
    }
    while (OnStack.size() > 1 && &MBB == SRI.getBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a region header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}
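
// Illustrative sketch (not part of this pass): a standalone toy of the
// scheduling skeleton used by sortBlocks above, under heavy simplifying
// assumptions -- no sort regions, no EH pads, the Preferred/Ready pair
// collapsed into a single queue keyed on the original block index, and
// "backedge" approximated as any edge whose target does not come later in the
// original order. ToyMinIndexOnTop and toyTopoSort are hypothetical names for
// illustration; the code relies only on headers already included above. The
// skeleton is the same: count the forward-edge predecessors of every block,
// then repeatedly emit a ready block and decrement the counts of its
// successors, releasing them once their count reaches zero.
struct ToyMinIndexOnTop {
  bool operator()(unsigned A, unsigned B) const { return A > B; }
};

[[maybe_unused]] static std::vector<unsigned>
toyTopoSort(const std::vector<std::vector<unsigned>> &Succs) {
  const unsigned NumBlocks = static_cast<unsigned>(Succs.size());
  // Count forward-edge predecessors for each block, ignoring "backedges".
  std::vector<unsigned> NumPredsLeft(NumBlocks, 0);
  for (unsigned BB = 0; BB != NumBlocks; ++BB)
    for (unsigned Succ : Succs[BB])
      if (Succ > BB)
        ++NumPredsLeft[Succ];

  // Min-heap on the original index, so ties are broken toward the original
  // layout, loosely mirroring the Preferred queue above.
  PriorityQueue<unsigned, std::vector<unsigned>, ToyMinIndexOnTop> ToyReady;
  for (unsigned BB = 0; BB != NumBlocks; ++BB)
    if (NumPredsLeft[BB] == 0)
      ToyReady.push(BB);

  std::vector<unsigned> Order;
  while (!ToyReady.empty()) {
    unsigned BB = ToyReady.top();
    ToyReady.pop();
    Order.push_back(BB);
    // Placing BB may make its forward-edge successors ready.
    for (unsigned Succ : Succs[BB])
      if (Succ > BB && --NumPredsLeft[Succ] == 0)
        ToyReady.push(Succ);
  }
  return Order;
}

// For the diamond 0 -> {1, 2}, 1 -> 3, 2 -> 3 with a backedge 3 -> 0,
// toyTopoSort({{1, 2}, {3}, {3}, {0}}) returns {0, 1, 2, 3}: the backedge is
// ignored and the original order is preserved.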

bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n"
                       "********** Function: "
                    << MF.getName() << '\n');

  const auto &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
  auto &MDT = getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  // Liveness is not tracked for VALUE_STACK physreg.
  MF.getRegInfo().invalidateLiveness();

  // Sort the blocks, with contiguous sort regions.
  sortBlocks(MF, MLI, WEI, MDT);

  return true;
}