//===- MachineSink.cpp - Sinking for machine instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFG.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
31 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
32 #include "llvm/CodeGen/MachineCycleAnalysis.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineFunctionPass.h"
36 #include "llvm/CodeGen/MachineInstr.h"
37 #include "llvm/CodeGen/MachineLoopInfo.h"
38 #include "llvm/CodeGen/MachineOperand.h"
39 #include "llvm/CodeGen/MachinePostDominators.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/RegisterClassInfo.h"
42 #include "llvm/CodeGen/RegisterPressure.h"
43 #include "llvm/CodeGen/TargetInstrInfo.h"
44 #include "llvm/CodeGen/TargetPassConfig.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/CodeGen/TargetSubtargetInfo.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/InitializePasses.h"
51 #include "llvm/MC/MCRegisterInfo.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/BranchProbability.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstdint>
60 #include <utility>
61 #include <vector>
63 using namespace llvm;
65 #define DEBUG_TYPE "machine-sink"
static cl::opt<bool>
    SplitEdges("machine-sink-split",
               cl::desc("Split critical edges during machine sinking"),
               cl::init(true), cl::Hidden);

static cl::opt<bool>
    UseBlockFreqInfo("machine-sink-bfi",
                     cl::desc("Use block frequency info to find successors to sink"),
                     cl::init(true), cl::Hidden);

static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch threshold is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "the split critical edge"),
    cl::init(40), cl::Hidden);

static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
    "machine-sink-load-instrs-threshold",
    cl::desc("Do not try to find alias store for a load if there is an in-path "
             "block whose instruction number is higher than this threshold."),
    cl::init(2000), cl::Hidden);

static cl::opt<unsigned> SinkLoadBlocksThreshold(
    "machine-sink-load-blocks-threshold",
    cl::desc("Do not try to find alias store for a load if the number of "
             "blocks in the straight line is higher than this threshold."),
    cl::init(20), cl::Hidden);

static cl::opt<bool>
    SinkInstsIntoCycle("sink-insts-to-avoid-spills",
                       cl::desc("Sink instructions into cycles to avoid "
                                "register spills"),
                       cl::init(false), cl::Hidden);

static cl::opt<unsigned> SinkIntoCycleLimit(
    "machine-sink-cycle-limit",
    cl::desc("The maximum number of instructions considered for cycle sinking."),
    cl::init(50), cl::Hidden);
STATISTIC(NumSunk, "Number of machine instructions sunk");
STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
STATISTIC(NumSplit, "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");

namespace {
class MachineSinking : public MachineFunctionPass {
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;      // Machine register information
  MachineDominatorTree *DT = nullptr;      // Machine dominator tree
  MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
  MachineCycleInfo *CI = nullptr;
  MachineBlockFrequencyInfo *MBFI = nullptr;
  const MachineBranchProbabilityInfo *MBPI = nullptr;
  AliasAnalysis *AA = nullptr;
  RegisterClassInfo RegClassInfo;

  // Remember which edges have been considered for breaking.
  SmallSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>, 8>
      CEBCandidates;
  // Remember which edges we are about to split.
  // This is different from CEBCandidates since those edges
  // will be split.
  SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

  DenseSet<Register> RegsToClearKillFlags;

  using AllSuccsCache =
      DenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;

  /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
  /// post-dominated by another DBG_VALUE of the same variable location.
  /// This is necessary to detect sequences such as:
  ///     %0 = someinst
  ///     DBG_VALUE %0, !123, !DIExpression()
  ///     %1 = anotherinst
  ///     DBG_VALUE %1, !123, !DIExpression()
  /// Where if %0 were to sink, the DBG_VALUE should not sink with it, as that
  /// would re-order assignments.
  using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;

  /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
  /// debug instructions to sink.
  SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;

  /// Record of debug variables that have had their locations set in the
  /// current block.
  DenseSet<DebugVariable> SeenDbgVars;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
      HasStoreCache;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
           SmallVector<MachineInstr *>>
      StoreInstrCache;

  /// Cached BB's register pressure.
  DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
      CachedRegisterPressure;

  bool EnableSinkAndFold;
public:
  static char ID; // Pass identification

  MachineSinking() : MachineFunctionPass(ID) {
    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addRequired<MachineCycleInfoWrapperPass>();
    AU.addRequired<MachineBranchProbabilityInfo>();
    AU.addPreserved<MachineCycleInfoWrapperPass>();
    AU.addPreserved<MachineLoopInfo>();
    if (UseBlockFreqInfo)
      AU.addRequired<MachineBlockFrequencyInfo>();
    AU.addRequired<TargetPassConfig>();
  }

  void releaseMemory() override {
    CEBCandidates.clear();
  }
private:
  bool ProcessBlock(MachineBasicBlock &MBB);
  void ProcessDbgInst(MachineInstr &MI);
  bool isWorthBreakingCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
                                   MachineBasicBlock *To);

  bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
                       MachineInstr &MI);

  /// Postpone the splitting of the given critical
  /// edge (\p From, \p To).
  ///
  /// We do not split the edges on the fly. Indeed, this invalidates
  /// the dominance information and thus triggers a lot of updates
  /// of that information underneath.
  /// Instead, we postpone all the splits after each iteration of
  /// the main loop. That way, the information is at least valid
  /// for the lifetime of an iteration.
  ///
  /// \return True if the edge is marked as toSplit, false otherwise.
  /// False can be returned if, for instance, this is not profitable.
  bool PostponeSplitCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
                                 MachineBasicBlock *To, bool BreakPHIEdge);
  bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                       AllSuccsCache &AllSuccessors);

  /// If we sink a COPY inst, some debug users of its destination may no
  /// longer be dominated by the COPY, and will eventually be dropped.
  /// This is easily rectified by forwarding the non-dominated debug uses
  /// to the copy source.
  void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
                                     MachineBasicBlock *TargetBlock);
  bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
                               MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
                               bool &LocalUse) const;
  MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                      bool &BreakPHIEdge,
                                      AllSuccsCache &AllSuccessors);

  void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
                               SmallVectorImpl<MachineInstr *> &Candidates);
  bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);

  bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock *SuccToSinkTo,
                            AllSuccsCache &AllSuccessors);

  bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                       MachineBasicBlock *MBB);

  bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);

  SmallVector<MachineBasicBlock *, 4> &
  GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                         AllSuccsCache &AllSuccessors) const;

  std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);

  bool registerPressureSetExceedsLimit(unsigned NRegs,
                                       const TargetRegisterClass *RC,
                                       const MachineBasicBlock &MBB);
};

} // end anonymous namespace
char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)
/// Return true if a target defined block prologue instruction interferes
/// with a sink candidate.
static bool blockPrologueInterferes(const MachineBasicBlock *BB,
                                    MachineBasicBlock::const_iterator End,
                                    const MachineInstr &MI,
                                    const TargetRegisterInfo *TRI,
                                    const TargetInstrInfo *TII,
                                    const MachineRegisterInfo *MRI) {
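  // Walk the target-defined prologue instructions at the top of BB (up to
  // End) and report a conflict if any of them writes a register that MI
  // reads, or reads (or non-dead-defines) a register that MI defines.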
  for (MachineBasicBlock::const_iterator PI = BB->getFirstNonPHI(); PI != End;
       ++PI) {
    // Only check target defined prologue instructions
    if (!TII->isBasicBlockPrologue(*PI))
      continue;
    for (auto &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (MO.isUse()) {
        if (Reg.isPhysical() &&
            (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
          continue;
        if (PI->modifiesRegister(Reg, TRI))
          return true;
      } else {
        if (PI->readsRegister(Reg, TRI))
          return true;
        // Check for interference with non-dead defs
        auto *DefOp = PI->findRegisterDefOperand(Reg, false, true, TRI);
        if (DefOp && !DefOp->isDead())
          return true;
      }
    }
  }

  return false;
}
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI.isCopy())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual() ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  LLVM_DEBUG(dbgs() << "*** to: " << MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI.eraseFromParent();

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MRI->clearKillFlags(SrcReg);

  ++NumCoalesces;
  return true;
}
bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
                                        MachineBasicBlock *MBB) {
  if (MI.isCopy() || MI.mayLoadOrStore() ||
      MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
    return false;

  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  bool SawStore = true;
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't sink defs/uses of hard registers or if the instruction defines more
  // than one register.
  // Don't sink more than two register uses - it'll cover most of the cases and
  // greatly simplifies the register pressure checks.
  Register DefReg;
  Register UsedRegA, UsedRegB;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
        MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
        MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
      continue;
    if (!MO.isReg())
      return false;

    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isVirtual()) {
      if (MO.isDef()) {
        if (DefReg)
          return false;
        DefReg = Reg;
        continue;
      }

      if (UsedRegA == 0)
        UsedRegA = Reg;
      else if (UsedRegB == 0)
        UsedRegB = Reg;
      else
        return false;
      continue;
    }

    if (Reg.isPhysical() &&
        (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
      continue;

    return false;
  }
  // Scan uses of the destination register. Every use, except the last, must be
  // a copy, with a chain of copies terminating with either a copy into a hard
  // register, or a load/store instruction where the use is part of the
  // address (*not* the stored value).
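  // Schematically (illustrative pseudo machine IR, not from any real target),
  // an ADD feeding a load's address through a chain of copies:
  //   %a = ADD %x, %y        ; MI, the sink-and-fold candidate
  //   %b = COPY %a
  //   %v = LOAD %b           ; use is the address operand
  // may be rewritten so that the ADD is folded into the load's addressing
  // mode and the intermediate COPYs are deleted.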
  using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
  SmallVector<SinkInfo> SinkInto;
  SmallVector<Register> Worklist;

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  const TargetRegisterClass *RCA =
      UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
  const TargetRegisterClass *RCB =
      UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);

  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();

    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      ExtAddrMode MaybeAM;
      MachineInstr &UseInst = *MO.getParent();
      if (UseInst.isCopy()) {
        Register DstReg;
        if (const MachineOperand &O = UseInst.getOperand(0); O.isReg())
          DstReg = O.getReg();
        if (DstReg == 0)
          return false;
        if (DstReg.isVirtual()) {
          Worklist.push_back(DstReg);
          continue;
        }
        // If we are going to replace a copy, the original instruction must be
        // as cheap as a copy.
        if (!TII->isAsCheapAsAMove(MI))
          return false;
        // The hard register must be in the register class of the original
        // instruction's destination register.
        if (!RC->contains(DstReg))
          return false;
      } else if (UseInst.mayLoadOrStore()) {
        ExtAddrMode AM;
        if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))
          return false;
        MaybeAM = AM;
      } else {
        return false;
      }

      if (UseInst.getParent() != MI.getParent()) {
        // If the register class of the register we are replacing is a superset
        // of any of the register classes of the operands of the materialized
        // instruction don't consider that live range extended.
        const TargetRegisterClass *RCS = MRI->getRegClass(Reg);
        if (RCA && RCA->hasSuperClassEq(RCS))
          RCA = nullptr;
        else if (RCB && RCB->hasSuperClassEq(RCS))
          RCB = nullptr;
        if (RCA || RCB) {
          if (RCA == nullptr) {
            RCA = RCB;
            RCB = nullptr;
          }

          unsigned NRegs = !!RCA + !!RCB;
          if (RCA == RCB)
            RCB = nullptr;

          // Check we don't exceed register pressure at the destination.
          const MachineBasicBlock &MBB = *UseInst.getParent();
          if (RCB == nullptr) {
            if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
              return false;
          } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
                     registerPressureSetExceedsLimit(1, RCB, MBB)) {
            return false;
          }
        }
      }

      SinkInto.emplace_back(&UseInst, MaybeAM);
    }
  }

  if (SinkInto.empty())
    return false;
  // Now we know we can fold the instruction in all its users.
  for (auto &[SinkDst, MaybeAM] : SinkInto) {
    MachineInstr *New = nullptr;
    LLVM_DEBUG(dbgs() << "Sinking copy of"; MI.dump(); dbgs() << "into";
               SinkDst->dump());
    if (SinkDst->isCopy()) {
      // TODO: After performing the sink-and-fold, the original instruction is
      // deleted. Its value is still available (in a hard register), so if there
      // are debug instructions which refer to the (now deleted) virtual
      // register they could be updated to refer to the hard register, in
      // principle. However, it's not clear how to do that, moreover in some
      // cases the debug instructions may need to be replicated proportionally
      // to the number of the COPY instructions replaced and in some extreme
      // cases we can end up with quadratic increase in the number of debug
      // instructions.

      // Sink a copy of the instruction, replacing a COPY instruction.
      MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
      Register DstReg = SinkDst->getOperand(0).getReg();
      TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
      New = &*std::prev(InsertPt);
      if (!New->getDebugLoc())
        New->setDebugLoc(SinkDst->getDebugLoc());

      // The operand registers of the "sunk" instruction have their live range
      // extended and their kill flags may no longer be correct. Conservatively
      // clear the kill flags.
      if (UsedRegA)
        MRI->clearKillFlags(UsedRegA);
      if (UsedRegB)
        MRI->clearKillFlags(UsedRegB);
    } else {
      // Fold instruction into the addressing mode of a memory instruction.
      New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);

      // The registers of the addressing mode may have their live range extended
      // and their kill flags may no longer be correct. Conservatively clear the
      // kill flags.
      if (Register R = MaybeAM.BaseReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
      if (Register R = MaybeAM.ScaledReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
    }
    LLVM_DEBUG(dbgs() << "yielding"; New->dump());
    // Clear the StoreInstrCache, since we may invalidate it by erasing.
    if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
      StoreInstrCache.clear();
    SinkDst->eraseFromParent();
  }
  // Collect operands that need to be cleaned up because the registers no longer
  // exist (in COPYs and debug instructions). We cannot delete instructions or
  // clear operands while traversing register uses.
  SmallVector<MachineOperand *> Cleanup;
  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();
    for (MachineOperand &MO : MRI->use_operands(Reg)) {
      MachineInstr *U = MO.getParent();
      assert((U->isCopy() || U->isDebugInstr()) &&
             "Only debug uses and copies must remain");
      if (U->isCopy())
        Worklist.push_back(U->getOperand(0).getReg());
      Cleanup.push_back(&MO);
    }
  }

  // Delete the dead COPYs and clear operands in debug instructions
  for (MachineOperand *MO : Cleanup) {
    MachineInstr *I = MO->getParent();
    if (I->isCopy()) {
      I->eraseFromParent();
    } else {
      MO->setReg(0);
      MO->setSubReg(0);
    }
  }

  MI.eraseFromParent();
  return true;
}
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
                                             MachineBasicBlock *MBB,
                                             MachineBasicBlock *DefMBB,
                                             bool &BreakPHIEdge,
                                             bool &LocalUse) const {
  assert(Reg.isVirtual() && "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunken
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1:
  //   Predecessors according to CFG: %bb.0
  //   ...
  //   %def = DEC64_32r %x, implicit-def dead %eflags
  //   ...
  //   JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2:
  //   %p = PHI %y, %bb.0, %def, %bb.1
  if (all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
        MachineInstr *UseInst = MO.getParent();
        unsigned OpNo = MO.getOperandNo();
        MachineBasicBlock *UseBlock = UseInst->getParent();
        return UseBlock == MBB && UseInst->isPHI() &&
               UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
      })) {
    BreakPHIEdge = true;
    return true;
  }

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo + 1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}
/// Return true if this machine instruction loads from global offset table or
/// constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert(MI.mayLoad() && "Expected MI that loads!");

  // If we lost memory operands, conservatively assume that the instruction
  // reads from everything.
  if (MI.memoperands_empty())
    return true;

  for (MachineMemOperand *MemOp : MI.memoperands())
    if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
      if (PSV->isGOT() || PSV->isConstantPool())
        return true;

  return false;
}
void MachineSinking::FindCycleSinkCandidates(
    MachineCycle *Cycle, MachineBasicBlock *BB,
    SmallVectorImpl<MachineInstr *> &Candidates) {
  for (auto &MI : *BB) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);
    if (!TII->shouldSink(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "
                           "target\n");
      continue;
    }
    if (!isCycleInvariant(Cycle, MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");
      continue;
    }
    bool DontMoveAcrossStore = true;
    if (!MI.isSafeToMove(AA, DontMoveAcrossStore)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");
      continue;
    }
    if (MI.mayLoad() && !mayLoadFromGOTOrConstantPool(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Dont sink GOT or constant pool loads\n");
      continue;
    }
    if (MI.isConvergent())
      continue;

    const MachineOperand &MO = MI.getOperand(0);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;

    LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");
    Candidates.push_back(&MI);
  }
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  RegClassInfo.runOnMachineFunction(MF);
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  EnableSinkAndFold = PassConfig->getEnableSinkAndFold();

  bool EverMadeChange = false;

  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB : MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (const auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
                          << printMBBReference(*Pair.first) << " -- "
                          << printMBBReference(*NewSucc) << " -- "
                          << printMBBReference(*Pair.second) << '\n');
        if (MBFI)
          MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);

        MadeChange = true;
        ++NumSplit;
        CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
      } else
        LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  if (SinkInstsIntoCycle) {
    SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_begin(),
                                          CI->toplevel_end());
    for (auto *Cycle : Cycles) {
      MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
      if (!Preheader) {
        LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");
        continue;
      }
      SmallVector<MachineInstr *, 8> Candidates;
      FindCycleSinkCandidates(Cycle, Preheader, Candidates);

      // Walk the candidates in reverse order so that we start with the use
      // of a def-use chain, if there is any.
      // TODO: Sort the candidates using a cost-model.
      unsigned i = 0;
      for (MachineInstr *I : llvm::reverse(Candidates)) {
        if (i++ == SinkIntoCycleLimit) {
          LLVM_DEBUG(dbgs() << "CycleSink: Limit reached of instructions to "
                               "be analysed.");
          break;
        }

        if (!SinkIntoCycle(Cycle, *I))
          break;
        EverMadeChange = true;
        ++NumCycleSunk;
      }
    }
  }

  HasStoreCache.clear();
  StoreInstrCache.clear();

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}
bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  if ((!EnableSinkAndFold && MBB.succ_size() <= 1) || MBB.empty())
    return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable cycle there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and cycle depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up. Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr &MI = *I; // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI.isDebugOrPseudoInstr()) {
      if (MI.isDebugValue())
        ProcessDbgInst(MI);
      continue;
    }

    if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    // Can't sink anything out of a block that has less than two successors.
    if (MBB.succ_size() <= 1)
      continue;

    if (PerformTrivialForwardCoalescing(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  SeenDbgUsers.clear();
  SeenDbgVars.clear();
  // Recalculate the BB register pressure after sinking from this block.
  CachedRegisterPressure.clear();
  return MadeChange;
}
void MachineSinking::ProcessDbgInst(MachineInstr &MI) {
  // When we see DBG_VALUEs for registers, record any vreg it reads, so that
  // we know what to sink if the vreg def sinks.
  assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");

  DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                    MI.getDebugLoc()->getInlinedAt());
  bool SeenBefore = SeenDbgVars.contains(Var);

  for (MachineOperand &MO : MI.debug_operands()) {
    if (MO.isReg() && MO.getReg().isVirtual())
      SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));
  }

  // Record the variable for any DBG_VALUE, to avoid re-ordering any of them.
  SeenDbgVars.insert(Var);
}
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

  if (From->isSuccessor(To) &&
      MBPI->getEdgeProbability(From, To) <=
          BranchProbability(SplitEdgeProbabilityThreshold, 100))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (const MachineOperand &MO : MI.all_uses()) {
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (Reg.isPhysical())
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in same MBB,
      // claim it's likely we can sink these together.
      // If definition resides elsewhere, we aren't
      // blocking it from being sunk so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI.getParent())
        return true;
    }
  }

  return false;
}
bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
                                               MachineBasicBlock *FromBB,
                                               MachineBasicBlock *ToBB,
                                               bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return false;

  // Avoid breaking back edge. From == To means backedge for single BB cycle.
  if (!SplitEdges || FromBB == ToBB)
    return false;

  MachineCycle *FromCycle = CI->getCycle(FromBB);
  MachineCycle *ToCycle = CI->getCycle(ToBB);

  // Check for backedges of more "complex" cycles.
  if (FromCycle == ToCycle && FromCycle &&
      (!FromCycle->isReducible() || FromCycle->getHeader() == ToBB))
    return false;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // %bb.1:
  // v1024
  // Beq %bb.3
  // <fallthrough>
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
  //
  // %bb.1:
  // ...
  // Bne %bb.2
  // %bb.4:
  // v1024 =
  // B %bb.3
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock *Pred : ToBB->predecessors())
      if (Pred != FromBB && !DT->dominates(ToBB, Pred))
        return false;
  }

  ToSplit.insert(std::make_pair(FromBB, ToBB));

  return true;
}
std::vector<unsigned> &
MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB) {
  // Currently, to save compile time, MBB's register pressure will not change
  // in one ProcessBlock iteration because of CachedRegisterPressure, but MBB's
  // register pressure is changed after sinking any instructions into it.
  // FIXME: need an accurate and cheap register pressure estimation model here.
  auto RP = CachedRegisterPressure.find(&MBB);
  if (RP != CachedRegisterPressure.end())
    return RP->second;

  RegionPressure Pressure;
  RegPressureTracker RPTracker(Pressure);

  // Initialize the register pressure tracker.
  RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
                 /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);
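  // Walk the block bottom-up; each recede() call moves the tracker upwards
  // over one instruction, updating the running maximum pressure of every
  // pressure set. The final MaxSetPressure is what gets cached below.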
  for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
                                         MIE = MBB.instr_begin();
       MII != MIE; --MII) {
    const MachineInstr &MI = *std::prev(MII);
    if (MI.isDebugInstr() || MI.isPseudoProbe())
      continue;
    RegisterOperands RegOpers;
    RegOpers.collect(MI, *TRI, *MRI, false, false);
    RPTracker.recedeSkipDebugValues();
    assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
    RPTracker.recede(RegOpers);
  }

  RPTracker.closeRegion();
  auto It = CachedRegisterPressure.insert(
      std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));
  return It.first->second;
}
bool MachineSinking::registerPressureSetExceedsLimit(
    unsigned NRegs, const TargetRegisterClass *RC,
    const MachineBasicBlock &MBB) {
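  // A register class may feed several pressure sets; sinking NRegs registers
  // of class RC adds NRegs * RegWeight units of pressure to each of them.
  // Report overflow if any affected set would reach its target-defined limit.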
  unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
  const int *PS = TRI->getRegClassPressureSets(RC);
  std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
  for (; *PS != -1; PS++)
    if (Weight + BBRegisterPressure[*PS] >=
        TRI->getRegPressureSetLimit(*MBB.getParent(), *PS))
      return true;
  return false;
}
/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post dominate current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper cycle to a shallower
  // cycle, even if the latter post-dominates the former (PR21115).
  if (CI->getCycleDepth(MBB) > CI->getCycleDepth(SuccToSinkTo))
    return true;

  // Check if only use in post dominated block is PHI instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // If SuccToSinkTo post dominates MBB, it may still be profitable if MI
  // can be profitably sunk further into another block in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding successor is compile time expensive then cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  MachineCycle *MCycle = CI->getCycle(MBB);

  // If the instruction is not inside a cycle, it is not profitable to sink MI
  // to a post-dominating block SuccToSinkTo.
  if (!MCycle)
    return false;

  // If this instruction is inside a cycle and sinking it can shorten the live
  // ranges of more registers, it is still profitable.
  for (const MachineOperand &MO : MI.operands()) {
    // Ignore non-register operands.
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isPhysical()) {
      // Don't handle non-constant and non-ignorable physical register uses.
      if (MO.isUse() && !MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
        return false;
      continue;
    }

    // Users for the defs are all dominated by SuccToSinkTo.
    if (MO.isDef()) {
      // This def register's live range is shortened after sinking.
      bool LocalUse = false;
      if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
                                   LocalUse))
        return false;
    } else {
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (!DefMI)
        continue;
      MachineCycle *Cycle = CI->getCycle(DefMI->getParent());
      // DefMI is defined outside of the cycle. There should be no live range
      // impact for this operand. "Defined outside of the cycle" means:
      // 1: the definition is outside of the cycle, or
      // 2: the definition is in this cycle, but it is a PHI in the cycle
      //    header.
      if (Cycle != MCycle || (DefMI->isPHI() && Cycle && Cycle->isReducible() &&
                              Cycle->getHeader() == DefMI->getParent()))
        continue;
      // The DefMI is defined inside the cycle.
      // If sinking this operand makes some register pressure set exceed limit,
      // it is not profitable.
      if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg),
                                          *SuccToSinkTo)) {
        LLVM_DEBUG(dbgs() << "register pressure exceed limit, not profitable.");
        return false;
      }
    }
  }

  // If MI is in a cycle and all its operands are alive across the whole cycle,
  // or if no operand sinking makes a register pressure set exceed its limit,
  // it is profitable to sink MI.
  return true;
}
/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
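/// Successors are ordered so that the most attractive sink destination comes
/// first: ascending block frequency when profile information is available,
/// otherwise ascending cycle depth.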
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache ?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  for (MachineDomTreeNode *DTChild : DT->getNode(MBB)->children()) {
    // DomTree children of MBB that have MBB as immediate dominator are added.
    if (DTChild->getIDom()->getBlock() == MI.getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());
  }

  // Sort Successors according to their cycle depth or block frequency info.
  llvm::stable_sort(
      AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        bool HasBlockFreq = LHSFreq != 0 || RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : CI->getCycleDepth(L) < CI->getCycleDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert(MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue; // Ignore non-register operands.

    Register Reg = MO.getReg();
    if (Reg == 0) continue;

    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller cycle depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block. This can
  // happen with cycles.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  // It ought to be okay to sink instructions into an INLINEASM_BR target, but
  // only if we make sure that MI occurs _before_ an INLINEASM_BR instruction in
  // the source block (which this code does not yet do). So for now, forbid
  // doing so.
  if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
    return nullptr;

  if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
    return nullptr;

  return SuccToSinkTo;
}
/// Return true if MI is likely to be usable as a memory operation by the
/// implicit null check optimization.
///
/// This is a "best effort" heuristic, and should not be relied upon for
/// correctness. This returning true does not guarantee that the implicit null
/// check optimization is legal over MI, and this returning false does not
/// guarantee MI cannot possibly be used to do a null check.
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
                                             const TargetInstrInfo *TII,
                                             const TargetRegisterInfo *TRI) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  auto *MBB = MI.getParent();
  if (MBB->pred_size() != 1)
    return false;

  auto *PredMBB = *MBB->pred_begin();
  auto *PredBB = PredMBB->getBasicBlock();

  // Frontends that don't use implicit null checks have no reason to emit
  // branches with make.implicit metadata, and this function should always
  // return false for them.
  if (!PredBB ||
      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
    return false;

  const MachineOperand *BaseOp;
  int64_t Offset;
  bool OffsetIsScalable;
  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
    return false;

  if (!BaseOp->isReg())
    return false;

  if (!(MI.mayLoad() && !MI.isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseOp->getReg();
}
/// If the sunk instruction is a copy, try to forward the copy instead of
/// leaving an 'undef' DBG_VALUE in the original location. Don't do this if
/// there's any subregister weirdness involved. Returns true if copy
/// propagation occurred.
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI,
                                 Register Reg) {
  const MachineRegisterInfo &MRI = SinkInst.getMF()->getRegInfo();
  const TargetInstrInfo &TII = *SinkInst.getMF()->getSubtarget().getInstrInfo();

  // Copy DBG_VALUE operand and set the original to undef. We then check to
  // see whether this is something that can be copy-forwarded. If it isn't,
  // continue around the loop.

  const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;
  auto CopyOperands = TII.isCopyInstr(SinkInst);
  if (!CopyOperands)
    return false;
  SrcMO = CopyOperands->Source;
  DstMO = CopyOperands->Destination;

  // Check validity of forwarding this copy.
  bool PostRA = MRI.getNumVirtRegs() == 0;

  // Trying to forward between physical and virtual registers is too hard.
  if (Reg.isVirtual() != SrcMO->getReg().isVirtual())
    return false;

  // Only try virtual register copy-forwarding before regalloc, and physical
  // register copy-forwarding after regalloc.
  bool arePhysRegs = !Reg.isVirtual();
  if (arePhysRegs != PostRA)
    return false;

  // Pre-regalloc, only forward if all subregisters agree (or there are no
  // subregs at all). More analysis might recover some forwardable copies.
  if (!PostRA)
    for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg))
      if (DbgMO.getSubReg() != SrcMO->getSubReg() ||
          DbgMO.getSubReg() != DstMO->getSubReg())
        return false;

  // Post-regalloc, we may be sinking a DBG_VALUE of a sub or super-register
  // of this copy. Only forward the copy if the DBG_VALUE operand exactly
  // matches the copy destination.
  if (PostRA && Reg != DstMO->getReg())
    return false;

  for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg)) {
    DbgMO.setReg(SrcMO->getReg());
    DbgMO.setSubReg(SrcMO->getSubReg());
  }
  return true;
}
using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;

/// Sink an instruction and its associated debug instructions.
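/// Each entry of \p DbgValuesToSink pairs a DBG_VALUE with the registers it
/// shares with MI; a clone of the DBG_VALUE is placed at the sink point, and
/// the original is either forwarded to the copy source or set to undef.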
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
                        MachineBasicBlock::iterator InsertPos,
                        ArrayRef<MIRegs> DbgValuesToSink) {
  // If we cannot find a location to use (merge with), then we erase the debug
  // location to prevent debug-info driven tools from potentially reporting
  // wrong location information.
  if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
    MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
                                                 InsertPos->getDebugLoc()));
  else
    MI.setDebugLoc(DebugLoc());

  // Move the instruction.
  MachineBasicBlock *ParentBlock = MI.getParent();
  SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
                      ++MachineBasicBlock::iterator(MI));

  // Sink a copy of debug users to the insert position. Mark the original
  // DBG_VALUE location as 'undef', indicating that any earlier variable
  // location should be terminated as we've optimised away the value at this
  // point.
  for (const auto &DbgValueToSink : DbgValuesToSink) {
    MachineInstr *DbgMI = DbgValueToSink.first;
    MachineInstr *NewDbgMI = DbgMI->getMF()->CloneMachineInstr(DbgMI);
    SuccToSinkTo.insert(InsertPos, NewDbgMI);

    bool PropagatedAllSunkOps = true;
    for (unsigned Reg : DbgValueToSink.second) {
      if (DbgMI->hasDebugOperandForReg(Reg)) {
        if (!attemptDebugCopyProp(MI, *DbgMI, Reg)) {
          PropagatedAllSunkOps = false;
          break;
        }
      }
    }
    if (!PropagatedAllSunkOps)
      DbgMI->setDebugValueUndef();
  }
}
/// hasStoreBetween - check if there is a store between straight line blocks
/// From and To.
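/// Results are memoized: HasStoreCache records definite answers for a block
/// pair, while StoreInstrCache records the stores found between a pair so
/// that later queries only re-run the alias check against a new instruction.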
bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
                                     MachineBasicBlock *To, MachineInstr &MI) {
  // Make sure From and To are in straight line which means From dominates To
  // and To post dominates From.
  if (!DT->dominates(From, To) || !PDT->dominates(To, From))
    return true;

  auto BlockPair = std::make_pair(From, To);

  // Has this block pair been queried before, with a definite cached result?
  if (auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
    return It->second;

  if (auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
    return llvm::any_of(It->second, [&](MachineInstr *I) {
      return I->mayAlias(AA, MI, false);
    });

  bool SawStore = false;
  bool HasAliasedStore = false;
  DenseSet<MachineBasicBlock *> HandledBlocks;
  DenseSet<MachineBasicBlock *> HandledDomBlocks;
  // Go through all reachable blocks from From.
  for (MachineBasicBlock *BB : depth_first(From)) {
    // We insert the instruction at the start of block To, so no need to worry
    // about stores inside To.
    // Stores in block From were already considered on entry to function
    // SinkInstruction.
    if (BB == To || BB == From)
      continue;

    // We already handled this BB in a previous iteration.
    if (HandledBlocks.count(BB))
      continue;

    HandledBlocks.insert(BB);
    // If To post-dominates BB, BB must be on a path from From to To.
    if (PDT->dominates(To, BB)) {
      if (!HandledDomBlocks.count(BB))
        HandledDomBlocks.insert(BB);

      // If this BB is too big or the number of blocks on the straight line
      // between From and To is too big, stop searching to save compile time.
      if (BB->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold) ||
          HandledDomBlocks.size() > SinkLoadBlocksThreshold) {
        for (auto *DomBB : HandledDomBlocks) {
          if (DomBB != BB && DT->dominates(DomBB, BB))
            HasStoreCache[std::make_pair(DomBB, To)] = true;
          else if (DomBB != BB && DT->dominates(BB, DomBB))
            HasStoreCache[std::make_pair(From, DomBB)] = true;
        }
        HasStoreCache[BlockPair] = true;
        return true;
      }
    }

    for (MachineInstr &I : *BB) {
      // Treat as alias conservatively for a call or an ordered memory
      // operation.
      if (I.isCall() || I.hasOrderedMemoryRef()) {
        for (auto *DomBB : HandledDomBlocks) {
          if (DomBB != BB && DT->dominates(DomBB, BB))
            HasStoreCache[std::make_pair(DomBB, To)] = true;
          else if (DomBB != BB && DT->dominates(BB, DomBB))
            HasStoreCache[std::make_pair(From, DomBB)] = true;
        }
        HasStoreCache[BlockPair] = true;
        return true;
      }

      if (I.mayStore()) {
        SawStore = true;
        // We still have a chance to sink MI if all stores between are not
        // aliased to MI.
        // Cache all store instructions, so that we don't need to go through
        // all From-reachable blocks for the next load instruction.
        if (I.mayAlias(AA, MI, false))
          HasAliasedStore = true;
        StoreInstrCache[BlockPair].push_back(&I);
      }
    }
  }

  // If there is no store at all, cache the result.
  if (!SawStore)
    HasStoreCache[BlockPair] = false;
  return HasAliasedStore;
}
/// Sink instructions into cycles if profitable. This especially tries to
/// prevent register spills caused by register pressure if there is little to
/// no overhead moving instructions into cycles.
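/// An instruction is sunk only when every use of its defined register is a
/// COPY inside \p Cycle; the sink point is the nearest common dominator of
/// those uses.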
bool MachineSinking::SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I) {
  LLVM_DEBUG(dbgs() << "CycleSink: Finding sink block for: " << I);
  MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
  assert(Preheader && "Cycle sink needs a preheader block");
  MachineBasicBlock *SinkBlock = nullptr;
  bool CanSink = true;
  const MachineOperand &MO = I.getOperand(0);

  for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing use: " << MI);
    if (!Cycle->contains(MI.getParent())) {
      LLVM_DEBUG(dbgs() << "CycleSink: Use not in cycle, can't sink.\n");
      CanSink = false;
      break;
    }

    // FIXME: Come up with a proper cost model that estimates whether sinking
    // the instruction (and thus possibly executing it on every cycle
    // iteration) is more expensive than a register.
    // For now assumes that copies are cheap and thus almost always worth it.
    if (!MI.isCopy()) {
      LLVM_DEBUG(dbgs() << "CycleSink: Use is not a copy\n");
      CanSink = false;
      break;
    }
    if (!SinkBlock) {
      SinkBlock = MI.getParent();
      LLVM_DEBUG(dbgs() << "CycleSink: Setting sink block to: "
                        << printMBBReference(*SinkBlock) << "\n");
      continue;
    }
    SinkBlock = DT->findNearestCommonDominator(SinkBlock, MI.getParent());
    if (!SinkBlock) {
      LLVM_DEBUG(dbgs() << "CycleSink: Can't find nearest dominator\n");
      CanSink = false;
      break;
    }
    LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: "
                      << printMBBReference(*SinkBlock) << "\n");
  }

  if (!CanSink) {
    LLVM_DEBUG(dbgs() << "CycleSink: Can't sink instruction.\n");
    return false;
  }
  if (!SinkBlock) {
    LLVM_DEBUG(dbgs() << "CycleSink: Not sinking, can't find sink block.\n");
    return false;
  }
  if (SinkBlock == Preheader) {
    LLVM_DEBUG(
        dbgs() << "CycleSink: Not sinking, sink block is the preheader\n");
    return false;
  }
  if (SinkBlock->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold)) {
    LLVM_DEBUG(
        dbgs() << "CycleSink: Not Sinking, block too large to analyse.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "CycleSink: Sinking instruction!\n");
  SinkBlock->splice(SinkBlock->SkipPHIsAndLabels(SinkBlock->begin()), Preheader,
                    I);

  // Conservatively clear any kill flags on uses of sunk instruction
  for (MachineOperand &MO : I.operands()) {
    if (MO.isReg() && MO.readsReg())
      RegsToClearKillFlags.insert(MO.getReg());
  }

  // The instruction is moved from its basic block, so do not retain the
  // debug information.
  assert(!I.isDebugInstr() && "Should not sink debug inst");
  I.setDebugLoc(DebugLoc());
  return true;
}
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
                                     AllSuccsCache &AllSuccessors) {
  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't break implicit null checks. This is a performance heuristic, and not
  // required for correctness.
  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI.getParent();
  MachineBasicBlock *SuccToSinkTo =
      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

  // If there are no outputs, it must have side-effects.
  if (!SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS.
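  // Illustrative scenario (a sketch, not from the original comment): on x86
  // an instruction may carry a dead implicit def of EFLAGS while EFLAGS is
  // live-in to SuccToSinkTo along another path; sinking it there would
  // insert a fresh clobber of the still-live flags.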
  for (const MachineOperand &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg == 0 || !Reg.isPhysical())
      continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, this is a critical edge.
  // Decide if we can sink along it or need to break the edge.
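  // A sketch of the shape being checked (illustrative):
  //
  //   %bb.0 (ParentBlock) --\
  //                          >--> %bb.2 (SuccToSinkTo)
  //   %bb.1 ----------------/
  //
  // The edge %bb.0 -> %bb.2 is critical because %bb.2 has another
  // predecessor, so a load sunk into %bb.2 could observe stores executed on
  // the %bb.1 path.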
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool Store =
        MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true;
    if (!MI.isSafeToMove(AA, Store)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a cycle.
    if (!TryBreak && CI->getCycle(SuccToSinkTo) &&
        (!CI->getCycle(SuccToSinkTo)->isReducible() ||
         CI->getCycle(SuccToSinkTo)->getHeader() == SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      // Mark this edge as to be split.
      // If the edge can actually be split, the next iteration of the main loop
      // will sink MI in the newly created block.
      bool Status =
          PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!Status)
        LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                             "break critical edge\n");
      // The instruction will not be sunk this time.
      return false;
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
                                            SuccToSinkTo, BreakPHIEdge);
    if (!Status)
      LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                           "break critical edge\n");
    // The instruction will not be sunk this time.
    return false;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos =
      SuccToSinkTo->SkipPHIsAndLabels(SuccToSinkTo->begin());
  if (blockPrologueInterferes(SuccToSinkTo, InsertPos, MI, TRI, TII, MRI)) {
    LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
    return false;
  }

  // Collect debug users of any vreg that this inst defines.
  SmallVector<MIRegs, 4> DbgUsersToSink;
  for (auto &MO : MI.all_defs()) {
    if (!MO.getReg().isVirtual())
      continue;
    if (!SeenDbgUsers.count(MO.getReg()))
      continue;

    // Sink any users that don't pass any other DBG_VALUEs for this variable.
    auto &Users = SeenDbgUsers[MO.getReg()];
    for (auto &User : Users) {
      MachineInstr *DbgMI = User.getPointer();
      if (User.getInt()) {
        // This DBG_VALUE would re-order assignments. If we can't copy-propagate
        // it, it can't be recovered. Set it undef.
        if (!attemptDebugCopyProp(MI, *DbgMI, MO.getReg()))
          DbgMI->setDebugValueUndef();
      } else {
        DbgUsersToSink.push_back(
            {DbgMI, SmallVector<unsigned, 2>(1, MO.getReg())});
      }
    }
  }

  // After sinking, some debug users may not be dominated any more. If possible,
  // copy-propagate their operands. As it's expensive, don't do this if there's
  // no debuginfo in the program.
  if (MI.getMF()->getFunction().getSubprogram() && MI.isCopy())
    SalvageUnsunkDebugUsersOfCopy(MI, SuccToSinkTo);

  performSink(MI, *SuccToSinkTo, InsertPos, DbgUsersToSink);

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  // Note that we have to clear the kill flags for any register this instruction
  // uses as we may sink over another instruction which currently kills the
  // used registers.
  for (MachineOperand &MO : MI.all_uses())
    RegsToClearKillFlags.insert(MO.getReg()); // Remember to clear kill flags.

  return true;
}

void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
    MachineInstr &MI, MachineBasicBlock *TargetBlock) {
  assert(MI.isCopy());
  assert(MI.getOperand(1).isReg());

  // Enumerate all users of vreg operands that are def'd. Skip those that will
  // be sunk. For the rest, if they are not dominated by the block we will sink
  // MI into, propagate the copy source to them.
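  // For example (a hedged sketch): given
  //   %1 = COPY %0
  //   ...
  //   DBG_VALUE %1, ...   (in a block not dominated by TargetBlock)
  // the DBG_VALUE cannot follow the sunk COPY, but it can be rewritten to
  // refer to the copy source %0 so the variable location is not lost.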
  SmallVector<MachineInstr *, 4> DbgDefUsers;
  SmallVector<Register, 4> DbgUseRegs;
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  for (auto &MO : MI.all_defs()) {
    if (!MO.getReg().isVirtual())
      continue;
    DbgUseRegs.push_back(MO.getReg());
    for (auto &User : MRI.use_instructions(MO.getReg())) {
      if (!User.isDebugValue() || DT->dominates(TargetBlock, User.getParent()))
        continue;

      // If it is in the same block, it will either sink or be use-before-def.
      if (User.getParent() == MI.getParent())
        continue;

      assert(User.hasDebugOperandForReg(MO.getReg()) &&
             "DBG_VALUE user of vreg, but has no operand for it?");
      DbgDefUsers.push_back(&User);
    }
  }

  // Point the users of this copy that are no longer dominated, at the source
  // of the copy.
  for (auto *User : DbgDefUsers) {
    for (auto &Reg : DbgUseRegs) {
      for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) {
        DbgOp.setReg(MI.getOperand(1).getReg());
        DbgOp.setSubReg(MI.getOperand(1).getSubReg());
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// This pass is not intended to be a replacement or a complete alternative
// for the pre-ra machine sink pass. It is only designed to sink COPY
// instructions which should be handled after RA.
//
// This pass sinks COPY instructions into a successor block, if the COPY is not
// used in the current block and the COPY is live-in to a single successor
// (i.e., doesn't require the COPY to be duplicated). This avoids executing the
// copy on paths where its result isn't needed. This also exposes additional
// opportunities for dead copy elimination and shrink wrapping.
//
// These copies were either not handled by the MachineSink pass or were
// inserted after it ran. As an example of the former case, the MachineSink
// pass cannot sink COPY instructions with allocatable source registers; for
// AArch64, copies of this kind are frequently used to move function parameters
// (PhysReg) into virtual registers in the entry block.
//
// For the machine IR below, this pass will sink %w19 in the entry into its
// successor (%bb.1) because %w19 is only live-in in %bb.1.
// %bb.0:
//    %wzr = SUBSWri %w1, 1
//    %w19 = COPY %w0
//    Bcc 11, %bb.2
// %bb.1:
//    Live Ins: %w19
//    BL @fun
//    %w0 = ADDWrr %w0, %w19
//    RET %w0
// %bb.2:
//    %w0 = COPY %wzr
//    RET %w0
// As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
// able to see %bb.0 as a candidate.
//===----------------------------------------------------------------------===//
namespace {

class PostRAMachineSinking : public MachineFunctionPass {
public:
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;
  PostRAMachineSinking() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override { return "PostRA Machine Sink"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  /// Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;

  /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an
  /// entry in this map for each unit it touches. The DBG_VALUE's entry
  /// consists of a pointer to the instruction itself, and a vector of registers
  /// referred to by the instruction that overlap the key register unit.
  DenseMap<unsigned, SmallVector<MIRegs, 2>> SeenDbgInstrs;

  /// Sink Copy instructions unused in the same block close to their uses in
  /// successors.
  bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
                     const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
};

} // namespace

char PostRAMachineSinking::ID = 0;
char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;

INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
                "PostRA Machine Sink", false, false)

static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
                                  const TargetRegisterInfo *TRI) {
  LiveRegUnits LiveInRegUnits(*TRI);
  LiveInRegUnits.addLiveIns(MBB);
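  // LiveRegUnits::available(Reg) is false when any register unit of Reg is
  // live, so this reports Reg as aliasing a live-in when Reg itself or any
  // overlapping register appears in MBB's live-in set.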
  return !LiveInRegUnits.available(Reg);
}

static MachineBasicBlock *
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
                      unsigned Reg, const TargetRegisterInfo *TRI) {
  // Try to find a single sinkable successor in which Reg is live-in.
  MachineBasicBlock *BB = nullptr;
  for (auto *SI : SinkableBBs) {
    if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
      // If BB is set here, Reg is live-in to at least two sinkable successors,
      // so quit.
      if (BB)
        return nullptr;
      BB = SI;
    }
  }
  // Reg is not live-in to any sinkable successors.
  if (!BB)
    return nullptr;

  // Check if any register aliased with Reg is live-in in other successors.
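  // If such a successor exists, sinking the def into BB would leave that
  // other path reading a value of Reg that is no longer produced on it.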
  for (auto *SI : CurBB.successors()) {
    if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
      return nullptr;
  }
  return BB;
}

static MachineBasicBlock *
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
                      ArrayRef<unsigned> DefedRegsInCopy,
                      const TargetRegisterInfo *TRI) {
  MachineBasicBlock *SingleBB = nullptr;
  for (auto DefReg : DefedRegsInCopy) {
    MachineBasicBlock *BB =
        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
    if (!BB || (SingleBB && SingleBB != BB))
      return nullptr;
    SingleBB = BB;
  }
  return SingleBB;
}

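/// The copy being sunk out of CurBB keeps its source register live into the
/// successor, so if some later instruction in CurBB carried the kill flag for
/// SrcReg, clear it there and mark the sunk copy's own use as the kill
/// instead.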
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
                           SmallVectorImpl<unsigned> &UsedOpsInCopy,
                           LiveRegUnits &UsedRegUnits,
                           const TargetRegisterInfo *TRI) {
  for (auto U : UsedOpsInCopy) {
    MachineOperand &MO = MI->getOperand(U);
    Register SrcReg = MO.getReg();
    if (!UsedRegUnits.available(SrcReg)) {
      MachineBasicBlock::iterator NI = std::next(MI->getIterator());
      for (MachineInstr &UI : make_range(NI, CurBB.end())) {
        if (UI.killsRegister(SrcReg, TRI)) {
          UI.clearRegisterKills(SrcReg, TRI);
          MO.setIsKill(true);
          break;
        }
      }
    }
  }
}

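/// Update SuccBB's live-in list after sinking the copy into it: the copy's
/// defined registers (and their subregisters) stop being live-in because the
/// definition now happens inside SuccBB, while the copy's source registers
/// become live-in, with lane masks accumulated from their register units.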
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
                         SmallVectorImpl<unsigned> &UsedOpsInCopy,
                         SmallVectorImpl<unsigned> &DefedRegsInCopy) {
  MachineFunction &MF = *SuccBB->getParent();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (unsigned DefReg : DefedRegsInCopy)
    for (MCPhysReg S : TRI->subregs_inclusive(DefReg))
      SuccBB->removeLiveIn(S);
  for (auto U : UsedOpsInCopy) {
    Register SrcReg = MI->getOperand(U).getReg();
    LaneBitmask Mask;
    for (MCRegUnitMaskIterator S(SrcReg, TRI); S.isValid(); ++S)
      Mask |= (*S).second;
    SuccBB->addLiveIn(SrcReg, Mask);
  }
  SuccBB->sortUniqueLiveIns();
}

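/// Decide whether MI can be moved below all the instructions already scanned
/// behind it (summarized in ModifiedRegUnits/UsedRegUnits): a def that some
/// later instruction reads or clobbers, or a use that a later instruction
/// clobbers, is a dependency that blocks sinking. On success, the indices of
/// MI's use operands and its defined registers are collected for the caller.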
static bool hasRegisterDependency(MachineInstr *MI,
                                  SmallVectorImpl<unsigned> &UsedOpsInCopy,
                                  SmallVectorImpl<unsigned> &DefedRegsInCopy,
                                  LiveRegUnits &ModifiedRegUnits,
                                  LiveRegUnits &UsedRegUnits) {
  bool HasRegDependency = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef()) {
      if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
        HasRegDependency = true;
        break;
      }
      DefedRegsInCopy.push_back(Reg);

      // FIXME: instead of isUse(), readsReg() would be a better fix here.
      // For example, we can ignore modifications in reg with undef. However,
      // it's not perfectly clear if skipping the internal read is safe in all
      // other targets.
    } else if (MO.isUse()) {
      if (!ModifiedRegUnits.available(Reg)) {
        HasRegDependency = true;
        break;
      }
      UsedOpsInCopy.push_back(i);
    }
  }
  return HasRegDependency;
}

bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
                                         MachineFunction &MF,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII) {
  SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
  // FIXME: For now, we sink only to a successor which has a single predecessor
  // so that we can directly sink COPY instructions to the successor without
  // adding any new block or branch instruction.
  for (MachineBasicBlock *SI : CurBB.successors())
    if (!SI->livein_empty() && SI->pred_size() == 1)
      SinkableBBs.insert(SI);

  if (SinkableBBs.empty())
    return false;

  bool Changed = false;

  // Track which registers have been modified and used between the end of the
  // block and the current instruction.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  SeenDbgInstrs.clear();

  for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(CurBB))) {
    // Track the operand index for use in Copy.
    SmallVector<unsigned, 2> UsedOpsInCopy;
    // Track the register number defined in Copy.
    SmallVector<unsigned, 2> DefedRegsInCopy;

    // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
    // for DBG_VALUEs later, record them when they're encountered.
    if (MI.isDebugValue() && !MI.isDebugRef()) {
      SmallDenseMap<MCRegister, SmallVector<unsigned, 2>, 4> MIUnits;
      bool IsValid = true;
      for (MachineOperand &MO : MI.debug_operands()) {
        if (MO.isReg() && MO.getReg().isPhysical()) {
          // Bail if we can already tell the sink would be rejected, rather
          // than needlessly accumulating lots of DBG_VALUEs.
          if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
                                    ModifiedRegUnits, UsedRegUnits)) {
            IsValid = false;
            break;
          }

          // Record debug use of each reg unit.
          for (MCRegUnit Unit : TRI->regunits(MO.getReg()))
            MIUnits[Unit].push_back(MO.getReg());
        }
      }
      if (IsValid) {
        for (auto &RegOps : MIUnits)
          SeenDbgInstrs[RegOps.first].emplace_back(&MI,
                                                   std::move(RegOps.second));
      }
      continue;
    }

    if (MI.isDebugOrPseudoInstr())
      continue;

    // Do not move any instruction across a function call.
    if (MI.isCall())
      return false;

    if (!MI.isCopy() || !MI.getOperand(0).isRenamable()) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }

    // Don't sink the COPY if it would violate a register dependency.
    if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
                              ModifiedRegUnits, UsedRegUnits)) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
           "Unexpected SrcReg or DefReg");
    MachineBasicBlock *SuccBB =
        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
    // Don't sink if we cannot find a single sinkable successor in which Reg
    // is live-in.
    if (!SuccBB) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
           "Unexpected predecessor");

    // Collect DBG_VALUEs that must sink with this copy. We've previously
    // recorded which reg units DBG_VALUEs read; if this instruction writes
    // any of those units then the corresponding DBG_VALUEs must sink.
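    // A MapVector keyed on the DBG_VALUE keeps each instruction unique (one
    // DBG_VALUE may cover several of the copy's def units) while preserving a
    // deterministic sinking order.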
    MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
    for (auto &MO : MI.all_defs()) {
      for (MCRegUnit Unit : TRI->regunits(MO.getReg())) {
        for (const auto &MIRegs : SeenDbgInstrs.lookup(Unit)) {
          auto &Regs = DbgValsToSinkMap[MIRegs.first];
          for (unsigned Reg : MIRegs.second)
            Regs.push_back(Reg);
        }
      }
    }
    auto DbgValsToSink = DbgValsToSinkMap.takeVector();

    LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB);

    MachineBasicBlock::iterator InsertPos =
        SuccBB->SkipPHIsAndLabels(SuccBB->begin());
    if (blockPrologueInterferes(SuccBB, InsertPos, MI, TRI, TII, nullptr)) {
      LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
      continue;
    }

    // Clear the kill flag if SrcReg is killed between MI and the end of the
    // block.
    clearKillFlags(&MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
    performSink(MI, *SuccBB, InsertPos, DbgValsToSink);
    updateLiveIn(&MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);

    Changed = true;
    ++NumPostRACopySink;
  }
  return Changed;
}

bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  bool Changed = false;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  ModifiedRegUnits.init(*TRI);
  UsedRegUnits.init(*TRI);
  for (auto &BB : MF)
    Changed |= tryToSinkCopy(BB, MF, TRI, TII);

  return Changed;
}