//===- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
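//
// Purely as an illustration (a hand-written sketch, not taken from a real test
// case): if a value %v is computed in %bb.0 but only used in one successor
// %bb.1, the computation is moved into %bb.1, so the other successor %bb.2
// never executes it:
//
//   Before:                     After:
//   %bb.0:                      %bb.0:
//     %v = <computation>          Bcc %bb.2
//     Bcc %bb.2                 %bb.1:
//   %bb.1:                        %v = <computation>
//     <use of %v>                 <use of %v>
//   %bb.2:                      %bb.2:
//     <no use of %v>              <no use of %v>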
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "machine-sink"
static cl::opt<bool>
    SplitEdges("machine-sink-split",
               cl::desc("Split critical edges during machine sinking"),
               cl::init(true), cl::Hidden);

static cl::opt<bool>
    UseBlockFreqInfo("machine-sink-bfi",
                     cl::desc("Use block frequency info to find successors to sink"),
                     cl::init(true), cl::Hidden);
static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch threshold is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "split critical edge"),
    cl::init(40), cl::Hidden);
static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
    "machine-sink-load-instrs-threshold",
    cl::desc("Do not try to find an aliasing store for a load if there is an "
             "in-path block whose instruction count is higher than this "
             "threshold."),
    cl::init(2000), cl::Hidden);
static cl::opt<unsigned> SinkLoadBlocksThreshold(
    "machine-sink-load-blocks-threshold",
    cl::desc("Do not try to find an aliasing store for a load if the number "
             "of blocks on the straight-line path is higher than this "
             "threshold."),
    cl::init(20), cl::Hidden);
static cl::opt<bool>
    SinkInstsIntoCycle("sink-insts-to-avoid-spills",
                       cl::desc("Sink instructions into cycles to avoid "
                                "register spills"),
                       cl::init(false), cl::Hidden);
static cl::opt<unsigned> SinkIntoCycleLimit(
    "machine-sink-cycle-limit",
    cl::desc("The maximum number of instructions considered for cycle sinking."),
    cl::init(50), cl::Hidden);
STATISTIC(NumSunk, "Number of machine instructions sunk");
STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
STATISTIC(NumSplit, "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
namespace {

class MachineSinking : public MachineFunctionPass {
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;      // Machine register information
  MachineDominatorTree *DT = nullptr;      // Machine dominator tree
  MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
  MachineCycleInfo *CI = nullptr;
  MachineBlockFrequencyInfo *MBFI = nullptr;
  const MachineBranchProbabilityInfo *MBPI = nullptr;
  AliasAnalysis *AA = nullptr;
  RegisterClassInfo RegClassInfo;
  // Remember which edges have been considered for breaking.
  SmallSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>, 8>
      CEBCandidates;

  // Remember which edges we are about to split.
  // This is different from CEBCandidates since those edges
  // will be split.
  SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

  DenseSet<Register> RegsToClearKillFlags;

  using AllSuccsCache =
      DenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;
  /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
  /// post-dominated by another DBG_VALUE of the same variable location.
  /// This is necessary to detect sequences such as:
  ///     %0 = someinst
  ///     DBG_VALUE %0, !123, !DIExpression()
  ///     %1 = anotherinst
  ///     DBG_VALUE %1, !123, !DIExpression()
  /// Where if %0 were to sink, the DBG_VALUE should not sink with it, as that
  /// would re-order assignments.
  using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;

  /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
  /// debug instructions to sink.
  SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;
  /// Record of debug variables that have had their locations set in the
  /// current block.
  DenseSet<DebugVariable> SeenDbgVars;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
      HasStoreCache;

  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
           SmallVector<MachineInstr *>>
      StoreInstrCache;

  /// Cached BB's register pressure.
  DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
      CachedRegisterPressure;
  bool EnableSinkAndFold;

public:
  static char ID; // Pass identification

  MachineSinking() : MachineFunctionPass(ID) {
    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addRequired<MachineCycleInfoWrapperPass>();
    AU.addRequired<MachineBranchProbabilityInfo>();
    AU.addPreserved<MachineCycleInfoWrapperPass>();
    AU.addPreserved<MachineLoopInfo>();
    if (UseBlockFreqInfo)
      AU.addRequired<MachineBlockFrequencyInfo>();
    AU.addRequired<TargetPassConfig>();
  }
  void releaseMemory() override {
    CEBCandidates.clear();
  }

private:
  bool ProcessBlock(MachineBasicBlock &MBB);
  void ProcessDbgInst(MachineInstr &MI);
  bool isWorthBreakingCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
                                   MachineBasicBlock *To);

  bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
                       MachineInstr &MI);

  /// Postpone the splitting of the given critical
  /// edge (\p From, \p To).
  ///
  /// We do not split the edges on the fly. Indeed, this invalidates
  /// the dominance information and thus triggers a lot of updates
  /// of that information underneath.
  /// Instead, we postpone all the splits after each iteration of
  /// the main loop. That way, the information is at least valid
  /// for the lifetime of an iteration.
  ///
  /// \return True if the edge is marked as toSplit, false otherwise.
  /// False can be returned if, for instance, this is not profitable.
  bool PostponeSplitCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
                                 MachineBasicBlock *To, bool BreakPHIEdge);

  bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                       AllSuccsCache &AllSuccessors);

  /// If we sink a COPY inst, some debug users of its destination may no
  /// longer be dominated by the COPY, and will eventually be dropped.
  /// This is easily rectified by forwarding the non-dominated debug uses
  /// to the copy source.
  void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
                                     MachineBasicBlock *TargetBlock);
  bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
                               MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
                               bool &LocalUse) const;
  MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                      bool &BreakPHIEdge,
                                      AllSuccsCache &AllSuccessors);

  void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
                               SmallVectorImpl<MachineInstr *> &Candidates);
  bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);

  bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock *SuccToSinkTo,
                            AllSuccsCache &AllSuccessors);

  bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                       MachineBasicBlock *MBB);

  bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);

  SmallVector<MachineBasicBlock *, 4> &
  GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                         AllSuccsCache &AllSuccessors) const;

  std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);

  bool registerPressureSetExceedsLimit(unsigned NRegs,
                                       const TargetRegisterClass *RC,
                                       const MachineBasicBlock &MBB);
};
} // end anonymous namespace

char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)
/// Return true if a target defined block prologue instruction interferes
/// with a sink candidate.
static bool blockPrologueInterferes(const MachineBasicBlock *BB,
                                    MachineBasicBlock::const_iterator End,
                                    const MachineInstr &MI,
                                    const TargetRegisterInfo *TRI,
                                    const TargetInstrInfo *TII,
                                    const MachineRegisterInfo *MRI) {
  for (MachineBasicBlock::const_iterator PI = BB->getFirstNonPHI(); PI != End;
       ++PI) {
    // Only check target defined prologue instructions
    if (!TII->isBasicBlockPrologue(*PI))
      continue;
    for (auto &MO : MI.operands()) {
      Register Reg = MO.getReg();
      if (Reg.isPhysical() &&
          (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
        continue;
      if (PI->modifiesRegister(Reg, TRI))
        return true;
      if (PI->readsRegister(Reg, TRI))
        return true;
      // Check for interference with non-dead defs
      auto *DefOp = PI->findRegisterDefOperand(Reg, false, true, TRI);
      if (DefOp && !DefOp->isDead())
        return true;
    }
  }

  return false;
}
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI.isCopy())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual() ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  LLVM_DEBUG(dbgs() << "*** to: " << MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI.eraseFromParent();

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MRI->clearKillFlags(SrcReg);

  ++NumCoalesces;
  return true;
}
bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
                                        MachineBasicBlock *MBB) {
  if (MI.isCopy() || MI.mayLoadOrStore() ||
      MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
    return false;

  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  bool SawStore = true;
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't sink defs/uses of hard registers or if the instruction defines more
  // than one register.
  // Don't sink more than two register uses - it'll cover most of the cases and
  // greatly simplifies the register pressure checks.
  Register DefReg;
  Register UsedRegA, UsedRegB;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
        MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
        MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
      continue;
    if (!MO.isReg())
      return false;

    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isVirtual()) {
      if (MO.isDef()) {
        if (DefReg)
          return false;
        DefReg = Reg;
        continue;
      }

      if (UsedRegA == 0)
        UsedRegA = Reg;
      else if (UsedRegB == 0)
        UsedRegB = Reg;
      else
        return false;
      continue;
    }

    if (Reg.isPhysical() &&
        (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
      continue;

    return false;
  }

  // Scan uses of the destination register. Every use, except the last, must be
  // a copy, with a chain of copies terminating with either a copy into a hard
  // register, or a load/store instruction where the use is part of the
  // address (*not* the stored value).
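  //
  // For illustration only (the opcodes below are hypothetical placeholders,
  // not taken from any particular target):
  //
  //   %def = ADDXri %base, 16, 0    ; instruction being considered for sinking
  //   %cpy = COPY %def              ; intermediate copy
  //   STRXui %val, %cpy, 0          ; use where %cpy is part of the address
  //
  // Every use of %def reached through the copy chain is either another copy or
  // an address operand of a memory instruction, so the defining instruction
  // can be rematerialized or folded at each final use and then deleted.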
  using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
  SmallVector<SinkInfo> SinkInto;
  SmallVector<Register> Worklist;

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  const TargetRegisterClass *RCA =
      UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
  const TargetRegisterClass *RCB =
      UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);

  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();

    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      ExtAddrMode MaybeAM;
      MachineInstr &UseInst = *MO.getParent();
      if (UseInst.isCopy()) {
        Register DstReg;
        if (const MachineOperand &O = UseInst.getOperand(0); O.isReg())
          DstReg = O.getReg();
        if (DstReg.isVirtual()) {
          Worklist.push_back(DstReg);
          continue;
        }
        // If we are going to replace a copy, the original instruction must be
        // as cheap as a copy.
        if (!TII->isAsCheapAsAMove(MI))
          return false;
        // The hard register must be in the register class of the original
        // instruction's destination register.
        if (!RC->contains(DstReg))
          return false;
      } else if (UseInst.mayLoadOrStore()) {
        ExtAddrMode AM;
        if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))
          return false;
        MaybeAM = AM;
      } else {
        return false;
      }

      if (UseInst.getParent() != MI.getParent()) {
        // If the register class of the register we are replacing is a superset
        // of any of the register classes of the operands of the materialized
        // instruction don't consider that live range extended.
        const TargetRegisterClass *RCS = MRI->getRegClass(Reg);
        if (RCA && RCA->hasSuperClassEq(RCS))
          RCA = nullptr;
        else if (RCB && RCB->hasSuperClassEq(RCS))
          RCB = nullptr;
        if (RCA || RCB) {
          if (RCA == nullptr) {
            RCA = RCB;
            RCB = nullptr;
          }
          unsigned NRegs = !!RCA + !!RCB;

          // Check we don't exceed register pressure at the destination.
          const MachineBasicBlock &MBB = *UseInst.getParent();
          if (RCB == nullptr) {
            if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
              return false;
          } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
                     registerPressureSetExceedsLimit(1, RCB, MBB)) {
            return false;
          }
        }
      }

      SinkInto.emplace_back(&UseInst, MaybeAM);
    }
  }

  if (SinkInto.empty())
    return false;
  // Now we know we can fold the instruction in all its users.
  for (auto &[SinkDst, MaybeAM] : SinkInto) {
    MachineInstr *New = nullptr;
    LLVM_DEBUG(dbgs() << "Sinking copy of"; MI.dump(); dbgs() << "into";
               SinkDst->dump());
    if (SinkDst->isCopy()) {
      // TODO: After performing the sink-and-fold, the original instruction is
      // deleted. Its value is still available (in a hard register), so if there
      // are debug instructions which refer to the (now deleted) virtual
      // register they could be updated to refer to the hard register, in
      // principle. However, it's not clear how to do that, moreover in some
      // cases the debug instructions may need to be replicated proportionally
      // to the number of the COPY instructions replaced and in some extreme
      // cases we can end up with quadratic increase in the number of debug
      // instructions.

      // Sink a copy of the instruction, replacing a COPY instruction.
      MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
      Register DstReg = SinkDst->getOperand(0).getReg();
      TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
      New = &*std::prev(InsertPt);
      if (!New->getDebugLoc())
        New->setDebugLoc(SinkDst->getDebugLoc());

      // The operand registers of the "sunk" instruction have their live range
      // extended and their kill flags may no longer be correct. Conservatively
      // clear the kill flags.
      if (UsedRegA)
        MRI->clearKillFlags(UsedRegA);
      if (UsedRegB)
        MRI->clearKillFlags(UsedRegB);
    } else {
      // Fold instruction into the addressing mode of a memory instruction.
      New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);

      // The registers of the addressing mode may have their live range extended
      // and their kill flags may no longer be correct. Conservatively clear the
      // kill flags.
      if (Register R = MaybeAM.BaseReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
      if (Register R = MaybeAM.ScaledReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
    }
    LLVM_DEBUG(dbgs() << "yielding"; New->dump());
    // Clear the StoreInstrCache, since we may invalidate it by erasing.
    if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
      StoreInstrCache.clear();
    SinkDst->eraseFromParent();
  }
  // Collect operands that need to be cleaned up because the registers no longer
  // exist (in COPYs and debug instructions). We cannot delete instructions or
  // clear operands while traversing register uses.
  SmallVector<MachineOperand *> Cleanup;
  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();
    for (MachineOperand &MO : MRI->use_operands(Reg)) {
      MachineInstr *U = MO.getParent();
      assert((U->isCopy() || U->isDebugInstr()) &&
             "Only debug uses and copies must remain");
      if (U->isCopy())
        Worklist.push_back(U->getOperand(0).getReg());
      Cleanup.push_back(&MO);
    }
  }

  // Delete the dead COPYs and clear operands in debug instructions
  for (MachineOperand *MO : Cleanup) {
    MachineInstr *I = MO->getParent();
    if (I->isCopy()) {
      I->eraseFromParent();
    } else {
      MO->setReg(0);
      MO->setSubReg(0);
    }
  }

  MI.eraseFromParent();
  return true;
}
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
                                             MachineBasicBlock *MBB,
                                             MachineBasicBlock *DefMBB,
                                             bool &BreakPHIEdge,
                                             bool &LocalUse) const {
  assert(Reg.isVirtual() && "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1:
  //   Predecessors according to CFG: %bb.0
  //     ...
  //     %def = DEC64_32r %x, implicit-def dead %eflags
  //     ...
  //     JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2:
  //     %p = PHI %y, %bb.0, %def, %bb.1
  if (all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
        MachineInstr *UseInst = MO.getParent();
        unsigned OpNo = MO.getOperandNo();
        MachineBasicBlock *UseBlock = UseInst->getParent();
        return UseBlock == MBB && UseInst->isPHI() &&
               UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
      })) {
    BreakPHIEdge = true;
    return true;
  }

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo + 1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}
/// Return true if this machine instruction loads from global offset table or
/// constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert(MI.mayLoad() && "Expected MI that loads!");

  // If we lost memory operands, conservatively assume that the instruction
  // reads from everything.
  if (MI.memoperands_empty())
    return true;

  for (MachineMemOperand *MemOp : MI.memoperands())
    if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
      if (PSV->isGOT() || PSV->isConstantPool())
        return true;

  return false;
}
void MachineSinking::FindCycleSinkCandidates(
    MachineCycle *Cycle, MachineBasicBlock *BB,
    SmallVectorImpl<MachineInstr *> &Candidates) {
  for (auto &MI : *BB) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);
    if (!TII->shouldSink(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "
                           "target\n");
      continue;
    }
    if (!isCycleInvariant(Cycle, MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");
      continue;
    }
    bool DontMoveAcrossStore = true;
    if (!MI.isSafeToMove(AA, DontMoveAcrossStore)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");
      continue;
    }
    if (MI.mayLoad() && !mayLoadFromGOTOrConstantPool(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Don't sink GOT or constant pool loads\n");
      continue;
    }
    if (MI.isConvergent())
      continue;

    const MachineOperand &MO = MI.getOperand(0);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;

    LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");
    Candidates.push_back(&MI);
  }
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  RegClassInfo.runOnMachineFunction(MF);
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  EnableSinkAndFold = PassConfig->getEnableSinkAndFold();

  bool EverMadeChange = false;

  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    for (auto &MBB : MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (const auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
                          << printMBBReference(*Pair.first) << " -- "
                          << printMBBReference(*NewSucc) << " -- "
                          << printMBBReference(*Pair.second) << '\n');
        if (MBFI)
          MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);

        MadeChange = true;
        ++NumSplit;
        CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
      } else
        LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }
  if (SinkInstsIntoCycle) {
    SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_begin(),
                                          CI->toplevel_end());
    for (auto *Cycle : Cycles) {
      MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
      if (!Preheader) {
        LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");
        continue;
      }
      SmallVector<MachineInstr *, 8> Candidates;
      FindCycleSinkCandidates(Cycle, Preheader, Candidates);

      // Walk the candidates in reverse order so that we start with the use
      // of a def-use chain, if there is any.
      // TODO: Sort the candidates using a cost-model.
      unsigned i = 0;
      for (MachineInstr *I : llvm::reverse(Candidates)) {
        if (i++ == SinkIntoCycleLimit) {
          LLVM_DEBUG(dbgs() << "CycleSink: Limit reached of instructions to "
                               "be analysed.");
          break;
        }

        if (!SinkIntoCycle(Cycle, *I))
          break;
        EverMadeChange = true;
        ++NumCycleSunk;
      }
    }
  }

  HasStoreCache.clear();
  StoreInstrCache.clear();

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}
bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  if ((!EnableSinkAndFold && MBB.succ_size() <= 1) || MBB.empty())
    return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable cycle there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and cycle depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up. Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr &MI = *I; // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI.isDebugOrPseudoInstr()) {
      if (MI.isDebugValue())
        ProcessDbgInst(MI);
      continue;
    }

    if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    // Can't sink anything out of a block that has less than two successors.
    if (MBB.succ_size() <= 1)
      continue;

    if (PerformTrivialForwardCoalescing(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  SeenDbgUsers.clear();
  SeenDbgVars.clear();
  // Recalculate the bb register pressure after sinking one BB.
  CachedRegisterPressure.clear();
  return MadeChange;
}
void MachineSinking::ProcessDbgInst(MachineInstr &MI) {
  // When we see DBG_VALUEs for registers, record any vreg it reads, so that
  // we know what to sink if the vreg def sinks.
  assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");

  DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                    MI.getDebugLoc()->getInlinedAt());
  bool SeenBefore = SeenDbgVars.contains(Var);

  for (MachineOperand &MO : MI.debug_operands()) {
    if (MO.isReg() && MO.getReg().isVirtual())
      SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));
  }

  // Record the variable for any DBG_VALUE, to avoid re-ordering any of them.
  SeenDbgVars.insert(Var);
}
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

  if (From->isSuccessor(To) &&
      MBPI->getEdgeProbability(From, To) <=
          BranchProbability(SplitEdgeProbabilityThreshold, 100))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (const MachineOperand &MO : MI.all_uses()) {
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (Reg.isPhysical())
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in same MBB,
      // claim it's likely we can sink these together.
      // If definition resides elsewhere, we aren't
      // blocking it from being sunk so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI.getParent())
        return true;
    }
  }

  return false;
}
bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
                                               MachineBasicBlock *FromBB,
                                               MachineBasicBlock *ToBB,
                                               bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return false;

  // Avoid breaking back edge. From == To means backedge for single BB cycle.
  if (!SplitEdges || FromBB == ToBB)
    return false;

  MachineCycle *FromCycle = CI->getCycle(FromBB);
  MachineCycle *ToCycle = CI->getCycle(ToBB);

  // Check for backedges of more "complex" cycles.
  if (FromCycle == ToCycle && FromCycle &&
      (!FromCycle->isReducible() || FromCycle->getHeader() == ToBB))
    return false;
  // It's not always legal to break critical edges and sink the computation
  // to the edge. For example, suppose v1024 is computed in %bb.1 and used in
  // %bb.3, and %bb.3 is also reachable along the fallthrough path
  // %bb.1 -> %bb.2 -> %bb.3 (with no uses of v1024 in %bb.2). If the
  // %bb.1 -> %bb.3 edge is broken and the computation of v1024 is inserted
  // only in the new block, the result is incorrect since v1024 is not computed
  // along the %bb.1 -> %bb.2 -> %bb.3 flow. We need to ensure the new basic
  // block where the computation is sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock *Pred : ToBB->predecessors())
      if (Pred != FromBB && !DT->dominates(ToBB, Pred))
        return false;
  }

  ToSplit.insert(std::make_pair(FromBB, ToBB));

  return true;
}
std::vector<unsigned> &
MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB) {
  // To save compile time, a block's register pressure is cached in
  // CachedRegisterPressure and is not recomputed within one ProcessBlock
  // iteration, even though the pressure does change after sinking
  // instructions into the block.
  // FIXME: need an accurate and cheap register pressure estimation model here.
  auto RP = CachedRegisterPressure.find(&MBB);
  if (RP != CachedRegisterPressure.end())
    return RP->second;

  RegionPressure Pressure;
  RegPressureTracker RPTracker(Pressure);

  // Initialize the register pressure tracker.
  RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
                 /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);

  for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
                                         MIE = MBB.instr_begin();
       MII != MIE; --MII) {
    const MachineInstr &MI = *std::prev(MII);
    if (MI.isDebugInstr() || MI.isPseudoProbe())
      continue;
    RegisterOperands RegOpers;
    RegOpers.collect(MI, *TRI, *MRI, false, false);
    RPTracker.recedeSkipDebugValues();
    assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
    RPTracker.recede(RegOpers);
  }

  RPTracker.closeRegion();
  auto It = CachedRegisterPressure.insert(
      std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));
  return It.first->second;
}
bool MachineSinking::registerPressureSetExceedsLimit(
    unsigned NRegs, const TargetRegisterClass *RC,
    const MachineBasicBlock &MBB) {
  unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
  const int *PS = TRI->getRegClassPressureSets(RC);
  std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
  for (; *PS != -1; PS++)
    if (Weight + BBRegisterPressure[*PS] >=
        TRI->getRegPressureSetLimit(*MBB.getParent(), *PS))
      return true;
  return false;
}
1043 bool MachineSinking::isProfitableToSinkTo(Register Reg
, MachineInstr
&MI
,
1044 MachineBasicBlock
*MBB
,
1045 MachineBasicBlock
*SuccToSinkTo
,
1046 AllSuccsCache
&AllSuccessors
) {
1047 assert (SuccToSinkTo
&& "Invalid SinkTo Candidate BB");
1049 if (MBB
== SuccToSinkTo
)
1052 // It is profitable if SuccToSinkTo does not post dominate current block.
1053 if (!PDT
->dominates(SuccToSinkTo
, MBB
))
1056 // It is profitable to sink an instruction from a deeper cycle to a shallower
1057 // cycle, even if the latter post-dominates the former (PR21115).
1058 if (CI
->getCycleDepth(MBB
) > CI
->getCycleDepth(SuccToSinkTo
))
1061 // Check if only use in post dominated block is PHI instruction.
1062 bool NonPHIUse
= false;
1063 for (MachineInstr
&UseInst
: MRI
->use_nodbg_instructions(Reg
)) {
1064 MachineBasicBlock
*UseBlock
= UseInst
.getParent();
1065 if (UseBlock
== SuccToSinkTo
&& !UseInst
.isPHI())
1071 // If SuccToSinkTo post dominates then also it may be profitable if MI
1072 // can further profitably sinked into another block in next round.
1073 bool BreakPHIEdge
= false;
1074 // FIXME - If finding successor is compile time expensive then cache results.
1075 if (MachineBasicBlock
*MBB2
=
1076 FindSuccToSinkTo(MI
, SuccToSinkTo
, BreakPHIEdge
, AllSuccessors
))
1077 return isProfitableToSinkTo(Reg
, MI
, SuccToSinkTo
, MBB2
, AllSuccessors
);
1079 MachineCycle
*MCycle
= CI
->getCycle(MBB
);
1081 // If the instruction is not inside a cycle, it is not profitable to sink MI to
1082 // a post dominate block SuccToSinkTo.
1086 // If this instruction is inside a Cycle and sinking this instruction can make
1087 // more registers live range shorten, it is still prifitable.
1088 for (const MachineOperand
&MO
: MI
.operands()) {
1089 // Ignore non-register operands.
1092 Register Reg
= MO
.getReg();
1096 if (Reg
.isPhysical()) {
1097 // Don't handle non-constant and non-ignorable physical register uses.
1098 if (MO
.isUse() && !MRI
->isConstantPhysReg(Reg
) && !TII
->isIgnorableUse(MO
))
1103 // Users for the defs are all dominated by SuccToSinkTo.
1105 // This def register's live range is shortened after sinking.
1106 bool LocalUse
= false;
1107 if (!AllUsesDominatedByBlock(Reg
, SuccToSinkTo
, MBB
, BreakPHIEdge
,
1111 MachineInstr
*DefMI
= MRI
->getVRegDef(Reg
);
1114 MachineCycle
*Cycle
= CI
->getCycle(DefMI
->getParent());
1115 // DefMI is defined outside of cycle. There should be no live range
1116 // impact for this operand. Defination outside of cycle means:
1117 // 1: defination is outside of cycle.
1118 // 2: defination is in this cycle, but it is a PHI in the cycle header.
1119 if (Cycle
!= MCycle
|| (DefMI
->isPHI() && Cycle
&& Cycle
->isReducible() &&
1120 Cycle
->getHeader() == DefMI
->getParent()))
1122 // The DefMI is defined inside the cycle.
1123 // If sinking this operand makes some register pressure set exceed limit,
1124 // it is not profitable.
1125 if (registerPressureSetExceedsLimit(1, MRI
->getRegClass(Reg
),
1127 LLVM_DEBUG(dbgs() << "register pressure exceed limit, not profitable.");
1133 // If MI is in cycle and all its operands are alive across the whole cycle or
1134 // if no operand sinking make register pressure set exceed limit, it is
1135 // profitable to sink MI.
/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache ?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  for (MachineDomTreeNode *DTChild : DT->getNode(MBB)->children()) {
    // DomTree children of MBB that have MBB as immediate dominator are added.
    if (DTChild->getIDom()->getBlock() == MI.getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());
  }

  // Sort Successors according to their cycle depth or block frequency info.
  llvm::stable_sort(
      AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        bool HasBlockFreq = LHSFreq != 0 || RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : CI->getCycleDepth(L) < CI->getCycleDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert(MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // know.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue; // Ignore non-register operands.

    Register Reg = MO.getReg();
    if (Reg == 0) continue;

    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller cycle depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block. This can
  // happen with cycles.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  // It ought to be okay to sink instructions into an INLINEASM_BR target, but
  // only if we make sure that MI occurs _before_ an INLINEASM_BR instruction in
  // the source block (which this code does not yet do). So for now, forbid
  // sinking.
  if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
    return nullptr;

  if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
    return nullptr;

  return SuccToSinkTo;
}
/// Return true if MI is likely to be usable as a memory operation by the
/// implicit null check optimization.
///
/// This is a "best effort" heuristic, and should not be relied upon for
/// correctness. This returning true does not guarantee that the implicit null
/// check optimization is legal over MI, and this returning false does not
/// guarantee MI cannot possibly be used to do a null check.
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
                                             const TargetInstrInfo *TII,
                                             const TargetRegisterInfo *TRI) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  auto *MBB = MI.getParent();
  if (MBB->pred_size() != 1)
    return false;

  auto *PredMBB = *MBB->pred_begin();
  auto *PredBB = PredMBB->getBasicBlock();

  // Frontends that don't use implicit null checks have no reason to emit
  // branches with make.implicit metadata, and this function should always
  // return false for them.
  if (!PredBB ||
      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
    return false;

  const MachineOperand *BaseOp;
  int64_t Offset;
  bool OffsetIsScalable;
  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
    return false;

  if (!BaseOp->isReg())
    return false;

  if (!(MI.mayLoad() && !MI.isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseOp->getReg();
}
/// If the sunk instruction is a copy, try to forward the copy instead of
/// leaving an 'undef' DBG_VALUE in the original location. Don't do this if
/// there's any subregister weirdness involved. Returns true if copy
/// propagation occurred.
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI,
                                 Register Reg) {
  const MachineRegisterInfo &MRI = SinkInst.getMF()->getRegInfo();
  const TargetInstrInfo &TII = *SinkInst.getMF()->getSubtarget().getInstrInfo();

  // Copy DBG_VALUE operand and set the original to undef. We then check to
  // see whether this is something that can be copy-forwarded. If it isn't,
  // continue around the loop.

  const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;
  auto CopyOperands = TII.isCopyInstr(SinkInst);
  if (!CopyOperands)
    return false;
  SrcMO = CopyOperands->Source;
  DstMO = CopyOperands->Destination;

  // Check validity of forwarding this copy.
  bool PostRA = MRI.getNumVirtRegs() == 0;

  // Trying to forward between physical and virtual registers is too hard.
  if (Reg.isVirtual() != SrcMO->getReg().isVirtual())
    return false;

  // Only try virtual register copy-forwarding before regalloc, and physical
  // register copy-forwarding after regalloc.
  bool arePhysRegs = !Reg.isVirtual();
  if (arePhysRegs != PostRA)
    return false;

  // Pre-regalloc, only forward if all subregisters agree (or there are no
  // subregs at all). More analysis might recover some forwardable copies.
  if (!PostRA)
    for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg))
      if (DbgMO.getSubReg() != SrcMO->getSubReg() ||
          DbgMO.getSubReg() != DstMO->getSubReg())
        return false;

  // Post-regalloc, we may be sinking a DBG_VALUE of a sub or super-register
  // of this copy. Only forward the copy if the DBG_VALUE operand exactly
  // matches the copy destination.
  if (PostRA && Reg != DstMO->getReg())
    return false;

  for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg)) {
    DbgMO.setReg(SrcMO->getReg());
    DbgMO.setSubReg(SrcMO->getSubReg());
  }

  return true;
}
= std::pair
<MachineInstr
*, SmallVector
<unsigned, 2>>;
1383 /// Sink an instruction and its associated debug instructions.
1384 static void performSink(MachineInstr
&MI
, MachineBasicBlock
&SuccToSinkTo
,
1385 MachineBasicBlock::iterator InsertPos
,
1386 ArrayRef
<MIRegs
> DbgValuesToSink
) {
1387 // If we cannot find a location to use (merge with), then we erase the debug
1388 // location to prevent debug-info driven tools from potentially reporting
1389 // wrong location information.
1390 if (!SuccToSinkTo
.empty() && InsertPos
!= SuccToSinkTo
.end())
1391 MI
.setDebugLoc(DILocation::getMergedLocation(MI
.getDebugLoc(),
1392 InsertPos
->getDebugLoc()));
1394 MI
.setDebugLoc(DebugLoc());
1396 // Move the instruction.
1397 MachineBasicBlock
*ParentBlock
= MI
.getParent();
1398 SuccToSinkTo
.splice(InsertPos
, ParentBlock
, MI
,
1399 ++MachineBasicBlock::iterator(MI
));
1401 // Sink a copy of debug users to the insert position. Mark the original
1402 // DBG_VALUE location as 'undef', indicating that any earlier variable
1403 // location should be terminated as we've optimised away the value at this
1405 for (const auto &DbgValueToSink
: DbgValuesToSink
) {
1406 MachineInstr
*DbgMI
= DbgValueToSink
.first
;
1407 MachineInstr
*NewDbgMI
= DbgMI
->getMF()->CloneMachineInstr(DbgMI
);
1408 SuccToSinkTo
.insert(InsertPos
, NewDbgMI
);
1410 bool PropagatedAllSunkOps
= true;
1411 for (unsigned Reg
: DbgValueToSink
.second
) {
1412 if (DbgMI
->hasDebugOperandForReg(Reg
)) {
1413 if (!attemptDebugCopyProp(MI
, *DbgMI
, Reg
)) {
1414 PropagatedAllSunkOps
= false;
1419 if (!PropagatedAllSunkOps
)
1420 DbgMI
->setDebugValueUndef();
/// hasStoreBetween - check if there is a store between straight line blocks
/// From and To.
bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
                                     MachineBasicBlock *To, MachineInstr &MI) {
  // Make sure From and To are in straight line which means From dominates To
  // and To post dominates From.
  if (!DT->dominates(From, To) || !PDT->dominates(To, From))
    return true;

  auto BlockPair = std::make_pair(From, To);

  // Has this pair of blocks been queried before, and is there a definite
  // cached result?
  if (auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
    return It->second;

  if (auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
    return llvm::any_of(It->second, [&](MachineInstr *I) {
      return I->mayAlias(AA, MI, false);
    });

  bool SawStore = false;
  bool HasAliasedStore = false;
  DenseSet<MachineBasicBlock *> HandledBlocks;
  DenseSet<MachineBasicBlock *> HandledDomBlocks;
  // Go through all reachable blocks from From.
  for (MachineBasicBlock *BB : depth_first(From)) {
    // We insert the instruction at the start of block To, so no need to worry
    // about stores inside To.
    // Stores in block From have already been considered when we entered
    // SinkInstruction.
    if (BB == To || BB == From)
      continue;

    // We already handled this BB in a previous iteration.
    if (HandledBlocks.count(BB))
      continue;

    HandledBlocks.insert(BB);
    // To post dominates BB, it must be a path from block From.
    if (PDT->dominates(To, BB)) {
      if (!HandledDomBlocks.count(BB))
        HandledDomBlocks.insert(BB);

      // If this BB is too big or the number of blocks on the straight-line path
      // between From and To is too big, stop searching to save compile time.
      if (BB->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold) ||
          HandledDomBlocks.size() > SinkLoadBlocksThreshold) {
        for (auto *DomBB : HandledDomBlocks) {
          if (DomBB != BB && DT->dominates(DomBB, BB))
            HasStoreCache[std::make_pair(DomBB, To)] = true;
          else if (DomBB != BB && DT->dominates(BB, DomBB))
            HasStoreCache[std::make_pair(From, DomBB)] = true;
        }
        HasStoreCache[BlockPair] = true;
        return true;
      }

      for (MachineInstr &I : *BB) {
        // Treat as alias conservatively for a call or an ordered memory
        // operation.
        if (I.isCall() || I.hasOrderedMemoryRef()) {
          for (auto *DomBB : HandledDomBlocks) {
            if (DomBB != BB && DT->dominates(DomBB, BB))
              HasStoreCache[std::make_pair(DomBB, To)] = true;
            else if (DomBB != BB && DT->dominates(BB, DomBB))
              HasStoreCache[std::make_pair(From, DomBB)] = true;
          }
          HasStoreCache[BlockPair] = true;
          return true;
        }

        if (I.mayStore()) {
          SawStore = true;
          // We still have chance to sink MI if all stores between are not
          // aliased to MI.
          // Cache all store instructions, so that we don't need to go through
          // all From reachable blocks for next load instruction.
          if (I.mayAlias(AA, MI, false))
            HasAliasedStore = true;
          StoreInstrCache[BlockPair].push_back(&I);
        }
      }
    }
  }
  // If there is no store at all, cache the result.
  if (!SawStore)
    HasStoreCache[BlockPair] = false;
  return HasAliasedStore;
}
/// Sink instructions into cycles if profitable. This especially tries to
/// prevent register spills caused by register pressure if there is little to no
/// overhead moving instructions into cycles.
bool MachineSinking::SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I) {
  LLVM_DEBUG(dbgs() << "CycleSink: Finding sink block for: " << I);
  MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
  assert(Preheader && "Cycle sink needs a preheader block");
  MachineBasicBlock *SinkBlock = nullptr;
  bool CanSink = true;
  const MachineOperand &MO = I.getOperand(0);

  for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing use: " << MI);
    if (!Cycle->contains(MI.getParent())) {
      LLVM_DEBUG(dbgs() << "CycleSink: Use not in cycle, can't sink.\n");
      CanSink = false;
      break;
    }

    // FIXME: Come up with a proper cost model that estimates whether sinking
    // the instruction (and thus possibly executing it on every cycle
    // iteration) is more expensive than a register.
    // For now assumes that copies are cheap and thus almost always worth it.
    if (!MI.isCopy()) {
      LLVM_DEBUG(dbgs() << "CycleSink: Use is not a copy\n");
      CanSink = false;
      break;
    }

    if (!SinkBlock) {
      SinkBlock = MI.getParent();
      LLVM_DEBUG(dbgs() << "CycleSink: Setting sink block to: "
                        << printMBBReference(*SinkBlock) << "\n");
      continue;
    }
    SinkBlock = DT->findNearestCommonDominator(SinkBlock, MI.getParent());
    if (!SinkBlock) {
      LLVM_DEBUG(dbgs() << "CycleSink: Can't find nearest dominator\n");
      CanSink = false;
      break;
    }
    LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: " <<
               printMBBReference(*SinkBlock) << "\n");
  }

  if (!CanSink) {
    LLVM_DEBUG(dbgs() << "CycleSink: Can't sink instruction.\n");
    return false;
  }
  if (!SinkBlock) {
    LLVM_DEBUG(dbgs() << "CycleSink: Not sinking, can't find sink block.\n");
    return false;
  }
  if (SinkBlock == Preheader) {
    LLVM_DEBUG(
        dbgs() << "CycleSink: Not sinking, sink block is the preheader\n");
    return false;
  }
  if (SinkBlock->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold)) {
    LLVM_DEBUG(
        dbgs() << "CycleSink: Not Sinking, block too large to analyse.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "CycleSink: Sinking instruction!\n");
  SinkBlock->splice(SinkBlock->SkipPHIsAndLabels(SinkBlock->begin()), Preheader,
                    I);

  // Conservatively clear any kill flags on uses of sunk instruction
  for (MachineOperand &MO : I.operands()) {
    if (MO.isReg() && MO.readsReg())
      RegsToClearKillFlags.insert(MO.getReg());
  }

  // The instruction is moved from its basic block, so do not retain the
  // debug information.
  assert(!I.isDebugInstr() && "Should not sink debug inst");
  I.setDebugLoc(DebugLoc());
  return true;
}
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
                                     AllSuccsCache &AllSuccessors) {
  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't break implicit null checks. This is a performance heuristic, and not
  // required for correctness.
  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI.getParent();
  MachineBasicBlock *SuccToSinkTo =
      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

  // If there are no outputs, it must have side-effects.
  if (!SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS.
  for (const MachineOperand &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg == 0 || !Reg.isPhysical())
      continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
  // If the block has multiple predecessors, this is a critical edge.
  // Decide if we can sink along it or need to break the edge.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool Store =
        MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true;
    if (!MI.isSafeToMove(AA, Store)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a cycle.
    if (!TryBreak && CI->getCycle(SuccToSinkTo) &&
        (!CI->getCycle(SuccToSinkTo)->isReducible() ||
         CI->getCycle(SuccToSinkTo)->getHeader() == SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      // Mark this edge as to be split.
      // If the edge can actually be split, the next iteration of the main loop
      // will sink MI in the newly created block.
      bool Status =
          PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!Status)
        LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                             "break critical edge\n");
      // The instruction will not be sunk this time.
      return false;
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
                                            SuccToSinkTo, BreakPHIEdge);
    if (!Status)
      LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                           "break critical edge\n");
    // The instruction will not be sunk this time.
    return false;
  }
1705 // Determine where to insert into. Skip phi nodes.
1706 MachineBasicBlock::iterator InsertPos
=
1707 SuccToSinkTo
->SkipPHIsAndLabels(SuccToSinkTo
->begin());
1708 if (blockPrologueInterferes(SuccToSinkTo
, InsertPos
, MI
, TRI
, TII
, MRI
)) {
1709 LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
1713 // Collect debug users of any vreg that this inst defines.
1714 SmallVector
<MIRegs
, 4> DbgUsersToSink
;
1715 for (auto &MO
: MI
.all_defs()) {
1716 if (!MO
.getReg().isVirtual())
1718 if (!SeenDbgUsers
.count(MO
.getReg()))
1721 // Sink any users that don't pass any other DBG_VALUEs for this variable.
1722 auto &Users
= SeenDbgUsers
[MO
.getReg()];
1723 for (auto &User
: Users
) {
1724 MachineInstr
*DbgMI
= User
.getPointer();
1725 if (User
.getInt()) {
1726 // This DBG_VALUE would re-order assignments. If we can't copy-propagate
1727 // it, it can't be recovered. Set it undef.
1728 if (!attemptDebugCopyProp(MI
, *DbgMI
, MO
.getReg()))
1729 DbgMI
->setDebugValueUndef();
1731 DbgUsersToSink
.push_back(
1732 {DbgMI
, SmallVector
<unsigned, 2>(1, MO
.getReg())});
1737 // After sinking, some debug users may not be dominated any more. If possible,
1738 // copy-propagate their operands. As it's expensive, don't do this if there's
1739 // no debuginfo in the program.
1740 if (MI
.getMF()->getFunction().getSubprogram() && MI
.isCopy())
1741 SalvageUnsunkDebugUsersOfCopy(MI
, SuccToSinkTo
);
1743 performSink(MI
, *SuccToSinkTo
, InsertPos
, DbgUsersToSink
);
1745 // Conservatively, clear any kill flags, since it's possible that they are no
1747 // Note that we have to clear the kill flags for any register this instruction
1748 // uses as we may sink over another instruction which currently kills the
1750 for (MachineOperand
&MO
: MI
.all_uses())
1751 RegsToClearKillFlags
.insert(MO
.getReg()); // Remember to clear kill flags.
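
/// Salvage the debug users of a sunk COPY: any DBG_VALUE of a register the
/// COPY defines that would no longer be dominated after sinking is rewritten
/// to refer to the COPY's source operand instead.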
void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
    MachineInstr &MI, MachineBasicBlock *TargetBlock) {
  assert(MI.isCopy());
  assert(MI.getOperand(1).isReg());

  // Enumerate all users of vreg operands that are def'd. Skip those that will
  // be sunk. For the rest, if they are not dominated by the block we will sink
  // MI into, propagate the copy source to them.
  SmallVector<MachineInstr *, 4> DbgDefUsers;
  SmallVector<Register, 4> DbgUseRegs;
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  for (auto &MO : MI.all_defs()) {
    if (!MO.getReg().isVirtual())
      continue;
    DbgUseRegs.push_back(MO.getReg());
    for (auto &User : MRI.use_instructions(MO.getReg())) {
      if (!User.isDebugValue() || DT->dominates(TargetBlock, User.getParent()))
        continue;

      // If in the same block, the user will either sink or be use-before-def.
      if (User.getParent() == MI.getParent())
        continue;

      assert(User.hasDebugOperandForReg(MO.getReg()) &&
             "DBG_VALUE user of vreg, but has no operand for it?");
      DbgDefUsers.push_back(&User);
    }
  }

  // Point the users of this copy that are no longer dominated at the source
  // of the copy.
  for (auto *User : DbgDefUsers) {
    for (auto &Reg : DbgUseRegs) {
      for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) {
        DbgOp.setReg(MI.getOperand(1).getReg());
        DbgOp.setSubReg(MI.getOperand(1).getSubReg());
      }
    }
  }
}
//===----------------------------------------------------------------------===//
// This pass is not intended to be a replacement or a complete alternative
// for the pre-ra machine sink pass. It is only designed to sink COPY
// instructions which should be handled after RA.
//
// This pass sinks COPY instructions into a successor block if the COPY is not
// used in the current block and the COPY is live-in to a single successor
// (i.e., doesn't require the COPY to be duplicated). This avoids executing the
// copy on paths where its result isn't needed, and it also exposes additional
// opportunities for dead copy elimination and shrink wrapping.
//
// These copies were either not handled by, or are inserted after, the
// MachineSink pass. As an example of the former case, the MachineSink pass
// cannot sink COPY instructions with allocatable source registers; for AArch64
// these types of copy instructions are frequently used to move function
// parameters (physical registers) into virtual registers in the entry block.
//
// For the machine IR below, this pass will sink %w19 in the entry into its
// successor (%bb.1) because %w19 is only live-in in %bb.1.
// %bb.0:
//   %wzr = SUBSWri %w1, 1
//   %w19 = COPY %w0
//   Bcc 11, %bb.2
// %bb.1:
//   Live Ins: %w19
//   BL @fun
//   %w0 = ADDWrr %w0, %w19
//   RET %w0
// %bb.2:
//   %w0 = COPY %wzr
//   RET %w0
// As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
// able to see %bb.0 as a candidate.
//===----------------------------------------------------------------------===//
namespace {

class PostRAMachineSinking : public MachineFunctionPass {
public:
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;
  PostRAMachineSinking() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override { return "PostRA Machine Sink"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  /// Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;

  /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an
  /// entry in this map for each unit it touches. The DBG_VALUE's entry
  /// consists of a pointer to the instruction itself, and a vector of
  /// registers referred to by the instruction that overlap the key register
  /// unit.
  DenseMap<unsigned, SmallVector<MIRegs, 2>> SeenDbgInstrs;

  /// Sink Copy instructions unused in the same block close to their uses in
  /// successors.
  bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
                     const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
};
} // namespace

char PostRAMachineSinking::ID = 0;
char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;

INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
                "PostRA Machine Sink", false, false)
static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
                                  const TargetRegisterInfo *TRI) {
  LiveRegUnits LiveInRegUnits(*TRI);
  LiveInRegUnits.addLiveIns(MBB);
  return !LiveInRegUnits.available(Reg);
}
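
/// Find the single sinkable successor of CurBB in which Reg is live-in.
/// Returns nullptr if Reg is live-in to zero or to more than one sinkable
/// successor, or if a register aliasing Reg is live-in to a non-sinkable
/// successor.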
static MachineBasicBlock *
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
                      unsigned Reg, const TargetRegisterInfo *TRI) {
  // Try to find a single sinkable successor in which Reg is live-in.
  MachineBasicBlock *BB = nullptr;
  for (auto *SI : SinkableBBs) {
    if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
      // If BB is set here, Reg is live-in to at least two sinkable successors,
      // so give up.
      if (BB)
        return nullptr;
      BB = SI;
    }
  }
  // Reg is not live-in to any sinkable successors.
  if (!BB)
    return nullptr;

  // Check if any register aliased with Reg is live-in in other successors.
  for (auto *SI : CurBB.successors()) {
    if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
      return nullptr;
  }
  return BB;
}
static MachineBasicBlock *
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
                      ArrayRef<unsigned> DefedRegsInCopy,
                      const TargetRegisterInfo *TRI) {
  MachineBasicBlock *SingleBB = nullptr;
  for (auto DefReg : DefedRegsInCopy) {
    MachineBasicBlock *BB =
        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
    if (!BB || (SingleBB && SingleBB != BB))
      return nullptr;
    SingleBB = BB;
  }
  return SingleBB;
}
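
/// Fix up kill flags for the source registers of a COPY being sunk: if a
/// later instruction in CurBB kills one of those registers, that kill flag is
/// removed and the COPY's corresponding use operand gets the kill flag
/// instead.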
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
                           SmallVectorImpl<unsigned> &UsedOpsInCopy,
                           LiveRegUnits &UsedRegUnits,
                           const TargetRegisterInfo *TRI) {
  for (auto U : UsedOpsInCopy) {
    MachineOperand &MO = MI->getOperand(U);
    Register SrcReg = MO.getReg();
    if (!UsedRegUnits.available(SrcReg)) {
      MachineBasicBlock::iterator NI = std::next(MI->getIterator());
      for (MachineInstr &UI : make_range(NI, CurBB.end())) {
        if (UI.killsRegister(SrcReg, TRI)) {
          UI.clearRegisterKills(SrcReg, TRI);
          MO.setIsKill(true);
          break;
        }
      }
    }
  }
}
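
/// Update SuccBB's live-in list after sinking the COPY into it: the registers
/// the COPY defines (and their subregisters) are removed from the live-ins,
/// and the registers it uses are added instead.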
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
                         SmallVectorImpl<unsigned> &UsedOpsInCopy,
                         SmallVectorImpl<unsigned> &DefedRegsInCopy) {
  MachineFunction &MF = *SuccBB->getParent();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (unsigned DefReg : DefedRegsInCopy)
    for (MCPhysReg S : TRI->subregs_inclusive(DefReg))
      SuccBB->removeLiveIn(S);
  for (auto U : UsedOpsInCopy) {
    Register SrcReg = MI->getOperand(U).getReg();
    LaneBitmask Mask;
    for (MCRegUnitMaskIterator S(SrcReg, TRI); S.isValid(); ++S)
      Mask |= (*S).second;
    SuccBB->addLiveIn(SrcReg, Mask);
  }
  SuccBB->sortUniqueLiveIns();
}
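
/// Check whether sinking MI would violate a register dependency with the
/// registers modified or used below it in the block. When there is no
/// dependency, the indices of MI's use operands are collected in
/// UsedOpsInCopy and the registers it defines in DefedRegsInCopy.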
static bool hasRegisterDependency(MachineInstr *MI,
                                  SmallVectorImpl<unsigned> &UsedOpsInCopy,
                                  SmallVectorImpl<unsigned> &DefedRegsInCopy,
                                  LiveRegUnits &ModifiedRegUnits,
                                  LiveRegUnits &UsedRegUnits) {
  bool HasRegDependency = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef()) {
      if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
        HasRegDependency = true;
        break;
      }
      DefedRegsInCopy.push_back(Reg);

      // FIXME: instead of isUse(), readsReg() would be a better fix here.
      // For example, we can ignore modifications in reg with undef. However,
      // it's not perfectly clear if skipping the internal read is safe in all
      // other targets.
    } else if (MO.isUse()) {
      if (!ModifiedRegUnits.available(Reg)) {
        HasRegDependency = true;
        break;
      }
      UsedOpsInCopy.push_back(i);
    }
  }
  return HasRegDependency;
}
bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
                                         MachineFunction &MF,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII) {
  SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
  // FIXME: For now, we sink only to a successor which has a single predecessor
  // so that we can directly sink COPY instructions to the successor without
  // adding any new block or branch instruction.
  for (MachineBasicBlock *SI : CurBB.successors())
    if (!SI->livein_empty() && SI->pred_size() == 1)
      SinkableBBs.insert(SI);

  if (SinkableBBs.empty())
    return false;

  bool Changed = false;

  // Track which registers have been modified and used between the end of the
  // block and the current instruction.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  SeenDbgInstrs.clear();

  for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(CurBB))) {
    // Track the operand index for use in Copy.
    SmallVector<unsigned, 2> UsedOpsInCopy;
    // Track the register number defed in Copy.
    SmallVector<unsigned, 2> DefedRegsInCopy;

    // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
    // for DBG_VALUEs later, record them when they're encountered.
    if (MI.isDebugValue() && !MI.isDebugRef()) {
      SmallDenseMap<MCRegister, SmallVector<unsigned, 2>, 4> MIUnits;
      bool IsValid = true;
      for (MachineOperand &MO : MI.debug_operands()) {
        if (MO.isReg() && MO.getReg().isPhysical()) {
          // Bail if we can already tell the sink would be rejected, rather
          // than needlessly accumulating lots of DBG_VALUEs.
          if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
                                    ModifiedRegUnits, UsedRegUnits)) {
            IsValid = false;
            break;
          }

          // Record debug use of each reg unit.
          for (MCRegUnit Unit : TRI->regunits(MO.getReg()))
            MIUnits[Unit].push_back(MO.getReg());
        }
      }
      if (IsValid) {
        for (auto &RegOps : MIUnits)
          SeenDbgInstrs[RegOps.first].emplace_back(&MI,
                                                   std::move(RegOps.second));
      }
      continue;
    }

    if (MI.isDebugOrPseudoInstr())
      continue;

    // Do not move any instruction across function call.
    if (MI.isCall())
      return false;

    if (!MI.isCopy() || !MI.getOperand(0).isRenamable()) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }

    // Don't sink the COPY if it would violate a register dependency.
    if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
                              ModifiedRegUnits, UsedRegUnits)) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
           "Unexpected SrcReg or DefReg");
    MachineBasicBlock *SuccBB =
        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
    // Don't sink if we cannot find a single sinkable successor in which Reg
    // is live-in.
    if (!SuccBB) {
      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
           "Unexpected predecessor");

    // Collect DBG_VALUEs that must sink with this copy. We've previously
    // recorded which reg units DBG_VALUEs read; if this instruction writes
    // any of those units then the corresponding DBG_VALUEs must sink.
    MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
    for (auto &MO : MI.all_defs()) {
      for (MCRegUnit Unit : TRI->regunits(MO.getReg())) {
        for (const auto &MIRegs : SeenDbgInstrs.lookup(Unit)) {
          auto &Regs = DbgValsToSinkMap[MIRegs.first];
          for (unsigned Reg : MIRegs.second)
            Regs.push_back(Reg);
        }
      }
    }
    auto DbgValsToSink = DbgValsToSinkMap.takeVector();

    LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB);

    MachineBasicBlock::iterator InsertPos =
        SuccBB->SkipPHIsAndLabels(SuccBB->begin());
    if (blockPrologueInterferes(SuccBB, InsertPos, MI, TRI, TII, nullptr)) {
      LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
      continue;
    }

    // Clear the kill flag if SrcReg is killed between MI and the end of the
    // block.
    clearKillFlags(&MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
    performSink(MI, *SuccBB, InsertPos, DbgValsToSink);
    updateLiveIn(&MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);

    Changed = true;
    ++NumPostRACopySink;
  }
  return Changed;
}
bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  bool Changed = false;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  ModifiedRegUnits.init(*TRI);
  UsedRegUnits.init(*TRI);
  for (auto &BB : MF)
    Changed |= tryToSinkCopy(BB, MF, TRI, TII);

  return Changed;
}