//===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-sink"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

STATISTIC(NumSunk,      "Number of machine instructions sunk");
STATISTIC(NumSplit,     "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");

namespace {
  class MachineSinking : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    MachineRegisterInfo  *MRI;  // Machine register information
    MachineDominatorTree *DT;   // Machine dominator tree
    MachineLoopInfo *LI;
    AliasAnalysis *AA;
    BitVector AllocatableSet;   // Which physregs are allocatable?

    // Remember which edges have been considered for breaking.
    SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
    CEBCandidates;

  public:
    static char ID; // Pass identification
    MachineSinking() : MachineFunctionPass(ID) {
      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
    }

    virtual void releaseMemory() {
      CEBCandidates.clear();
    }

  private:
    bool ProcessBlock(MachineBasicBlock &MBB);
    bool isWorthBreakingCriticalEdge(MachineInstr *MI,
                                     MachineBasicBlock *From,
                                     MachineBasicBlock *To);
    MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
                                         MachineBasicBlock *From,
                                         MachineBasicBlock *To,
                                         bool BreakPHIEdge);
    bool SinkInstruction(MachineInstr *MI, bool &SawStore);
    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                                 MachineBasicBlock *DefMBB,
                                 bool &BreakPHIEdge, bool &LocalUse) const;
    bool PerformTrivialForwardCoalescing(MachineInstr *MI,
                                         MachineBasicBlock *MBB);
  };
} // end anonymous namespace

char MachineSinking::ID = 0;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
                    "Machine code sinking", false, false)

FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
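
/// PerformTrivialForwardCoalescing - If MI is a trivial vreg-to-vreg copy
/// whose source register has a single non-debug use and the same register
/// class as the destination, replace the destination with the source
/// everywhere and erase the copy.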
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI->isCopy())
    return false;

  unsigned SrcReg = MI->getOperand(1).getReg();
  unsigned DstReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg) ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  DEBUG(dbgs() << "Coalescing: " << *DefMI);
  DEBUG(dbgs() << "*** to: " << *MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI->eraseFromParent();
  ++NumCoalesces;
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  if (MRI->use_nodbg_empty(Reg))
    return true;

  // Ignoring debug uses is necessary so debug info doesn't affect the code.
  // This may leave a referencing dbg_value in the original block, before
  // the definition of the vreg. The Dwarf generator handles this, although the
  // user might not get the right info at runtime.
  //
  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // BB#1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: BB#0
  //     ...
  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
  //     ...
  //     JE_4 <BB#37>, %EFLAGS<imp-use>
  //   Successors according to CFG: BB#37 BB#2
  //
  // BB#2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: BB#0 BB#1
  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
  BreakPHIEdge = true;
  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    // Determine the block of the use.
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "******** Machine Sinking ********\n");

  const TargetMachine &TM = MF.getTarget();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  AllocatableSet = TRI->getAllocatableSet(MF);

  bool EverMadeChange = false;

  while (1) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    for (MachineFunction::iterator I = MF.begin(), E = MF.end();
         I != E; ++I)
      MadeChange |= ProcessBlock(*I);

    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }
  return EverMadeChange;
}
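
/// ProcessBlock - Walk the instructions of MBB bottom-up, coalescing trivial
/// copies and sinking instructions into successor blocks where possible.
/// Returns true if the block was changed.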
bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has fewer than two successors.
  if (MBB.succ_size() <= 1 || MBB.empty()) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Walk the basic block bottom-up.  Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr *MI = I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI->isDebugValue())
      continue;

    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
    if (Joined) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore))
      ++NumSunk, MadeChange = true;

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}
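
/// isWorthBreakingCriticalEdge - Heuristically decide whether splitting the
/// critical edge From->To in order to sink MI is likely to be profitable.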
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)))
    return true;

  if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove())
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (MRI->hasOneNonDBGUse(Reg))
      return true;
  }

  return false;
}
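
/// SplitCriticalEdge - Split the critical edge FromBB->ToBB if doing so is
/// both legal and worthwhile for sinking MI. Returns the newly created block,
/// or null if the edge is left alone.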
MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
                                                     MachineBasicBlock *FromBB,
                                                     MachineBasicBlock *ToBB,
                                                     bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return 0;

  // Avoid breaking a back edge. From == To means a backedge for a single-BB loop.
  if (!SplitEdges || FromBB == ToBB)
    return 0;

  // Check for backedges of more "complex" loops.
  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
      LI->isLoopHeader(ToBB))
    return 0;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // BB#1:
  // v1024
  // Beq BB#3
  // <fallthrough>
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //   = v1024
  //
  // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
  //
  // BB#1:
  // ...
  // Bne BB#2
  // BB#4:
  // v1024 =
  // B BB#3
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //   = v1024
  //
  // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
  // flow. We need to ensure the new basic block where the computation is
  // sunk dominates all the uses.
  // It's only legal to break the critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given the SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
           E = ToBB->pred_end(); PI != E; ++PI) {
      if (*PI == FromBB)
        continue;
      if (!DT->dominates(ToBB, *PI))
        return 0;
    }
  }

  return FromBB->SplitCriticalEdge(ToBB, this);
}
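
/// AvoidsSinking - Return true if MI is an instruction that should not be
/// sunk because it is expected to stay close to the instruction that defines
/// its source value (this makes later coalescing easier).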
static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
  return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}

/// SinkInstruction - Determine whether it is safe to sink the specified
/// machine instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
  // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
  // be close to the source to make it easier to coalesce.
  if (AvoidsSinking(MI, MRI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI->isSafeToMove(TII, AA, SawStore))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.
  MachineBasicBlock *ParentBlock = MI->getParent();

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = 0;

  bool BreakPHIEdge = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->def_empty(Reg))
          return false;

        if (AllocatableSet.test(Reg))
          return false;

        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI->def_empty(AliasReg))
            return false;

          if (AllocatableSet.test(AliasReg))
            return false;
        }
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return false;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return false;

      // FIXME: This picks a successor to sink into based on having one
      // successor that dominates all the uses. However, there are cases where
      // sinking can happen but where the sink point isn't a successor. For
      // example:
      //
      //   x = computation
      //   if () {} else {}
      //   use x
      //
      // the instruction could be sunk over the whole diamond for the
      // if/then/else (or loop, etc), allowing it to be sunk into other blocks
      // after that.

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
                                     BreakPHIEdge, LocalUse))
          return false;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to.
      for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
             E = ParentBlock->succ_end(); SI != E; ++SI) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = *SI;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return false;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (SuccToSinkTo == 0)
        return false;
    }
  }

  // If there are no outputs, it must have side-effects.
  if (SuccToSinkTo == 0)
    return false;

  // It's not safe to sink instructions to an EH landing pad. Control flow into
  // a landing pad is implicitly defined.
  if (SuccToSinkTo->isLandingPad())
    return false;

  // It is not possible to sink an instruction into its own block. This can
  // happen with loops.
  if (MI->getParent() == SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, this would introduce computation
  // on a path where it doesn't already exist. We could split the critical
  // edge, but for now we just punt.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool store = true;
    if (!MI->isSafeToMove(TII, AA, store)) {
      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations on new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      MachineBasicBlock *NewSucc =
        SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!NewSucc) {
        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
              "break critical edge\n");
        return false;
      } else {
        DEBUG(dbgs() << " *** Splitting critical edge:"
              " BB#" << ParentBlock->getNumber()
              << " -- BB#" << NewSucc->getNumber()
              << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
        SuccToSinkTo = NewSucc;
        ++NumSplit;
        BreakPHIEdge = false;
      }
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
                                                   SuccToSinkTo, BreakPHIEdge);
    if (!NewSucc) {
      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
            "break critical edge\n");
      return false;
    }

    DEBUG(dbgs() << " *** Splitting critical edge:"
          " BB#" << ParentBlock->getNumber()
          << " -- BB#" << NewSucc->getNumber()
          << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
    SuccToSinkTo = NewSucc;
    ++NumSplit;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  // Move the instruction.
  SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
                       ++MachineBasicBlock::iterator(MI));

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MI->clearKillInfo();

  return true;
}