//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of snippets included in spills");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills after reloads");
STATISTIC(NumHoistLocal,      "Number of locally hoisted spills");
STATISTIC(NumHoistGlobal,     "Number of globally hoisted spills");
STATISTIC(NumRedundantSpills, "Number of redundant spills identified");
namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(false), SpillReg(Reg), SpillVNI(VNI), DefMI(0) {}
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}

  void spill(LiveRangeEdit &);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}
namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
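
// A register allocator typically owns one spiller for the whole function,
// obtained from createInlineSpiller above, and calls spill() once per live
// range it decides to spill. A minimal usage sketch (illustrative only; the
// exact LiveRangeEdit construction depends on the caller):
//
//   OwningPtr<Spiller> SpillerInstance(createInlineSpiller(*this, MF, VRM));
//   ...
//   LiveRangeEdit LRE(VirtReg, NewVRegs, ...);
//   SpillerInstance->spill(LRE);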
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.
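//
// For example (an illustrative, made-up fragment; register names are
// hypothetical), a snippet left behind by splitting might look like:
//
//   %snip = COPY %Reg      ; copy from the register being spilled
//   ...
//   %x = ADD %snip, %y     ; the single real use
//
// Spilling %snip along with %Reg lets the ADD fold a reload from the stack
// slot, or at least keeps %snip live only right around the ADD, instead of
// tying up a register across the whole gap.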
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}
//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
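//
// For example (hypothetical virtual registers), after splitting, the value in
// %1 below is defined by a copy from its sibling %0:
//
//   %0 = ADD ...           ; original def, possibly rematerializable
//   ...
//   %1 = COPY %0           ; sibling copy: %0 and %1 share the same original
//
// When spilling %1, tracing back through the COPY finds the ADD, which may
// serve as a remat template, or reveals that the value is already on the
// stack.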
bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
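///
/// For example (illustrative), if every def reached from the traced value is
/// a fill like "%sib = FILL fi#" of the current stack slot, the value is
/// already on the stack: AllDefsAreReloads stays set and no new spill needs
/// to be inserted.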
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');
  SmallPtrSet<VNInfo*, 8> Visited;
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  // Best spill candidate seen so far. This must dominate UseVNI.
  SibValueInfo SVI(UseReg, UseVNI);
  MachineBasicBlock *UseMBB = LIS.getMBBFromIndex(UseVNI->def);
  unsigned SpillDepth = Loops.getLoopDepth(UseMBB);
  bool SeenOrigPHI = false; // Original PHI met.

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    if (!Visited.insert(VNI))
      continue;

    // Is this value a better spill candidate?
    if (!isRegToSpill(Reg)) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      if (MBB != UseMBB && MDT.dominates(MBB, UseMBB)) {
        // This is a valid spill location dominating UseVNI.
        // Prefer to spill at a smaller loop depth.
        unsigned Depth = Loops.getLoopDepth(MBB);
        if (Depth < SpillDepth) {
          DEBUG(dbgs() << "  spill depth " << Depth << ": " << PrintReg(Reg)
                       << ':' << VNI->id << '@' << VNI->def << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
          SpillDepth = Depth;
        }
      }
    }

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "  orig phi value " << PrintReg(Reg) << ':'
                     << VNI->id << '@' << VNI->def << '\n');
        SeenOrigPHI = true;
        continue;
      }
      // Get values live-out of predecessors.
      LiveInterval &LI = LIS.getInterval(Reg);
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI.getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(Reg, PVNI));
      }
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << "  copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        continue;
      }
    }

    // Track reachable reloads.
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "  reload " << PrintReg(Reg) << ':'
                   << VNI->id << '@' << VNI->def << '\n');
      SVI.AllDefsAreReloads = true;
      continue;
    }

    // We have an 'original' def. Don't record trivial cases.
    if (VNI == UseVNI) {
      DEBUG(dbgs() << "Not a sibling copy.\n");
      return MI;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "  def " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\t' << *MI);
    SVI.DefMI = MI;
  } while (!WorkList.empty());

  if (SeenOrigPHI || SVI.DefMI)
    SVI.AllDefsAreReloads = false;

  DEBUG({
    if (SVI.AllDefsAreReloads)
      dbgs() << "All defs are reloads.\n";
    else
      dbgs() << "Prefer to spill " << PrintReg(SVI.SpillReg) << ':'
             << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def << '\n';
  });
  SibValues.insert(std::make_pair(UseVNI, SVI));
  return SVI.DefMI;
}
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA))
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
    }
  }
}
/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
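///
/// For example (illustrative), when the sibling copy triggering the spill
/// sits inside a loop but SpillVNI is defined outside it, the store is placed
/// right after the def, so it executes once instead of on every iteration.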
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    ++NumOmitReloadSpill;
    return true;
  }

  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }

  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  if (MBB == CopyMI->getParent())
    ++NumHoistLocal;
  else
    ++NumHoistGlobal;
  return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
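///
/// For example (illustrative), once the value is known to live in the slot, a
/// later "SPILL %sib, fi#" of that same value only stores what the slot
/// already contains; such stores are turned into KILLs below and queued for
/// deletion.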
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumRedundantSpills;
      }
    }
  } while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx);

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        markValueUsed(&VirtReg, ParentVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  ++NumRemats;
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                Spilling
//===----------------------------------------------------------------------===//

/// coalesceStackAccess - If MI is a load or store of StackSlot, it can be
/// removed.
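///
/// For example (illustrative), a leftover fill "%Reg = FILL fi#" becomes
/// redundant once %Reg is being spilled to that same slot: the surrounding
/// spill code already keeps the value in the slot, so the access is deleted.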
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}
/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
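///
/// For example (illustrative x86), a reload feeding an add:
///
///   %x = FILL fi#2
///   %y = ADD32rr %y, %x
///
/// can fold into the memory-operand form ADD32rm %y, fi#2, removing both the
/// reload and the need for a register to hold %x.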
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
           : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  ++NumFolded;
  return true;
}
/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
  ++NumReloads;
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
  ++NumSpills;
}
/// spillAroundUses - Insert spill code around each use of Reg.
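///
/// The per-use pattern, when folding fails, is (illustrative): a reload is
/// inserted before a use and a spill after a def, each through a fresh,
/// unspillable scratch vreg:
///
///   %new = FILL fi#        ; if the instruction reads Reg
///   ... use/def %new ...
///   SPILL %new, fi#        ; if the instruction writes a live value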
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, Idx, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
         MachineInstr *MI = RI.skipInstruction();) {
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      VRM.RemoveMachineInstrFromMaps(MI);
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = 0;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}