//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(false), SpillReg(Reg), SpillVNI(VNI), DefMI(0) {}
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}

  void spill(LiveRangeEdit &);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}
namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
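//
// collectRegsToSpill() below gathers these snippets by walking the copies of
// the register being spilled; the exact pattern accepted as a snippet is
// spelled out in isSnippet().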
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isCopy())
    return 0;
  if (MI->getOperand(0).getSubReg() != 0)
    return 0;
  if (MI->getOperand(1).getSubReg() != 0)
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (!isRegToSpill(SnipReg))
      RegsToSpill.push_back(SnipReg);

    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
  }
}
//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
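//
// For example, when a value of the register being spilled is defined by a full
// copy from a sibling register (one with the same original), traceSiblingValue()
// follows that copy to the sibling's value. If every reaching def turns out to
// be a reload from the stack slot, no new spill is needed; if a real defining
// instruction is found, it becomes a rematerialization candidate.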
bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');
  SmallPtrSet<VNInfo*, 8> Visited;
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  // Best spill candidate seen so far. This must dominate UseVNI.
  SibValueInfo SVI(UseReg, UseVNI);
  MachineBasicBlock *UseMBB = LIS.getMBBFromIndex(UseVNI->def);
  unsigned SpillDepth = Loops.getLoopDepth(UseMBB);
  bool SeenOrigPHI = false; // Original PHI met.

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    if (!Visited.insert(VNI))
      continue;

    // Is this value a better spill candidate?
    if (!isRegToSpill(Reg)) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      if (MBB != UseMBB && MDT.dominates(MBB, UseMBB)) {
        // This is a valid spill location dominating UseVNI.
        // Prefer to spill at a smaller loop depth.
        unsigned Depth = Loops.getLoopDepth(MBB);
        if (Depth < SpillDepth) {
          DEBUG(dbgs() << "  spill depth " << Depth << ": " << PrintReg(Reg)
                       << ':' << VNI->id << '@' << VNI->def << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
          SpillDepth = Depth;
        }
      }
    }

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "  orig phi value " << PrintReg(Reg) << ':'
                     << VNI->id << '@' << VNI->def << '\n');
        SeenOrigPHI = true;
        continue;
      }
      // Get values live-out of predecessors.
      LiveInterval &LI = LIS.getInterval(Reg);
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI.getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(Reg, PVNI));
      }
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << "  copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        continue;
      }
    }

    // Track reachable reloads.
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "  reload " << PrintReg(Reg) << ':'
                   << VNI->id << "@" << VNI->def << '\n');
      SVI.AllDefsAreReloads = true;
      continue;
    }

    // We have an 'original' def. Don't record trivial cases.
    if (VNI == UseVNI) {
      DEBUG(dbgs() << "Not a sibling copy.\n");
      return MI;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "  def " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\t' << *MI);
    SVI.DefMI = MI;
  } while (!WorkList.empty());

  if (SeenOrigPHI || SVI.DefMI)
    SVI.AllDefsAreReloads = false;

  DEBUG({
    if (SVI.AllDefsAreReloads)
      dbgs() << "All defs are reloads.\n";
    else
      dbgs() << "Prefer to spill " << PrintReg(SVI.SpillReg) << ':'
             << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def << '\n';
  });
  SibValues.insert(std::make_pair(UseVNI, SVI));
  return SVI.DefMI;
}
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}
/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::const_iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads)
    return true;

  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(LIS.getInterval(SVI.SpillReg), SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MII = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);
  return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
      }
    }
  } while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//
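// Instead of reloading a value from the stack slot, it is sometimes cheaper to
// recompute it: reMaterializeAll() visits every use of the registers being
// spilled and lets reMaterializeFor() re-create the value right before the use.
// Values that cannot be rematerialized are recorded via markValueUsed() so
// their defining instructions are kept; fully rematerialized defs become dead
// and are deleted afterwards.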
/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx);

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        markValueUsed(&VirtReg, ParentVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//
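// Whatever remains after rematerialization is spilled to the shared stack slot:
// spillAll() merges all RegsToSpill into the stack interval and then calls
// spillAroundUses() for each register, which first tries to coalesce or fold
// stack accesses and otherwise inserts a reload before and a spill after each
// remaining use via insertReload() / insertSpill().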
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}
/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
           : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}
/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}
/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      if (Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, Idx, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
    // FIXME: Do this with a LiveRangeEdit callback.
    VRM.RemoveMachineInstrFromMaps(MI);
    LIS.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = 0;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}