//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(false), SpillReg(Reg), SpillVNI(VNI), DefMI(0) {}
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;
public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}
  void spill(LiveRangeEdit&);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
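//
// For example, live range splitting can leave a tiny sibling range whose only
// real use is a single load or store; once that sibling shares the main
// register's stack slot, the use can often be folded into a memory operand and
// the connecting copies simply disappear.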
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isCopy())
    return 0;
  if (MI->getOperand(0).getSubReg() != 0)
    return 0;
  if (MI->getOperand(1).getSubReg() != 0)
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (!isRegToSpill(SnipReg))
      RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
  }
}
//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
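//
// traceSiblingValue() below implements this as a worklist walk over
// (register, value) pairs: it looks through PHI-defs and full sibling copies,
// records the best dominating spill location (preferring a smaller loop
// depth), and notes whether every reaching def was already a reload from the
// stack slot.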
bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');
  SmallPtrSet<VNInfo*, 8> Visited;
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  // Best spill candidate seen so far. This must dominate UseVNI.
  SibValueInfo SVI(UseReg, UseVNI);
  MachineBasicBlock *UseMBB = LIS.getMBBFromIndex(UseVNI->def);
  unsigned SpillDepth = Loops.getLoopDepth(UseMBB);
  bool SeenOrigPHI = false; // Original PHI met.

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    if (!Visited.insert(VNI))
      continue;

    // Is this value a better spill candidate?
    if (!isRegToSpill(Reg)) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      if (MBB != UseMBB && MDT.dominates(MBB, UseMBB)) {
        // This is a valid spill location dominating UseVNI.
        // Prefer to spill at a smaller loop depth.
        unsigned Depth = Loops.getLoopDepth(MBB);
        if (Depth < SpillDepth) {
          DEBUG(dbgs() << "  spill depth " << Depth << ": " << PrintReg(Reg)
                       << ':' << VNI->id << '@' << VNI->def << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
          SpillDepth = Depth;
        }
      }
    }

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "  orig phi value " << PrintReg(Reg) << ':'
                     << VNI->id << '@' << VNI->def << '\n');
        SeenOrigPHI = true;
        continue;
      }
      // Get values live-out of predecessors.
      LiveInterval &LI = LIS.getInterval(Reg);
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI.getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(Reg, PVNI));
      }
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << "  copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        continue;
      }
    }

    // Track reachable reloads.
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "  reload " << PrintReg(Reg) << ':'
                   << VNI->id << "@" << VNI->def << '\n');
      SVI.AllDefsAreReloads = true;
      continue;
    }

    // We have an 'original' def. Don't record trivial cases.
    if (VNI == UseVNI) {
      DEBUG(dbgs() << "Not a sibling copy.\n");
      return MI;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "  def " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\t' << *MI);
    SVI.DefMI = MI;
  } while (!WorkList.empty());

  if (SeenOrigPHI || SVI.DefMI)
    SVI.AllDefsAreReloads = false;

  DEBUG({
    if (SVI.AllDefsAreReloads)
      dbgs() << "All defs are reloads.\n";
    else
      dbgs() << "Prefer to spill " << PrintReg(SVI.SpillReg) << ':'
             << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def << '\n';
  });
  SibValues.insert(std::make_pair(UseVNI, SVI));
  return SVI.DefMI;
}
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}
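
// The SibValues map filled in above is consulted in two places: hoistSpill()
// looks up a sibling copy's value to find a better spill location, and
// reMaterializeFor() uses the recorded DefMI as its rematerialization
// template.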
/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::const_iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads)
    return true;

  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(LIS.getInterval(SVI.SpillReg), SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MII = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);
  return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
      }
    }
  } while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx);

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        markValueUsed(&VirtReg, ParentVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}
/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI Instruction using or defining the current register.
/// @param Ops Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}
/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}
/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      if (Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, Idx, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
    // FIXME: Do this with a LiveRangeEdit callback.
    VRM.RemoveMachineInstrFromMaps(MI);
    LIS.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}
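
// spill() below is the driver: collect snippets, analyze sibling values, try
// rematerialization first, and only spill whatever is left in RegsToSpill.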
void InlineSpiller::spill(LiveRangeEdit &edit) {
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = 0;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}