//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of snippets included in spills");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills after reloads");
STATISTIC(NumHoistLocal,      "Number of locally hoisted spills");
STATISTIC(NumHoistGlobal,     "Number of globally hoisted spills");
STATISTIC(NumRedundantSpills, "Number of redundant spills identified");
namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(false), SpillReg(Reg), SpillVNI(VNI), DefMI(0) {}
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;
public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}
  void spill(LiveRangeEdit &);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
} // end anonymous namespace
namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}
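
// Note that isFullCopyOf() returns 0 both when MI is not a full copy and when
// neither operand is Reg, so callers can test the returned register directly
// in a boolean context.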
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}
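
// The getNumValNums() > 2 check above presumably reflects that a snippet
// needs at most two values: one defined by the incoming copy or fill, and
// possibly a second one when the single real use also redefines the register
// (e.g. a two-address instruction).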
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}
//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
    VRM.getOriginal(Reg) == Original;
}
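
// For example, when live range splitting turns the original %vreg7 into
// %vreg12 and %vreg13, the two new registers are siblings: VRM.getOriginal()
// maps both back to %vreg7, so they can share a single stack slot.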
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');
  SmallPtrSet<VNInfo*, 8> Visited;
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  // Best spill candidate seen so far. This must dominate UseVNI.
  SibValueInfo SVI(UseReg, UseVNI);
  MachineBasicBlock *UseMBB = LIS.getMBBFromIndex(UseVNI->def);
  MachineBasicBlock *SpillMBB = UseMBB;
  unsigned SpillDepth = Loops.getLoopDepth(SpillMBB);
  bool SeenOrigPHI = false; // Original PHI met.

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    if (!Visited.insert(VNI))
      continue;

    // Is this value a better spill candidate?
    if (!isRegToSpill(Reg)) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      if (MBB == SpillMBB) {
        // This is an alternative def earlier in the same MBB.
        // Hoist the spill as far as possible in SpillMBB. This can ease
        // register pressure:
        //
        //   x = def
        //   y = use x
        //   s = copy x
        //
        // Hoisting the spill of s to immediately after the def removes the
        // interference between x and y:
        //
        //   x = def
        //   spill x
        //   y = use x
        //
        if (VNI->def < SVI.SpillVNI->def) {
          DEBUG(dbgs() << "  hoist in BB#" << MBB->getNumber() << ": "
                       << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                       << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
        }
      } else if (MBB != UseMBB && MDT.dominates(MBB, UseMBB)) {
        // This is a valid spill location dominating UseVNI.
        // Prefer to spill at a smaller loop depth.
        unsigned Depth = Loops.getLoopDepth(MBB);
        if (Depth < SpillDepth) {
          DEBUG(dbgs() << "  spill depth " << Depth << ": " << PrintReg(Reg)
                       << ':' << VNI->id << '@' << VNI->def << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
          SpillMBB = MBB;
          SpillDepth = Depth;
        }
      }
    }

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "  orig phi value " << PrintReg(Reg) << ':'
                     << VNI->id << '@' << VNI->def << '\n');
        SeenOrigPHI = true;
        continue;
      }
      // Get values live-out of predecessors.
      LiveInterval &LI = LIS.getInterval(Reg);
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI.getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(Reg, PVNI));
      }
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << "  copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        continue;
      }
    }

    // Track reachable reloads.
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "  reload " << PrintReg(Reg) << ':'
                   << VNI->id << '@' << VNI->def << '\n');
      SVI.AllDefsAreReloads = true;
      continue;
    }

    // We have an 'original' def. Don't record trivial cases.
    if (VNI == UseVNI) {
      DEBUG(dbgs() << "Not a sibling copy.\n");
      return MI;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "  def " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\t' << *MI);
    SVI.DefMI = MI;
  } while (!WorkList.empty());

  if (SeenOrigPHI || SVI.DefMI)
    SVI.AllDefsAreReloads = false;

  DEBUG({
    if (SVI.AllDefsAreReloads)
      dbgs() << "All defs are reloads.\n";
    else
      dbgs() << "Prefer to spill " << PrintReg(SVI.SpillReg) << ':'
             << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def << '\n';
  });
  SibValues.insert(std::make_pair(UseVNI, SVI));
  return SVI.DefMI;
}
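
// The traced SibValueInfo is cached in SibValues so that hoistSpill() and
// reMaterializeFor() can reuse the result without re-walking the sibling
// copy graph.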
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
           VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}
/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    ++NumOmitReloadSpill;
    return true;
  }
  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  if (MBB == CopyMI->getParent())
    ++NumHoistLocal;
  else
    ++NumHoistGlobal;
  return true;
}
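
// Note that hoistSpill() leaves the original sibling COPY in place; the
// caller (spillAroundUses) marks it dead and queues it on DeadDefs for
// eliminateDeadDefs() to remove.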
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumRedundantSpills;
      }
    }
  } while (!WorkList.empty());
}
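
// The redundant stores are not erased here; rewriting them as KILLs keeps
// the slot index maps intact until eliminateDeadDefs() deletes them for real.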
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx);

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        markValueUsed(&VirtReg, ParentVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  ++NumRemats;
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
           RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted too.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}
/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
           : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  ++NumFolded;
  return true;
}
/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
  ++NumReloads;
}
/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
  ++NumSpills;
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr,
                                                             DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, Idx, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
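
// Each surviving use of Reg thus ends up bracketed by an optional reload
// before and an optional spill after, with the short-lived NewLI covering
// little more than the instruction itself.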
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
         MachineInstr *MI = RI.skipInstruction();) {
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      VRM.RemoveMachineInstrFromMaps(MI);
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = 0;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}