//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  MachineDominatorTree &mdt_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;
  const TargetRegisterClass *rc_;
  int stackSlot_;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      mdt_(pass.getAnalysis<MachineDominatorTree>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs);
  void spill(LiveRangeEdit&);

private:
  bool split();

  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}
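
// How this is meant to be driven: a register allocator talks to the spiller
// through the Spiller interface only. A rough sketch of the expected calling
// sequence (the allocator-side names are illustrative, not part of this file):
//
//   Spiller *spiller = createInlineSpiller(pass, mf, vrm);
//   SmallVector<LiveInterval*, 4> newIntervals;
//   spiller->spill(&li, newIntervals, spillIs);
//   // newIntervals now holds the small, not-spillable intervals created
//   // around each use; the allocator must assign them registers.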

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  if (VerifySpills)
    mf.verify(&pass);
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  splitAnalysis_.analyze(&edit_->getParent());

  // Try splitting around loops.
  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitAroundLoop(loop);
    return true;
  }

  // Try splitting into single block intervals.
  SplitAnalysis::BlockPtrSet blocks;
  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitSingleBlocks(blocks);
    return true;
  }

  // Try splitting inside a basic block.
  if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitInsideBlock(MBB);
    return true;
  }

  return false;
}
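
// Note that splitting is attempted before any spill code is emitted: if the
// live range can be broken into smaller pieces, each piece may get a register
// of its own, and no stack traffic is needed at all.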

/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI
/// instead of reloading it from the stack slot.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  LiveRangeEdit::Remat RM = edit_->canRematerializeAt(OrigVNI, UseIdx, false,
                                                      lis_);
  if (!RM) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }
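
// usedValues_ records values that could not be rematerialized at some use.
// reMaterializeAll() consults this set before deleting any defining
// instructions, so a partially rematerialized value keeps its original def.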

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
      usedValues_.insert(OrigVNI);
      DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
      return false;
    }
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\n');

  // Replace operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
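
// Example (illustrative only; opcodes are target-specific): if the original
// def was "%reg1025 = MOV32ri 42", the use is rewritten to read a fresh
// virtual register defined by a new copy of that instruction immediately
// before it, and NewLI covers just the span from the remat def to the use.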

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, 0))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);
  if (!anyRemat)
    return;
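
// Note that RI.skipInstruction() returns the current user and advances the
// iterator past all operands of that instruction first, so reMaterializeFor()
// may rewrite or erase MI without invalidating the iteration.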

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}
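
// The DBG_VALUE is only worth keeping if the spilled register's parent
// interval is still live immediately after it; otherwise the debug info
// would describe a location that no longer holds the value.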

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}
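
// This catches stack accesses that are made redundant by the spilling
// itself, e.g. a reload of the register from the very slot it is now being
// spilled to; such an instruction can simply be deleted.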

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}
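
// On x86, for instance, a successful fold can turn a reload feeding ADD32rr
// into a single ADD32rm with a frame-index memory operand (illustrative;
// what actually folds is decided entirely by the target's implementation of
// TargetInstrInfo::foldMemoryOperand).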

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}
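
// Both helpers give NewLI a single short live range: reload-to-use in
// insertReload(), def-to-store in insertSpill(). These intervals are as
// small as spilling can make them, which is why they are marked not
// spillable; spilling them again could not make any progress.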

void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!edit.getParent().isStackSlot() && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());

  // Update LiveStacks now that we are committed to spilling.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  assert(stacklvr.empty() && "Just created stack slot not empty");
  stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));
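
// The stack slot's live interval mirrors the register's liveness with a
// single value number, giving LiveStacks the information needed later to
// decide which spill slots interfere and which may share a frame index.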

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}