//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"

#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
#include <set>

using namespace llvm;

namespace {
  enum SpillerName { trivial, standard, splitting, inline_ };
}

static cl::opt<SpillerName>
spillerOpt("spiller",
           cl::desc("Spiller to use: (default: standard)"),
           cl::values(clEnumVal(trivial,   "trivial spiller"),
                      clEnumVal(standard,  "default spiller"),
                      clEnumVal(splitting, "splitting spiller"),
                      clEnumValN(inline_,  "inline", "inline spiller"),
                      clEnumValEnd),
           cl::init(standard));

// Spiller virtual destructor implementation.
Spiller::~Spiller() {}

namespace {

/// Utility class for spillers.
class SpillerBase : public Spiller {
protected:
  MachineFunctionPass *pass;
  MachineFunction *mf;
  VirtRegMap *vrm;
  LiveIntervals *lis;
  MachineFrameInfo *mfi;
  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;

  /// Construct a spiller base.
  SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
    : pass(&pass), mf(&mf), vrm(&vrm)
  {
    lis = &pass.getAnalysis<LiveIntervals>();
    mfi = mf.getFrameInfo();
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted.
  void trivialSpillEverywhere(LiveInterval *li,
                              SmallVectorImpl<LiveInterval*> &newIntervals) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");

    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");

    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");

    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");

    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);
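
    // Every new vreg created below shares this one stack slot, so each use
    // reloads from, and each def stores to, the same frame index.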

    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {

      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;

      DEBUG(dbgs() << " Processing " << *mi);

      // Step regItr to the next use/def instr.
      do {
        ++regItr;
      } while (regItr != mri->reg_end() && (&*regItr == mi));
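
      // Advancing past all of mi's operands up front keeps the use/def chain
      // iterator valid when those operands are rewritten below.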

      // Collect uses & defs for this instr.
      SmallVector<unsigned, 2> indices;
      bool hasUse = false;
      bool hasDef = false;
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        if (!op.isReg() || op.getReg() != li->reg)
          continue;
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);
      }

      // Create a new vreg & interval for this instr.
      unsigned newVReg = mri->createVirtualRegister(trc);
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      newLI->weight = HUGE_VALF;
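
      // The HUGE_VALF weight marks the new interval as already spilled, which
      // is exactly what the assertion at the top of this function checks for.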

      // Update the reg operands & kill flags.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);

        mop.setReg(newVReg);

        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
          mop.setIsKill(true);
        }
      }

      assert(hasUse || hasDef);
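
      // newVReg is only used by mi itself, so flagging its non-tied uses as
      // kills above is safe.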

      // Insert reload if necessary.
      MachineBasicBlock::iterator miItr(mi);
      if (hasUse) {
        tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
                                  tri);
        MachineInstr *loadInstr(prior(miItr));
        SlotIndex loadIndex =
          lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, loadInstr);
        SlotIndex endIndex = loadIndex.getNextIndex();
        VNInfo *loadVNI =
          newLI->getNextValue(loadIndex, 0, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
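
        // The reloaded value only needs to be live up to its use at mi, the
        // very next instruction, so the range covers just [loadIndex, endIndex).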
      }

      // Insert store if necessary.
      if (hasDef) {
        tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
                                 true, ss, trc, tri);
        MachineInstr *storeInstr(llvm::next(miItr));
        SlotIndex storeIndex =
          lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, storeInstr);
        SlotIndex beginIndex = storeIndex.getPrevIndex();
        VNInfo *storeVNI =
          newLI->getNextValue(beginIndex, 0, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
      }

      newIntervals.push_back(newLI);
    }
  }
};

} // end anonymous namespace

namespace {

/// Spills any live range using the spill-everywhere method with no attempt at
/// folding.
class TrivialSpiller : public SpillerBase {
public:

  TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                 VirtRegMap &vrm)
    : SpillerBase(pass, mf, vrm) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &) {
    // Ignore spillIs - we don't use it.
    trivialSpillEverywhere(li, newIntervals);
  }
};

} // end anonymous namespace

namespace {

/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
protected:
  MachineFunction *mf;
  LiveIntervals *lis;
  LiveStacks *lss;
  MachineLoopInfo *loopInfo;
  VirtRegMap *vrm;
public:
  StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                  VirtRegMap &vrm)
    : mf(&mf),
      lis(&pass.getAnalysis<LiveIntervals>()),
      lss(&pass.getAnalysis<LiveStacks>()),
      loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
      vrm(&vrm) {}
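
  // Note: loopInfo comes from getAnalysisIfAvailable and may be null.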

  /// Falls back on LiveIntervals::addIntervalsForSpills.
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs) {
    std::vector<LiveInterval*> added =
      lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
    newIntervals.insert(newIntervals.end(), added.begin(), added.end());
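
    // Merging the spilled interval's ranges into the stack slot interval keeps
    // LiveStacks consistent for later passes such as stack slot coloring.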

    // Update LiveStacks.
    int SS = vrm->getStackSlot(li->reg);
    if (SS == VirtRegMap::NO_STACK_SLOT)
      return;
    const TargetRegisterClass *RC = mf->getRegInfo().getRegClass(li->reg);
    LiveInterval &SI = lss->getOrCreateInterval(SS, RC);
    if (!SI.hasAtLeastOneValue())
      SI.getNextValue(SlotIndex(), 0, lss->getVNInfoAllocator());
    SI.MergeRangesInAsValue(*li, SI.getValNumInfo(0));
  }
};

} // end anonymous namespace

namespace {

/// When a call to spill is placed this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval
/// then the spiller falls back on the standard spilling mechanism.
class SplittingSpiller : public StandardSpiller {
public:
  SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
    : StandardSpiller(pass, mf, vrm) {
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs) {
    if (worthTryingToSplit(li))
      tryVNISplit(li);
    else
      StandardSpiller::spill(li, newIntervals, spillIs);
  }

private:

  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;
  DenseSet<LiveInterval*> alreadySplit;

  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);
  }
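
  // Splitting only pays off when there is more than one value number to
  // separate; intervals produced by an earlier split go straight to the
  // standard spiller so they are never re-split.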

  /// Try to break a LiveInterval into its component values.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {

    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");

    std::vector<LiveInterval*> added;
    SmallVector<VNInfo*, 4> vnis;

    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
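
    // extractVNI() removes value numbers from li, so iterate over a snapshot
    // of the VNInfo pointers rather than over li's own vni iterators.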

    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;

      DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);

      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
      } else {
        DEBUG(dbgs() << "0\n");
      }
    }

    DEBUG(dbgs() << "Original LI: " << *li << "\n");

    // If the original interval still contains some live ranges
    // add it to added and alreadySplit.
    if (!li->empty()) {
      added.push_back(li);
      alreadySplit.insert(li);
    }

    return added;
  }

  /// Extract the given value number from the interval.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert((lis->getInstructionFromIndex(vni->def) != 0 || vni->isPHIDef()) &&
           "Def index not sane?");

    // Create a new vreg and live interval, copy VNI ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);
    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
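
    // newVNI starts out as a copy of vni (same def slot). The code below moves
    // vni's ranges into newLI and then rewrites the def and uses to newVReg.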

    // Start by copying all live ranges in the VN to the new interval.
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
      }
    }

    // Erase the old VNI & ranges.
    li->removeValNo(vni);

    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);
    }
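
    // The set holds uses of every value number of li->reg; the renaming loop
    // further down filters it against newLI's ranges so that only instructions
    // covered by the extracted value are touched.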

    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
                                     tii->get(TargetOpcode::COPY), newVReg)
                               .addReg(li->reg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      SlotIndex phiDefIdx = lis->getMBBStartIdx(defMBB);
      assert(lis->getInstructionFromIndex(phiDefIdx) == 0 &&
             "PHI def index points at actual instruction.");
      VNInfo *phiDefVNI = li->getNextValue(phiDefIdx,
                                           0, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
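
      // li keeps a fresh PHI-def value covering [block start, copy), while
      // newLI's range is adjusted below so the extracted value is defined by
      // the copy instruction instead of the PHI.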

      // If the old phi def starts in the middle of the range chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
      } else {
        assert(false && "PHI def range doesn't cover PHI def?");
      }

      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
    } else {
      // non-PHI def. Rename the def. If it's two-addr that means renaming the
      // use and inserting a new copy too.
      MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
      // We'll rename this now, so we can remove it from uses.
      uses.erase(defInst);
      unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
      bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
        twoAddrUseIsUndef = false;

      for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
        MachineOperand &mo = defInst->getOperand(i);
        if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
          mo.setReg(newVReg);
          if (isTwoAddr && mo.isUse() && mo.isUndef())
            twoAddrUseIsUndef = true;
        }
      }

      SlotIndex defIdx = lis->getInstructionIndex(defInst);
      newVNI->def = defIdx.getDefIndex();
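
      // For a two-address def the tied use was renamed to newVReg as well, so
      // unless that use was <undef> a copy from the original register is
      // needed to feed its incoming value.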
      if (isTwoAddr && !twoAddrUseIsUndef) {
        MachineBasicBlock *defMBB = defInst->getParent();
        MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
        LiveRange *origUseRange =
          li->getLiveRangeContaining(newVNI->def.getUseIndex());
        origUseRange->end = copyIdx.getDefIndex();
        VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                              lis->getVNInfoAllocator());
        LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());

      // If this use doesn't belong to the new interval skip it.
      if (useRange == 0)
        continue;

      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)
        continue;

      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);

      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {
          mo.setReg(newVReg);
        }
      }

      // If this is a two address instruction we've got some extra work to do.
      if (isTwoAddress) {
        // We modified the def operand, so we need to copy back to the original
        // reg.
        MachineBasicBlock *useMBB = useInst->getParent();
        MachineBasicBlock::iterator useItr(useInst);
        MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Change the old two-address defined range & vni to start at
        // (and be defined by) the copy.
        LiveRange *origDefRange =
          li->getLiveRangeContaining(useIdx.getDefIndex());
        origDefRange->start = copyIdx.getDefIndex();
        origDefRange->valno->def = copyIdx.getDefIndex();
        origDefRange->valno->setCopy(copyMI);

        // Insert a new range & vni for the two-address-to-copy value. This
        // will be attached to the new live interval.
        VNInfo *copyVNI =
          newLI->getNextValue(useIdx.getDefIndex(), 0,
                              lis->getVNInfoAllocator());
        LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
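
        // After this fixup newLI carries the value from the two-address def up
        // to the copy, and li's value is re-defined by the copy from there on.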
      }
    }

    // Iterate over any PHI kills - we'll need to insert new copies for them.
    for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
         LRI != LRE; ++LRI) {
      if (LRI->valno != newVNI)
        continue;
      SlotIndex killIdx = LRI->end;
      MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
      MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
                                     DebugLoc(), tii->get(TargetOpcode::COPY),
                                     li->reg)
                               .addReg(newVReg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

      // Save the current end. We may need it to add a new range if the
      // current range runs off the end of the MBB.
      SlotIndex newKillRangeEnd = LRI->end;
      LRI->end = copyIdx.getDefIndex();

      if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
        assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
               "PHI kill range doesn't reach kill-block end. Not sane.");
        newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                  newKillRangeEnd, newVNI));
      }

      VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                            copyMI, lis->getVNInfoAllocator());
      newKillVNI->setHasPHIKill(true);
      li->addRange(LiveRange(copyIdx.getDefIndex(),
                             lis->getMBBEndIdx(killMBB),
                             newKillVNI));
    }

    newVNI->setHasPHIKill(false);

    return newLI;
  }

};

} // end anonymous namespace
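
// The inline spiller is implemented in InlineSpiller.cpp.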

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm);
}

llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  switch (spillerOpt) {
  default: assert(0 && "unknown spiller");
  case trivial: return new TrivialSpiller(pass, mf, vrm);
  case standard: return new StandardSpiller(pass, mf, vrm);
  case splitting: return new SplittingSpiller(pass, mf, vrm);
  case inline_: return createInlineSpiller(pass, mf, vrm);
  }
}