// (imported from git web view: commit "Fixed some bugs.",
//  [llvm/zpu.git] / lib / CodeGen / Spiller.cpp,
//  blob 63112870bba95d8b348bfedb87dfbd400bc1257e)
//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spiller"

#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <set>

using namespace llvm;
31 namespace {
32 enum SpillerName { trivial, standard, splitting, inline_ };
35 static cl::opt<SpillerName>
36 spillerOpt("spiller",
37 cl::desc("Spiller to use: (default: standard)"),
38 cl::Prefix,
39 cl::values(clEnumVal(trivial, "trivial spiller"),
40 clEnumVal(standard, "default spiller"),
41 clEnumVal(splitting, "splitting spiller"),
42 clEnumValN(inline_, "inline", "inline spiller"),
43 clEnumValEnd),
44 cl::init(standard));
46 // Spiller virtual destructor implementation.
47 Spiller::~Spiller() {}
49 namespace {
51 /// Utility class for spillers.
52 class SpillerBase : public Spiller {
53 protected:
54 MachineFunctionPass *pass;
55 MachineFunction *mf;
56 VirtRegMap *vrm;
57 LiveIntervals *lis;
58 MachineFrameInfo *mfi;
59 MachineRegisterInfo *mri;
60 const TargetInstrInfo *tii;
61 const TargetRegisterInfo *tri;
63 /// Construct a spiller base.
64 SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
65 : pass(&pass), mf(&mf), vrm(&vrm)
67 lis = &pass.getAnalysis<LiveIntervals>();
68 mfi = mf.getFrameInfo();
69 mri = &mf.getRegInfo();
70 tii = mf.getTarget().getInstrInfo();
71 tri = mf.getTarget().getRegisterInfo();
74 /// Add spill ranges for every use/def of the live interval, inserting loads
75 /// immediately before each use, and stores after each def. No folding or
76 /// remat is attempted.
77 void trivialSpillEverywhere(LiveInterval *li,
78 SmallVectorImpl<LiveInterval*> &newIntervals) {
79 DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
81 assert(li->weight != HUGE_VALF &&
82 "Attempting to spill already spilled value.");
84 assert(!li->isStackSlot() &&
85 "Trying to spill a stack slot.");
87 DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
89 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
90 unsigned ss = vrm->assignVirt2StackSlot(li->reg);
92 // Iterate over reg uses/defs.
93 for (MachineRegisterInfo::reg_iterator
94 regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
96 // Grab the use/def instr.
97 MachineInstr *mi = &*regItr;
99 DEBUG(dbgs() << " Processing " << *mi);
101 // Step regItr to the next use/def instr.
102 do {
103 ++regItr;
104 } while (regItr != mri->reg_end() && (&*regItr == mi));
106 // Collect uses & defs for this instr.
107 SmallVector<unsigned, 2> indices;
108 bool hasUse = false;
109 bool hasDef = false;
110 for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
111 MachineOperand &op = mi->getOperand(i);
112 if (!op.isReg() || op.getReg() != li->reg)
113 continue;
114 hasUse |= mi->getOperand(i).isUse();
115 hasDef |= mi->getOperand(i).isDef();
116 indices.push_back(i);
119 // Create a new vreg & interval for this instr.
120 unsigned newVReg = mri->createVirtualRegister(trc);
121 vrm->grow();
122 vrm->assignVirt2StackSlot(newVReg, ss);
123 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
124 newLI->weight = HUGE_VALF;
126 // Update the reg operands & kill flags.
127 for (unsigned i = 0; i < indices.size(); ++i) {
128 unsigned mopIdx = indices[i];
129 MachineOperand &mop = mi->getOperand(mopIdx);
130 mop.setReg(newVReg);
131 if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
132 mop.setIsKill(true);
135 assert(hasUse || hasDef);
137 // Insert reload if necessary.
138 MachineBasicBlock::iterator miItr(mi);
139 if (hasUse) {
140 tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
141 tri);
142 MachineInstr *loadInstr(prior(miItr));
143 SlotIndex loadIndex =
144 lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
145 vrm->addSpillSlotUse(ss, loadInstr);
146 SlotIndex endIndex = loadIndex.getNextIndex();
147 VNInfo *loadVNI =
148 newLI->getNextValue(loadIndex, 0, lis->getVNInfoAllocator());
149 newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
152 // Insert store if necessary.
153 if (hasDef) {
154 tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
155 true, ss, trc, tri);
156 MachineInstr *storeInstr(llvm::next(miItr));
157 SlotIndex storeIndex =
158 lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
159 vrm->addSpillSlotUse(ss, storeInstr);
160 SlotIndex beginIndex = storeIndex.getPrevIndex();
161 VNInfo *storeVNI =
162 newLI->getNextValue(beginIndex, 0, lis->getVNInfoAllocator());
163 newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
166 newIntervals.push_back(newLI);
171 } // end anonymous namespace
173 namespace {
175 /// Spills any live range using the spill-everywhere method with no attempt at
176 /// folding.
177 class TrivialSpiller : public SpillerBase {
178 public:
180 TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf,
181 VirtRegMap &vrm)
182 : SpillerBase(pass, mf, vrm) {}
184 void spill(LiveInterval *li,
185 SmallVectorImpl<LiveInterval*> &newIntervals,
186 SmallVectorImpl<LiveInterval*> &) {
187 // Ignore spillIs - we don't use it.
188 trivialSpillEverywhere(li, newIntervals);
192 } // end anonymous namespace
194 namespace {
196 /// Falls back on LiveIntervals::addIntervalsForSpills.
197 class StandardSpiller : public Spiller {
198 protected:
199 MachineFunction *mf;
200 LiveIntervals *lis;
201 LiveStacks *lss;
202 MachineLoopInfo *loopInfo;
203 VirtRegMap *vrm;
204 public:
205 StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
206 VirtRegMap &vrm)
207 : mf(&mf),
208 lis(&pass.getAnalysis<LiveIntervals>()),
209 lss(&pass.getAnalysis<LiveStacks>()),
210 loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
211 vrm(&vrm) {}
213 /// Falls back on LiveIntervals::addIntervalsForSpills.
214 void spill(LiveInterval *li,
215 SmallVectorImpl<LiveInterval*> &newIntervals,
216 SmallVectorImpl<LiveInterval*> &spillIs) {
217 std::vector<LiveInterval*> added =
218 lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
219 newIntervals.insert(newIntervals.end(), added.begin(), added.end());
221 // Update LiveStacks.
222 int SS = vrm->getStackSlot(li->reg);
223 if (SS == VirtRegMap::NO_STACK_SLOT)
224 return;
225 const TargetRegisterClass *RC = mf->getRegInfo().getRegClass(li->reg);
226 LiveInterval &SI = lss->getOrCreateInterval(SS, RC);
227 if (!SI.hasAtLeastOneValue())
228 SI.getNextValue(SlotIndex(), 0, lss->getVNInfoAllocator());
229 SI.MergeRangesInAsValue(*li, SI.getValNumInfo(0));
233 } // end anonymous namespace
235 namespace {
237 /// When a call to spill is placed this spiller will first try to break the
238 /// interval up into its component values (one new interval per value).
239 /// If this fails, or if a call is placed to spill a previously split interval
240 /// then the spiller falls back on the standard spilling mechanism.
241 class SplittingSpiller : public StandardSpiller {
242 public:
243 SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
244 VirtRegMap &vrm)
245 : StandardSpiller(pass, mf, vrm) {
246 mri = &mf.getRegInfo();
247 tii = mf.getTarget().getInstrInfo();
248 tri = mf.getTarget().getRegisterInfo();
251 void spill(LiveInterval *li,
252 SmallVectorImpl<LiveInterval*> &newIntervals,
253 SmallVectorImpl<LiveInterval*> &spillIs) {
254 if (worthTryingToSplit(li))
255 tryVNISplit(li);
256 else
257 StandardSpiller::spill(li, newIntervals, spillIs);
260 private:
262 MachineRegisterInfo *mri;
263 const TargetInstrInfo *tii;
264 const TargetRegisterInfo *tri;
265 DenseSet<LiveInterval*> alreadySplit;
267 bool worthTryingToSplit(LiveInterval *li) const {
268 return (!alreadySplit.count(li) && li->getNumValNums() > 1);
271 /// Try to break a LiveInterval into its component values.
272 std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {
274 DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
276 std::vector<LiveInterval*> added;
277 SmallVector<VNInfo*, 4> vnis;
279 std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
281 for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
282 vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
283 VNInfo *vni = *vniItr;
285 // Skip unused VNIs.
286 if (vni->isUnused())
287 continue;
289 DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
290 LiveInterval *splitInterval = extractVNI(li, vni);
292 if (splitInterval != 0) {
293 DEBUG(dbgs() << *splitInterval << "\n");
294 added.push_back(splitInterval);
295 alreadySplit.insert(splitInterval);
296 } else {
297 DEBUG(dbgs() << "0\n");
301 DEBUG(dbgs() << "Original LI: " << *li << "\n");
303 // If there original interval still contains some live ranges
304 // add it to added and alreadySplit.
305 if (!li->empty()) {
306 added.push_back(li);
307 alreadySplit.insert(li);
310 return added;
313 /// Extract the given value number from the interval.
314 LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
315 assert((lis->getInstructionFromIndex(vni->def) != 0 || vni->isPHIDef()) &&
316 "Def index not sane?");
318 // Create a new vreg and live interval, copy VNI ranges over.
319 const TargetRegisterClass *trc = mri->getRegClass(li->reg);
320 unsigned newVReg = mri->createVirtualRegister(trc);
321 vrm->grow();
322 LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
323 VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
325 // Start by copying all live ranges in the VN to the new interval.
326 for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
327 rItr != rEnd; ++rItr) {
328 if (rItr->valno == vni) {
329 newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
333 // Erase the old VNI & ranges.
334 li->removeValNo(vni);
336 // Collect all current uses of the register belonging to the given VNI.
337 // We'll use this to rename the register after we've dealt with the def.
338 std::set<MachineInstr*> uses;
339 for (MachineRegisterInfo::use_iterator
340 useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
341 useItr != useEnd; ++useItr) {
342 uses.insert(&*useItr);
345 // Process the def instruction for this VNI.
346 if (newVNI->isPHIDef()) {
347 // Insert a copy at the start of the MBB. The range proceeding the
348 // copy will be attached to the original LiveInterval.
349 MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
350 MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
351 tii->get(TargetOpcode::COPY), newVReg)
352 .addReg(li->reg, RegState::Kill);
353 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
354 SlotIndex phiDefIdx = lis->getMBBStartIdx(defMBB);
355 assert(lis->getInstructionFromIndex(phiDefIdx) == 0 &&
356 "PHI def index points at actual instruction.");
357 VNInfo *phiDefVNI = li->getNextValue(phiDefIdx,
358 0, lis->getVNInfoAllocator());
359 phiDefVNI->setIsPHIDef(true);
360 li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
361 LiveRange *oldPHIDefRange =
362 newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
364 // If the old phi def starts in the middle of the range chop it up.
365 if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
366 LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
367 oldPHIDefRange->valno);
368 oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
369 newLI->addRange(oldPHIDefRange2);
370 } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
371 // Otherwise if it's at the start of the range just trim it.
372 oldPHIDefRange->start = copyIdx.getDefIndex();
373 } else {
374 assert(false && "PHI def range doesn't cover PHI def?");
377 newVNI->def = copyIdx.getDefIndex();
378 newVNI->setCopy(copyMI);
379 newVNI->setIsPHIDef(false); // not a PHI def anymore.
380 } else {
381 // non-PHI def. Rename the def. If it's two-addr that means renaming the
382 // use and inserting a new copy too.
383 MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
384 // We'll rename this now, so we can remove it from uses.
385 uses.erase(defInst);
386 unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
387 bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
388 twoAddrUseIsUndef = false;
390 for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
391 MachineOperand &mo = defInst->getOperand(i);
392 if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
393 mo.setReg(newVReg);
394 if (isTwoAddr && mo.isUse() && mo.isUndef())
395 twoAddrUseIsUndef = true;
399 SlotIndex defIdx = lis->getInstructionIndex(defInst);
400 newVNI->def = defIdx.getDefIndex();
402 if (isTwoAddr && !twoAddrUseIsUndef) {
403 MachineBasicBlock *defMBB = defInst->getParent();
404 MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
405 tii->get(TargetOpcode::COPY), newVReg)
406 .addReg(li->reg, RegState::Kill);
407 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
408 LiveRange *origUseRange =
409 li->getLiveRangeContaining(newVNI->def.getUseIndex());
410 origUseRange->end = copyIdx.getDefIndex();
411 VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
412 lis->getVNInfoAllocator());
413 LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
414 newLI->addRange(copyRange);
418 for (std::set<MachineInstr*>::iterator
419 usesItr = uses.begin(), usesEnd = uses.end();
420 usesItr != usesEnd; ++usesItr) {
421 MachineInstr *useInst = *usesItr;
422 SlotIndex useIdx = lis->getInstructionIndex(useInst);
423 LiveRange *useRange =
424 newLI->getLiveRangeContaining(useIdx.getUseIndex());
426 // If this use doesn't belong to the new interval skip it.
427 if (useRange == 0)
428 continue;
430 // This use doesn't belong to the VNI, skip it.
431 if (useRange->valno != newVNI)
432 continue;
434 // Check if this instr is two address.
435 unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
436 bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
438 // Rename uses (and defs for two-address instrs).
439 for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
440 MachineOperand &mo = useInst->getOperand(i);
441 if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
442 (mo.getReg() == li->reg)) {
443 mo.setReg(newVReg);
447 // If this is a two address instruction we've got some extra work to do.
448 if (isTwoAddress) {
449 // We modified the def operand, so we need to copy back to the original
450 // reg.
451 MachineBasicBlock *useMBB = useInst->getParent();
452 MachineBasicBlock::iterator useItr(useInst);
453 MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
454 tii->get(TargetOpcode::COPY), newVReg)
455 .addReg(li->reg, RegState::Kill);
456 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
458 // Change the old two-address defined range & vni to start at
459 // (and be defined by) the copy.
460 LiveRange *origDefRange =
461 li->getLiveRangeContaining(useIdx.getDefIndex());
462 origDefRange->start = copyIdx.getDefIndex();
463 origDefRange->valno->def = copyIdx.getDefIndex();
464 origDefRange->valno->setCopy(copyMI);
466 // Insert a new range & vni for the two-address-to-copy value. This
467 // will be attached to the new live interval.
468 VNInfo *copyVNI =
469 newLI->getNextValue(useIdx.getDefIndex(), 0,
470 lis->getVNInfoAllocator());
471 LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
472 newLI->addRange(copyRange);
476 // Iterate over any PHI kills - we'll need to insert new copies for them.
477 for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
478 LRI != LRE; ++LRI) {
479 if (LRI->valno != newVNI)
480 continue;
481 SlotIndex killIdx = LRI->end;
482 MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
483 MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
484 DebugLoc(), tii->get(TargetOpcode::COPY),
485 li->reg)
486 .addReg(newVReg, RegState::Kill);
487 SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
489 // Save the current end. We may need it to add a new range if the
490 // current range runs of the end of the MBB.
491 SlotIndex newKillRangeEnd = LRI->end;
492 LRI->end = copyIdx.getDefIndex();
494 if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
495 assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
496 "PHI kill range doesn't reach kill-block end. Not sane.");
497 newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
498 newKillRangeEnd, newVNI));
501 VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
502 copyMI, lis->getVNInfoAllocator());
503 newKillVNI->setHasPHIKill(true);
504 li->addRange(LiveRange(copyIdx.getDefIndex(),
505 lis->getMBBEndIdx(killMBB),
506 newKillVNI));
508 newVNI->setHasPHIKill(false);
510 return newLI;
515 } // end anonymous namespace
518 namespace llvm {
519 Spiller *createInlineSpiller(MachineFunctionPass &pass,
520 MachineFunction &mf,
521 VirtRegMap &vrm);
524 llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
525 MachineFunction &mf,
526 VirtRegMap &vrm) {
527 switch (spillerOpt) {
528 default: assert(0 && "unknown spiller");
529 case trivial: return new TrivialSpiller(pass, mf, vrm);
530 case standard: return new StandardSpiller(pass, mf, vrm);
531 case splitting: return new SplittingSpiller(pass, mf, vrm);
532 case inline_: return createInlineSpiller(pass, mf, vrm);