//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
         E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
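
  // Each instruction is allotted InstrSlots::NUM consecutive index values,
  // one per sub-slot (load, use, def and store), so there are distinct
  // index points just before and just after every instruction.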

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = true;
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
             LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
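        // (A start that falls on a LOAD slot is taken to mean the range was
        // live into its block, so it is rebased on the new start of the
        // block that contained the old index rather than on an instruction.)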
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
             VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                      (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Computes the live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
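/// The interval is scanned one instruction at a time (InstrSlots::NUM index
/// units per step), skipping indices whose instructions have been deleted.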
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
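      // For example, after two-address conversion an instruction such as
      //   %reg1024 = ADD %reg1024, %reg1025
      // both reads and redefines %reg1024, so the single range computed so
      // far really carries two values: the one reaching the ADD's use and
      // the one the ADD defines.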
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or any other instruction where we know it's dead, is if it
  // is live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

exit:
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }
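
  // A live-in value has no defining instruction inside the function, so its
  // value number is created with the ~0U sentinel def index.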
  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
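  // Physical registers are effectively unspillable, so give them an infinite
  // spill weight; virtual registers start at zero and accumulate weight from
  // their uses.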
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
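  // When isSS is true, the memory operand being folded is stack slot Slot;
  // otherwise the rematerialized definition DefMI itself is folded.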
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
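
/// intervalIsInOneMBB - Returns true if the given live interval is entirely
/// contained within a single basic block.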
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
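  // Folding a memory operand below may replace MI with a new instruction;
  // when that happens, the operand scan restarts from this label.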
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
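
/// anyKillInMBBAfterIdx - Returns true if the specified val# of the given
/// interval has a kill within MBB after the given index.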
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };
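
  // Orders RewriteInfo entries by ascending instruction index, so rewrites
  // are processed in program order.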
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1513 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1514 // Now rewrite the defs and uses.
1515 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1516 RewriteInfo &rwi = RewriteMIs[i];
1517 ++i;
1518 unsigned index = rwi.Index;
1519 bool MIHasUse = rwi.HasUse;
1520 bool MIHasDef = rwi.HasDef;
1521 MachineInstr *MI = rwi.MI;
1522 // If MI def and/or use the same register multiple times, then there
1523 // are multiple entries.
1524 unsigned NumUses = MIHasUse;
1525 while (i != e && RewriteMIs[i].MI == MI) {
1526 assert(RewriteMIs[i].Index == index);
1527 bool isUse = RewriteMIs[i].HasUse;
1528 if (isUse) ++NumUses;
1529 MIHasUse |= isUse;
1530 MIHasDef |= RewriteMIs[i].HasDef;
1531 ++i;
1533 MachineBasicBlock *MBB = MI->getParent();
1535 if (ImpUse && MI != ReMatDefMI) {
1536 // Re-matting an instruction with virtual register use. Update the
1537 // register interval's spill weight to HUGE_VALF to prevent it from
1538 // being spilled.
1539 LiveInterval &ImpLi = getInterval(ImpUse);
1540 ImpLi.weight = HUGE_VALF;
1543 unsigned MBBId = MBB->getNumber();
1544 unsigned ThisVReg = 0;
1545 if (TrySplit) {
1546 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1547 if (NVI != MBBVRegsMap.end()) {
1548 ThisVReg = NVI->second;
1549 // One common case:
1550 // x = use
1551 // ...
1552 // ...
1553 // def = ...
1554 // = use
1555 // It's better to start a new interval to avoid artifically
1556 // extend the new interval.
1557 if (MIHasDef && !MIHasUse) {
1558 MBBVRegsMap.erase(MBB->getNumber());
1559 ThisVReg = 0;
1564 bool IsNew = ThisVReg == 0;
1565 if (IsNew) {
1566 // This ends the previous live interval. If all of its def / use
1567 // can be folded, give it a low spill weight.
1568 if (NewVReg && TrySplit && AllCanFold) {
1569 LiveInterval &nI = getOrCreateInterval(NewVReg);
1570 nI.weight /= 10.0F;
1572 AllCanFold = true;
1574 NewVReg = ThisVReg;
1576 bool HasDef = false;
1577 bool HasUse = false;
1578 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1579 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1580 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1581 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1582 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1583 if (!HasDef && !HasUse)
1584 continue;
1586 AllCanFold &= CanFold;
1588 // Update weight of spill interval.
1589 LiveInterval &nI = getOrCreateInterval(NewVReg);
1590 if (!TrySplit) {
1591 // The spill weight is now infinity as it cannot be spilled again.
1592 nI.weight = HUGE_VALF;
1593 continue;
1596 // Keep track of the last def and first use in each MBB.
1597 if (HasDef) {
1598 if (MI != ReMatOrigDefMI || !CanDelete) {
1599 bool HasKill = false;
1600 if (!HasUse)
1601 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1602 else {
1603 // If this is a two-address code, then this index starts a new VNInfo.
1604 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1605 if (VNI)
1606 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1608 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1609 SpillIdxes.find(MBBId);
1610 if (!HasKill) {
1611 if (SII == SpillIdxes.end()) {
1612 std::vector<SRInfo> S;
1613 S.push_back(SRInfo(index, NewVReg, true));
1614 SpillIdxes.insert(std::make_pair(MBBId, S));
1615 } else if (SII->second.back().vreg != NewVReg) {
1616 SII->second.push_back(SRInfo(index, NewVReg, true));
1617 } else if ((int)index > SII->second.back().index) {
1618 // If there is an earlier def and this is a two-address
1619 // instruction, then it's not possible to fold the store (which
1620 // would also fold the load).
1621 SRInfo &Info = SII->second.back();
1622 Info.index = index;
1623 Info.canFold = !HasUse;
1624 }
1625 SpillMBBs.set(MBBId);
1626 } else if (SII != SpillIdxes.end() &&
1627 SII->second.back().vreg == NewVReg &&
1628 (int)index > SII->second.back().index) {
1629 // There is an earlier def that's not killed (must be two-address).
1630 // The spill is no longer needed.
1631 SII->second.pop_back();
1632 if (SII->second.empty()) {
1633 SpillIdxes.erase(MBBId);
1634 SpillMBBs.reset(MBBId);
1635 }
1636 }
1637 }
1638 }
1640 if (HasUse) {
1641 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1642 SpillIdxes.find(MBBId);
1643 if (SII != SpillIdxes.end() &&
1644 SII->second.back().vreg == NewVReg &&
1645 (int)index > SII->second.back().index)
1646 // There are use(s) following the last def; it's not safe to fold the spill.
1647 SII->second.back().canFold = false;
1648 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1649 RestoreIdxes.find(MBBId);
1650 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1651 // If we are splitting live intervals, only fold if it's the first
1652 // use and there isn't another use later in the MBB.
1653 RII->second.back().canFold = false;
1654 else if (IsNew) {
1655 // Only need a reload if there isn't an earlier def / use.
1656 if (RII == RestoreIdxes.end()) {
1657 std::vector<SRInfo> Infos;
1658 Infos.push_back(SRInfo(index, NewVReg, true));
1659 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1660 } else {
1661 RII->second.push_back(SRInfo(index, NewVReg, true));
1662 }
1663 RestoreMBBs.set(MBBId);
1664 }
1665 }
1667 // Update spill weight.
1668 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
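// nI.weight accumulates an estimate of how costly this interval is to
// spill. A sketch of the usual heuristic (getSpillWeight is defined
// elsewhere in this file; treat the exact formula as an assumption):
//
//   float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
//     return (isDef + isUse) * powf(10.0F, (float)loopDepth);
//   }
//
// e.g. a def+use at loop depth 2 adds 2 * 100 = 200 to the weight.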
1669 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1670 }
1672 if (NewVReg && TrySplit && AllCanFold) {
1673 // If all of its def / use can be folded, give it a low spill weight.
1674 LiveInterval &nI = getOrCreateInterval(NewVReg);
1675 nI.weight /= 10.0F;
1676 }
1677 }
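/// alsoFoldARestore - Return true if a foldable restore of register vr has
/// been recorded at the given index in basic block Id.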
1679 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1680 BitVector &RestoreMBBs,
1681 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1682 if (!RestoreMBBs[Id])
1683 return false;
1684 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1685 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1686 if (Restores[i].index == index &&
1687 Restores[i].vreg == vr &&
1688 Restores[i].canFold)
1689 return true;
1690 return false;
1691 }
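/// eraseRestoreInfo - Invalidate any restore recorded for register vr at
/// the given index in basic block Id by setting its index to -1.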
1693 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1694 BitVector &RestoreMBBs,
1695 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1696 if (!RestoreMBBs[Id])
1697 return;
1698 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1699 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1700 if (Restores[i].index == index && Restores[i].vreg == vr)
1701 Restores[i].index = -1;
1702 }
1704 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1705 /// spilled and create empty intervals for their uses.
1706 void
1707 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1708 const TargetRegisterClass* rc,
1709 std::vector<LiveInterval*> &NewLIs) {
1710 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1711 re = mri_->reg_end(); ri != re; ) {
1712 MachineOperand &O = ri.getOperand();
1713 MachineInstr *MI = &*ri;
1714 ++ri;
1715 if (O.isDef()) {
1716 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1717 "Register def was not rewritten?");
1718 RemoveMachineInstrFromMaps(MI);
1719 vrm.RemoveMachineInstrFromMaps(MI);
1720 MI->eraseFromParent();
1721 } else {
1722 // This must be a use of an implicit_def, so it's not part of the live
1723 // interval. Create a new empty live interval for it.
1724 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1725 unsigned NewVReg = mri_->createVirtualRegister(rc);
1726 vrm.grow();
1727 vrm.setIsImplicitlyDefined(NewVReg);
1728 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1729 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1730 MachineOperand &MO = MI->getOperand(i);
1731 if (MO.isReg() && MO.getReg() == li.reg)
1732 MO.setReg(NewVReg);
1733 }
1734 }
1735 }
1736 }
1738 std::vector<LiveInterval*> LiveIntervals::
1739 addIntervalsForSpillsFast(const LiveInterval &li,
1740 const MachineLoopInfo *loopInfo,
1741 VirtRegMap &vrm) {
1742 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
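// Fast path: assign the whole interval one stack slot up front, then walk
// each instruction that touches li.reg, first trying to fold the stack
// access into the instruction and otherwise rewriting its operands onto a
// fresh vreg whose tiny interval covers just that instruction.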
1744 std::vector<LiveInterval*> added;
1746 assert(li.weight != HUGE_VALF &&
1747 "attempt to spill already spilled interval!");
1749 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1750 DEBUG(li.dump());
1751 DOUT << '\n';
1753 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1755 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1756 while (RI != mri_->reg_end()) {
1757 MachineInstr* MI = &*RI;
1759 SmallVector<unsigned, 2> Indices;
1760 bool HasUse = false;
1761 bool HasDef = false;
1763 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1764 MachineOperand& mop = MI->getOperand(i);
1765 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1767 HasUse |= MI->getOperand(i).isUse();
1768 HasDef |= MI->getOperand(i).isDef();
1770 Indices.push_back(i);
1771 }
1773 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1774 Indices, true, slot, li.reg)) {
1775 unsigned NewVReg = mri_->createVirtualRegister(rc);
1776 vrm.grow();
1777 vrm.assignVirt2StackSlot(NewVReg, slot);
1779 // Create a new register for this spill.
1780 LiveInterval &nI = getOrCreateInterval(NewVReg);
1782 // The spill weight is now infinity, as it
1783 // cannot be spilled again.
1784 nI.weight = HUGE_VALF;
1786 // Rewrite register operands to use the new vreg.
1787 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1788 E = Indices.end(); I != E; ++I) {
1789 MI->getOperand(*I).setReg(NewVReg);
1791 if (MI->getOperand(*I).isUse())
1792 MI->getOperand(*I).setIsKill(true);
1793 }
1795 // Fill in the new live interval.
1796 unsigned index = getInstructionIndex(MI);
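// A note on slot arithmetic (a sketch, assuming the InstrSlots layout in
// LiveIntervalAnalysis.h: LOAD=0, USE=1, DEF=2, STORE=3, NUM=4): each
// instruction gets four consecutive index slots, so
//   getLoadIndex(i)  == base(i) + LOAD   // where a reload is placed
//   getUseIndex(i)   == base(i) + USE
//   getDefIndex(i)   == base(i) + DEF
//   getStoreIndex(i) == base(i) + STORE  // where a spill store is placed
// which is why the ranges below span [load, use) and [def, store).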
1797 if (HasUse) {
1798 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1799 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1800 DOUT << " +" << LR;
1801 nI.addRange(LR);
1802 vrm.addRestorePoint(NewVReg, MI);
1803 }
1804 if (HasDef) {
1805 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1806 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1807 DOUT << " +" << LR;
1808 nI.addRange(LR);
1809 vrm.addSpillPoint(NewVReg, true, MI);
1810 }
1812 added.push_back(&nI);
1814 DOUT << "\t\t\t\tadded new interval: ";
1815 DEBUG(nI.dump());
1816 DOUT << '\n';
1817 }
1820 RI = mri_->reg_begin(li.reg);
1821 }
1823 return added;
1824 }
1826 std::vector<LiveInterval*> LiveIntervals::
1827 addIntervalsForSpills(const LiveInterval &li,
1828 SmallVectorImpl<LiveInterval*> &SpillIs,
1829 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1831 if (EnableFastSpilling)
1832 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1834 assert(li.weight != HUGE_VALF &&
1835 "attempt to spill already spilled interval!");
1837 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1838 li.print(DOUT, tri_);
1839 DOUT << '\n';
1841 // Each bit specifies whether a spill is required in the corresponding MBB.
1842 BitVector SpillMBBs(mf_->getNumBlockIDs());
1843 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1844 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1845 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1846 DenseMap<unsigned,unsigned> MBBVRegsMap;
1847 std::vector<LiveInterval*> NewLIs;
1848 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1850 unsigned NumValNums = li.getNumValNums();
1851 SmallVector<MachineInstr*, 4> ReMatDefs;
1852 ReMatDefs.resize(NumValNums, NULL);
1853 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1854 ReMatOrigDefs.resize(NumValNums, NULL);
1855 SmallVector<int, 4> ReMatIds;
1856 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1857 BitVector ReMatDelete(NumValNums);
1858 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
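// Note: VirtRegMap::MAX_STACK_SLOT serves as a "nothing assigned yet"
// sentinel here, both for the per-val# remat ids above and for Slot.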
1860 // Spilling a split live interval. It cannot be split any further. Also,
1861 // it's guaranteed to be a single val# / range interval.
1862 if (vrm.getPreSplitReg(li.reg)) {
1863 vrm.setIsSplitFromReg(li.reg, 0);
1864 // Unset the split kill marker on the last use.
1865 unsigned KillIdx = vrm.getKillPoint(li.reg);
1866 if (KillIdx) {
1867 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1868 assert(KillMI && "Last use disappeared?");
1869 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1870 assert(KillOp != -1 && "Last use disappeared?");
1871 KillMI->getOperand(KillOp).setIsKill(false);
1872 }
1873 vrm.removeKillPoint(li.reg);
1874 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1875 Slot = vrm.getStackSlot(li.reg);
1876 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1877 MachineInstr *ReMatDefMI = DefIsReMat ?
1878 vrm.getReMaterializedMI(li.reg) : NULL;
1879 int LdSlot = 0;
1880 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1881 bool isLoad = isLoadSS ||
1882 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1883 bool IsFirstRange = true;
1884 for (LiveInterval::Ranges::const_iterator
1885 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1886 // If this is a split live interval with multiple ranges, it means there
1887 // are two-address instructions that re-defined the value. Only the
1888 // first def can be rematerialized!
1889 if (IsFirstRange) {
1890 // Note ReMatOrigDefMI has already been deleted.
1891 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1892 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1893 false, vrm, rc, ReMatIds, loopInfo,
1894 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1895 MBBVRegsMap, NewLIs);
1896 } else {
1897 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1898 Slot, 0, false, false, false,
1899 false, vrm, rc, ReMatIds, loopInfo,
1900 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1901 MBBVRegsMap, NewLIs);
1902 }
1903 IsFirstRange = false;
1904 }
1906 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1907 return NewLIs;
1908 }
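// General case from here on: decide per val# whether its def can be
// rematerialized, allocate at most one stack slot for the whole interval,
// rewrite all defs and uses, then place the actual spills and restores.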
1910 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1911 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1912 TrySplit = false;
1913 if (TrySplit)
1914 ++numSplits;
1915 bool NeedStackSlot = false;
1916 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1917 i != e; ++i) {
1918 const VNInfo *VNI = *i;
1919 unsigned VN = VNI->id;
1920 unsigned DefIdx = VNI->def;
1921 if (DefIdx == ~1U)
1922 continue; // Dead val#.
1923 // Is the def for the val# rematerializable?
1924 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1925 ? 0 : getInstructionFromIndex(DefIdx);
1926 bool dummy;
1927 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1928 // Remember how to remat the def of this val#.
1929 ReMatOrigDefs[VN] = ReMatDefMI;
1930 // Original def may be modified so we have to make a copy here.
1931 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1932 ClonedMIs.push_back(Clone);
1933 ReMatDefs[VN] = Clone;
1935 bool CanDelete = true;
1936 if (VNI->hasPHIKill) {
1937 // A kill is a phi node, so not all of its uses can be rematerialized;
1938 // it must not be deleted.
1939 CanDelete = false;
1940 // Need a stack slot if there is any live range where uses cannot be
1941 // rematerialized.
1942 NeedStackSlot = true;
1943 }
1944 if (CanDelete)
1945 ReMatDelete.set(VN);
1946 } else {
1947 // Need a stack slot if there is any live range where uses cannot be
1948 // rematerialized.
1949 NeedStackSlot = true;
1950 }
1951 }
1953 // One stack slot per live interval.
1954 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1955 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1956 Slot = vrm.assignVirt2StackSlot(li.reg);
1958 // This case only occurs when the prealloc splitter has already assigned
1959 // a stack slot to this vreg.
1960 else
1961 Slot = vrm.getStackSlot(li.reg);
1962 }
1964 // Create new intervals and rewrite defs and uses.
1965 for (LiveInterval::Ranges::const_iterator
1966 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1967 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1968 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1969 bool DefIsReMat = ReMatDefMI != NULL;
1970 bool CanDelete = ReMatDelete[I->valno->id];
1971 int LdSlot = 0;
1972 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1973 bool isLoad = isLoadSS ||
1974 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1975 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1976 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1977 CanDelete, vrm, rc, ReMatIds, loopInfo,
1978 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1979 MBBVRegsMap, NewLIs);
1980 }
1982 // Insert spills / restores if we are splitting.
1983 if (!TrySplit) {
1984 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1985 return NewLIs;
1986 }
1988 SmallPtrSet<LiveInterval*, 4> AddedKill;
1989 SmallVector<unsigned, 2> Ops;
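// SpillMBBs / RestoreMBBs mark which blocks need a spill or a reload, and
// SpillIdxes / RestoreIdxes hold the per-block (index, vreg, canFold)
// records that rewriteInstructionsForSpills collected above.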
1990 if (NeedStackSlot) {
1991 int Id = SpillMBBs.find_first();
1992 while (Id != -1) {
1993 std::vector<SRInfo> &spills = SpillIdxes[Id];
1994 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1995 int index = spills[i].index;
1996 unsigned VReg = spills[i].vreg;
1997 LiveInterval &nI = getOrCreateInterval(VReg);
1998 bool isReMat = vrm.isReMaterialized(VReg);
1999 MachineInstr *MI = getInstructionFromIndex(index);
2000 bool CanFold = false;
2001 bool FoundUse = false;
2002 Ops.clear();
2003 if (spills[i].canFold) {
2004 CanFold = true;
2005 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2006 MachineOperand &MO = MI->getOperand(j);
2007 if (!MO.isReg() || MO.getReg() != VReg)
2008 continue;
2010 Ops.push_back(j);
2011 if (MO.isDef())
2012 continue;
2013 if (isReMat ||
2014 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2015 RestoreMBBs, RestoreIdxes))) {
2016 // MI has two-address uses of the same register. If the use
2017 // isn't the first and only use in the BB, then we can't fold
2018 // it. FIXME: Move this to rewriteInstructionsForSpills.
2019 CanFold = false;
2020 break;
2021 }
2022 FoundUse = true;
2023 }
2024 }
2025 // Fold the store into the def if possible.
2026 bool Folded = false;
2027 if (CanFold && !Ops.empty()) {
2028 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
2029 Folded = true;
2030 if (FoundUse) {
2031 // Also folded uses, do not issue a load.
2032 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2033 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2034 }
2035 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2036 }
2037 }
2039 // Otherwise tell the spiller to issue a spill.
2040 if (!Folded) {
2041 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2042 bool isKill = LR->end == getStoreIndex(index);
2043 if (!MI->registerDefIsDead(nI.reg))
2044 // No need to spill a dead def.
2045 vrm.addSpillPoint(VReg, isKill, MI);
2046 if (isKill)
2047 AddedKill.insert(&nI);
2048 }
2049 }
2050 Id = SpillMBBs.find_next(Id);
2051 }
2052 }
2054 int Id = RestoreMBBs.find_first();
2055 while (Id != -1) {
2056 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2057 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2058 int index = restores[i].index;
2059 if (index == -1)
2060 continue;
2061 unsigned VReg = restores[i].vreg;
2062 LiveInterval &nI = getOrCreateInterval(VReg);
2063 bool isReMat = vrm.isReMaterialized(VReg);
2064 MachineInstr *MI = getInstructionFromIndex(index);
2065 bool CanFold = false;
2066 Ops.clear();
2067 if (restores[i].canFold) {
2068 CanFold = true;
2069 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2070 MachineOperand &MO = MI->getOperand(j);
2071 if (!MO.isReg() || MO.getReg() != VReg)
2072 continue;
2074 if (MO.isDef()) {
2075 // If this restore were to be folded, it would have been folded
2076 // already.
2077 CanFold = false;
2078 break;
2079 }
2080 Ops.push_back(j);
2081 }
2082 }
2084 // Fold the load into the use if possible.
2085 bool Folded = false;
2086 if (CanFold && !Ops.empty()) {
2087 if (!isReMat)
2088 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
2089 else {
2090 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2091 int LdSlot = 0;
2092 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2093 // If the rematerializable def is a load, also try to fold it.
2094 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2095 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2096 Ops, isLoadSS, LdSlot, VReg);
2097 if (!Folded) {
2098 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2099 if (ImpUse) {
2100 // Re-matting an instruction with virtual register use. Add the
2101 // register as an implicit use on the use MI and update the register
2102 // interval's spill weight to HUGE_VALF to prevent it from being
2103 // spilled.
2104 LiveInterval &ImpLi = getInterval(ImpUse);
2105 ImpLi.weight = HUGE_VALF;
2106 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2107 }
2108 }
2109 }
2110 }
2111 // If folding is not possible / failed, then tell the spiller to issue a
2112 // load / rematerialization for us.
2113 if (Folded)
2114 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2115 else
2116 vrm.addRestorePoint(VReg, MI);
2117 }
2118 Id = RestoreMBBs.find_next(Id);
2119 }
2121 // Finalize intervals: add kills, finalize spill weights, and filter out
2122 // dead intervals.
2123 std::vector<LiveInterval*> RetNewLIs;
2124 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2125 LiveInterval *LI = NewLIs[i];
2126 if (!LI->empty()) {
2127 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
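// Normalize the accumulated weight by the interval's approximate size in
// instructions (indices advance by InstrSlots::NUM per instruction). For
// example, a weight of 400 over roughly 10 instructions becomes
// 400 / (4 * 10) = 10, so short, hot intervals don't dominate unfairly.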
2128 if (!AddedKill.count(LI)) {
2129 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2130 unsigned LastUseIdx = getBaseIndex(LR->end);
2131 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2132 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2133 assert(UseIdx != -1);
2134 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2135 LastUse->getOperand(UseIdx).setIsKill();
2136 vrm.addKillPoint(LI->reg, LastUseIdx);
2137 }
2138 }
2139 RetNewLIs.push_back(LI);
2140 }
2141 }
2143 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2144 return RetNewLIs;
2145 }
2147 /// hasAllocatableSuperReg - Return true if the specified physical register has
2148 /// any super register that's allocatable.
2149 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2150 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2151 if (allocatableRegs_[*AS] && hasInterval(*AS))
2152 return true;
2153 return false;
2154 }
2156 /// getRepresentativeReg - Find the largest super register of the specified
2157 /// physical register.
2158 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2159 // Find the largest super-register that is allocatable.
2160 unsigned BestReg = Reg;
2161 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2162 unsigned SuperReg = *AS;
2163 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2164 BestReg = SuperReg;
2165 break;
2166 }
2167 }
2168 return BestReg;
2169 }
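// Illustrative example (an assumption about a typical target, not checked
// here): on X86, getRepresentativeReg(X86::AX) would return X86::EAX when
// an interval for EAX exists and no allocatable register sits above it.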
2171 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2172 /// specified interval that conflict with the specified physical register.
2173 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2174 unsigned PhysReg) const {
2175 unsigned NumConflicts = 0;
2176 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2177 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2178 E = mri_->reg_end(); I != E; ++I) {
2179 MachineOperand &O = I.getOperand();
2180 MachineInstr *MI = O.getParent();
2181 unsigned Index = getInstructionIndex(MI);
2182 if (pli.liveAt(Index))
2183 ++NumConflicts;
2184 }
2185 return NumConflicts;
2186 }
2188 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2189 /// around all defs and uses of the specified interval. Return true if it
2190 /// was able to cut the physical register's live interval.
2191 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2192 unsigned PhysReg, VirtRegMap &vrm) {
2193 unsigned SpillReg = getRepresentativeReg(PhysReg);
2195 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2196 // If there are registers which alias PhysReg but are not sub-registers
2197 // of the chosen representative super register, assert, since we can't
2198 // handle them yet.
2199 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2200 tri_->isSuperRegister(*AS, SpillReg));
2202 bool Cut = false;
2203 LiveInterval &pli = getInterval(SpillReg);
2204 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2205 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2206 E = mri_->reg_end(); I != E; ++I) {
2207 MachineOperand &O = I.getOperand();
2208 MachineInstr *MI = O.getParent();
2209 if (SeenMIs.count(MI))
2210 continue;
2211 SeenMIs.insert(MI);
2212 unsigned Index = getInstructionIndex(MI);
2213 if (pli.liveAt(Index)) {
2214 vrm.addEmergencySpill(SpillReg, MI);
2215 unsigned StartIdx = getLoadIndex(Index);
2216 unsigned EndIdx = getStoreIndex(Index)+1;
2217 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2218 pli.removeRange(StartIdx, EndIdx);
2219 Cut = true;
2220 } else {
2221 cerr << "Ran out of registers during register allocation!\n";
2222 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2223 cerr << "Please check your inline asm statement for invalid "
2224 << "constraints:\n";
2225 MI->print(cerr.stream(), tm_);
2226 }
2227 exit(1);
2228 }
2229 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2230 if (!hasInterval(*AS))
2231 continue;
2232 LiveInterval &spli = getInterval(*AS);
2233 if (spli.liveAt(Index))
2234 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2235 }
2236 }
2237 }
2238 return Cut;
2239 }
2241 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2242 MachineInstr* startInst) {
2243 LiveInterval& Interval = getOrCreateInterval(reg);
2244 VNInfo* VN = Interval.getNextValue(
2245 getInstructionIndex(startInst) + InstrSlots::DEF,
2246 startInst, getVNInfoAllocator());
2247 VN->hasPHIKill = true;
2248 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2249 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2250 getMBBEndIdx(startInst->getParent()) + 1, VN);
2251 Interval.addRange(LR);
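// The new value is defined at startInst's DEF slot and extended to the end
// of its block (the +1 makes the block-end index part of the range), with
// a phi-kill recorded at the boundary for the consumer in the successor.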
2253 return LR;
2254 }