//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}
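
// In short, CanTurnIntoImplicitDef recognizes the copy-like shapes of this
// era: a target move whose source is Reg, an INSERT_SUBREG/SUBREG_TO_REG
// whose inserted value (operand 2) is Reg, or an EXTRACT_SUBREG whose
// source (operand 1) is Reg. Such an instruction only copies an undefined
// value, so it can safely be rewritten into an IMPLICIT_DEF itself.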
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's we want to keep are those that are live
      // out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
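
// A note on the numbering used below (summarized from the helpers in this
// file, e.g. getLoadIndex/getUseIndex/getDefIndex/getStoreIndex): each
// MachineInstr receives a MachineInstrIndex, and every index carries
// distinct load, use, def and store slots so reloads, uses, defs and
// spills at the same instruction stay ordered. Empty indices are also
// reserved at block entry, before terminators (the "terminator gaps" used
// for PHI kills) and after each instruction, so that code inserted later
// can be numbered without renumbering the whole function.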
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));

  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = getNextIndex(MIIndex);
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex = getNextIndex(MIIndex);
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex = getNextIndex(MIIndex);
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = getNextIndex(MIIndex);
        i2miMap_.push_back(0);
      }
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex = getNextIndex(MIIndex);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] =
      std::make_pair(StartIdx, getPrevSlot(MIIndex));
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
             LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (getPrevSlot(LI->end)).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getNextSlot(getMBBEndIdx(I->second));
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end =
              MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                (idx == index ? offset : MachineInstrIndex::LOAD));
          else
            LI->end =
              MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
             VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert(false && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIIndex()) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
                                 vni->kills[i]);
              --I;
              vni->kills[i] = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                                                offset);
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}
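
// Multiplying every index by `factor` opens unused numbers between the
// existing ones; callers that need to insert instructions (the spiller,
// presumably) can hand those gaps out instead of triggering a full
// computeNumbering() pass.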
void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices =
      MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
         TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.clear();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = getNextIndex(index);
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = getNextIndex(index);
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
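
// A sketch of the common intra-block case handled first in
// handleVirtualRegisterDef below (register name is made up):
//   %reg1024 = ...           ; def at index i
//        ... = ... %reg1024  ; kill at index j
// produces the single range [defSlot(i), useSlot(j)+1) with one value
// number, whose kill is recorded at the use slot of the killing
// instruction.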
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
      else
        killIdx = getNextSlot(defIndex);

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getNextSlot(getMBBEndIdx(*I)),  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getNextSlot(getUseIndex(getInstructionIndex(Kill)));
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(
          LiveRange(RedefIndex, getNextSlot(RedefIndex), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
        MachineInstrIndex End =
          getNextSlot(getUseIndex(getInstructionIndex(Killer)));
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      MachineInstrIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = getNextSlot(start);
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = getNextIndex(baseIndex);
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = getNextIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = getNextSlot(start);
        }
        goto exit;
      }
    }

    baseIndex = getNextIndex(baseIndex);
  }

  // The only case we should have a dead physreg here without a killing
  // instruction or an instruction where we know it's dead is if it is
  // live-in to the function and never used. Another possible case is the
  // implicit use of the physical register has been deleted by the
  // two-address pass.
  end = getNextSlot(start);

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
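
// handleRegisterDef below also creates intervals for the sub-registers of
// a physical def: on x86, for example, a def of EAX implicitly defines AX,
// AH and AL too (an illustrative example; the list comes from
// TargetRegisterInfo::getSubRegisters), unless the instruction already
// defines the sub-register explicitly.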
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
      SeenDefUse = true;
      break;
    }

    baseIndex = getNextIndex(baseIndex);
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(MIIdx));
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;

      while (Slots--)
        MIIndex = getNextIndex(MIIndex);

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
bool LiveIntervals::findLiveInMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // Release builds stop at the first register operand; debug builds keep
    // scanning so the assert above can catch a second one.
    break;
#endif
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val#
/// of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
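
// In VirtRegMap terms: a folded plain use contributes isRef, a folded def
// contributes isMod, and a two-address operand that is both read and
// written is recorded as isModRef (and deliberately left out of FoldOps).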
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
1531 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1532 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1533 bool LiveIntervals::
1534 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1535 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1536 MachineInstr *MI,
1537 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1538 unsigned Slot, int LdSlot,
1539 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1540 VirtRegMap &vrm,
1541 const TargetRegisterClass* rc,
1542 SmallVector<int, 4> &ReMatIds,
1543 const MachineLoopInfo *loopInfo,
1544 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1545 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1546 std::vector<LiveInterval*> &NewLIs) {
1547 bool CanFold = false;
1548 RestartInstruction:
1549 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1550 MachineOperand& mop = MI->getOperand(i);
1551 if (!mop.isReg())
1552 continue;
1553 unsigned Reg = mop.getReg();
1554 unsigned RegI = Reg;
1555 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1556 continue;
1557 if (Reg != li.reg)
1558 continue;
1560 bool TryFold = !DefIsReMat;
1561 bool FoldSS = true; // Default behavior unless it's a remat.
1562 int FoldSlot = Slot;
1563 if (DefIsReMat) {
1564 // If this is the rematerializable definition MI itself and
1565 // all of its uses are rematerialized, simply delete it.
1566 if (MI == ReMatOrigDefMI && CanDelete) {
1567 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1568 << MI << '\n');
1569 RemoveMachineInstrFromMaps(MI);
1570 vrm.RemoveMachineInstrFromMaps(MI);
1571 MI->eraseFromParent();
1572 break;
1575 // If def for this use can't be rematerialized, then try folding.
1576 // If def is rematerializable and it's a load, also try folding.
1577 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1578 if (isLoad) {
1579 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1580 FoldSS = isLoadSS;
1581 FoldSlot = LdSlot;
1585 // Scan all of the operands of this instruction rewriting operands
1586 // to use NewVReg instead of li.reg as appropriate. We do this for
1587 // two reasons:
1589 // 1. If the instr reads the same spilled vreg multiple times, we
1590 // want to reuse the NewVReg.
1591 // 2. If the instr is a two-addr instruction, we are required to
1592 // keep the src/dst regs pinned.
1594 // Keep track of whether we replace a use and/or def so that we can
1595 // create the spill interval with the appropriate range.
1597 HasUse = mop.isUse();
1598 HasDef = mop.isDef();
1599 SmallVector<unsigned, 2> Ops;
1600 Ops.push_back(i);
1601 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1602 const MachineOperand &MOj = MI->getOperand(j);
1603 if (!MOj.isReg())
1604 continue;
1605 unsigned RegJ = MOj.getReg();
1606 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1607 continue;
1608 if (RegJ == RegI) {
1609 Ops.push_back(j);
1610 if (!MOj.isUndef()) {
1611 HasUse |= MOj.isUse();
1612 HasDef |= MOj.isDef();
1617 // Create a new virtual register for the spill interval.
1618 // Create the new register now so we can map the fold instruction
1619 // to the new register so when it is unfolded we get the correct
1620 // answer.
1621 bool CreatedNewVReg = false;
1622 if (NewVReg == 0) {
1623 NewVReg = mri_->createVirtualRegister(rc);
1624 vrm.grow();
1625 CreatedNewVReg = true;
1628 if (!TryFold)
1629 CanFold = false;
1630 else {
1631 // Do not fold load / store here if we are splitting. We'll find an
1632 // optimal point to insert a load / store later.
1633 if (!TrySplit) {
1634 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1635 Ops, FoldSS, FoldSlot, NewVReg)) {
1636 // Folding the load/store can completely change the instruction in
1637 // unpredictable ways, rescan it from the beginning.
1639 if (FoldSS) {
1640 // We need to give the new vreg the same stack slot as the
1641 // spilled interval.
1642 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1643 }
1645 HasUse = false;
1646 HasDef = false;
1647 CanFold = false;
1648 if (isNotInMIMap(MI))
1649 break;
1650 goto RestartInstruction;
1651 }
1652 } else {
1653 // We'll try to fold it later if it's profitable.
1654 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1655 }
1656 }
1658 mop.setReg(NewVReg);
1659 if (mop.isImplicit())
1660 rewriteImplicitOps(li, MI, NewVReg, vrm);
1662 // Reuse NewVReg for other reads.
1663 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1664 MachineOperand &mopj = MI->getOperand(Ops[j]);
1665 mopj.setReg(NewVReg);
1666 if (mopj.isImplicit())
1667 rewriteImplicitOps(li, MI, NewVReg, vrm);
1668 }
1670 if (CreatedNewVReg) {
1671 if (DefIsReMat) {
1672 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1673 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1674 // Each valnum may have its own remat id.
1675 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1676 } else {
1677 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1678 }
1679 if (!CanDelete || (HasUse && HasDef)) {
1680 // If this is a two-addr instruction then its use operands are
1681 // rematerializable but its def is not. It should be assigned a
1682 // stack slot.
1683 vrm.assignVirt2StackSlot(NewVReg, Slot);
1684 }
1685 } else {
1686 vrm.assignVirt2StackSlot(NewVReg, Slot);
1687 }
1688 } else if (HasUse && HasDef &&
1689 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1690 // If this interval hasn't been assigned a stack slot (because earlier
1691 // def is a deleted remat def), do it now.
1692 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1693 vrm.assignVirt2StackSlot(NewVReg, Slot);
1694 }
1696 // Re-matting an instruction with virtual register use. Add the
1697 // register as an implicit use on the use MI.
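// (MachineOperand::CreateReg(ImpUse, /*isDef=*/false, /*isImp=*/true)
// below builds that implicit-use operand.)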
1698 if (DefIsReMat && ImpUse)
1699 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1701 // Create a new register interval for this spill / remat.
1702 LiveInterval &nI = getOrCreateInterval(NewVReg);
1703 if (CreatedNewVReg) {
1704 NewLIs.push_back(&nI);
1705 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1706 if (TrySplit)
1707 vrm.setIsSplitFromReg(NewVReg, li.reg);
1708 }
1710 if (HasUse) {
1711 if (CreatedNewVReg) {
1712 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1713 nI.getNextValue(MachineInstrIndex(), 0, false,
1714 VNInfoAllocator));
1715 DEBUG(errs() << " +" << LR);
1716 nI.addRange(LR);
1717 } else {
1718 // Extend the split live interval to this def / use.
1719 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1720 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1721 nI.getValNumInfo(nI.getNumValNums()-1));
1722 DEBUG(errs() << " +" << LR);
1723 nI.addRange(LR);
1724 }
1725 }
1726 if (HasDef) {
1727 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1728 nI.getNextValue(MachineInstrIndex(), 0, false,
1729 VNInfoAllocator));
1730 DEBUG(errs() << " +" << LR);
1731 nI.addRange(LR);
1732 }
1734 DEBUG({
1735 errs() << "\t\t\t\tAdded new interval: ";
1736 nI.print(errs(), tri_);
1737 errs() << '\n';
1738 });
1740 return CanFold;
1741 }
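/// anyKillInMBBAfterIdx - Return true if the given value number has a
/// (non-PHI) kill inside MBB at an index later than Idx.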
1742 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1743 const VNInfo *VNI,
1744 MachineBasicBlock *MBB,
1745 MachineInstrIndex Idx) const {
1746 MachineInstrIndex End = getMBBEndIdx(MBB);
1747 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1748 if (VNI->kills[j].isPHIIndex())
1749 continue;
1751 MachineInstrIndex KillIdx = VNI->kills[j];
1752 if (KillIdx > Idx && KillIdx < End)
1753 return true;
1754 }
1755 return false;
1756 }
1758 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1759 /// during spilling.
1760 namespace {
1761 struct RewriteInfo {
1762 MachineInstrIndex Index;
1763 MachineInstr *MI;
1764 bool HasUse;
1765 bool HasDef;
1766 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1767 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1768 };
1770 struct RewriteInfoCompare {
1771 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1772 return LHS.Index < RHS.Index;
1773 }
1774 };
1775 }
1777 void LiveIntervals::
1778 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1779 LiveInterval::Ranges::const_iterator &I,
1780 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1781 unsigned Slot, int LdSlot,
1782 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1783 VirtRegMap &vrm,
1784 const TargetRegisterClass* rc,
1785 SmallVector<int, 4> &ReMatIds,
1786 const MachineLoopInfo *loopInfo,
1787 BitVector &SpillMBBs,
1788 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1789 BitVector &RestoreMBBs,
1790 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1791 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1792 std::vector<LiveInterval*> &NewLIs) {
1793 bool AllCanFold = true;
1794 unsigned NewVReg = 0;
1795 MachineInstrIndex start = getBaseIndex(I->start);
1796 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
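// This is presumably rounding the range out to whole-instruction
// boundaries, so the [start, end) test below sees every operand of an
// instruction whose index falls inside the range.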
1798 // First collect all the def / use in this live range that will be rewritten.
1799 // Make sure they are sorted according to instruction index.
1800 std::vector<RewriteInfo> RewriteMIs;
1801 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1802 re = mri_->reg_end(); ri != re; ) {
1803 MachineInstr *MI = &*ri;
1804 MachineOperand &O = ri.getOperand();
1805 ++ri;
1806 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1807 MachineInstrIndex index = getInstructionIndex(MI);
1808 if (index < start || index >= end)
1809 continue;
1811 if (O.isUndef())
1812 // Must be defined by an implicit def. It should not be spilled. Note,
1813 // this is for correctness reasons. e.g.
1814 // 8 %reg1024<def> = IMPLICIT_DEF
1815 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1816 // The live range [12, 14) is not part of the r1024 live interval since
1817 // it's defined by an implicit def. It will not conflict with the live
1818 // interval of r1025. Now suppose both registers are spilled; you can
1819 // easily see a situation where both registers are reloaded before
1820 // the INSERT_SUBREG and their target registers would overlap.
1821 continue;
1822 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1823 }
1824 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
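// Sorting by instruction index groups the per-operand entries of each
// MachineInstr together, so the loop below can merge them in one pass.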
1826 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1827 // Now rewrite the defs and uses.
1828 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1829 RewriteInfo &rwi = RewriteMIs[i];
1830 ++i;
1831 MachineInstrIndex index = rwi.Index;
1832 bool MIHasUse = rwi.HasUse;
1833 bool MIHasDef = rwi.HasDef;
1834 MachineInstr *MI = rwi.MI;
1835 // If MI defs and/or uses the same register multiple times, then there
1836 // are multiple entries.
1837 unsigned NumUses = MIHasUse;
1838 while (i != e && RewriteMIs[i].MI == MI) {
1839 assert(RewriteMIs[i].Index == index);
1840 bool isUse = RewriteMIs[i].HasUse;
1841 if (isUse) ++NumUses;
1842 MIHasUse |= isUse;
1843 MIHasDef |= RewriteMIs[i].HasDef;
1844 ++i;
1845 }
1846 MachineBasicBlock *MBB = MI->getParent();
1848 if (ImpUse && MI != ReMatDefMI) {
1849 // Re-matting an instruction with virtual register use. Update the
1850 // register interval's spill weight to HUGE_VALF to prevent it from
1851 // being spilled.
1852 LiveInterval &ImpLi = getInterval(ImpUse);
1853 ImpLi.weight = HUGE_VALF;
1854 }
1856 unsigned MBBId = MBB->getNumber();
1857 unsigned ThisVReg = 0;
1858 if (TrySplit) {
1859 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1860 if (NVI != MBBVRegsMap.end()) {
1861 ThisVReg = NVI->second;
1862 // One common case:
1863 // x = use
1864 // ...
1865 // ...
1866 // def = ...
1867 // = use
1868 // It's better to start a new interval to avoid artificially
1869 // extending the new interval.
1870 if (MIHasDef && !MIHasUse) {
1871 MBBVRegsMap.erase(MBB->getNumber());
1872 ThisVReg = 0;
1873 }
1874 }
1875 }
1877 bool IsNew = ThisVReg == 0;
1878 if (IsNew) {
1879 // This ends the previous live interval. If all of its def / use
1880 // can be folded, give it a low spill weight.
1881 if (NewVReg && TrySplit && AllCanFold) {
1882 LiveInterval &nI = getOrCreateInterval(NewVReg);
1883 nI.weight /= 10.0F;
1884 }
1885 AllCanFold = true;
1886 }
1887 NewVReg = ThisVReg;
1889 bool HasDef = false;
1890 bool HasUse = false;
1891 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1892 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1893 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1894 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1895 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1896 if (!HasDef && !HasUse)
1897 continue;
1899 AllCanFold &= CanFold;
1901 // Update weight of spill interval.
1902 LiveInterval &nI = getOrCreateInterval(NewVReg);
1903 if (!TrySplit) {
1904 // The spill weight is now infinity as it cannot be spilled again.
1905 nI.weight = HUGE_VALF;
1906 continue;
1907 }
1909 // Keep track of the last def and first use in each MBB.
1910 if (HasDef) {
1911 if (MI != ReMatOrigDefMI || !CanDelete) {
1912 bool HasKill = false;
1913 if (!HasUse)
1914 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1915 else {
1916 // If this is two-address code, then this index starts a new VNInfo.
1917 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
1918 if (VNI)
1919 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1920 }
1921 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1922 SpillIdxes.find(MBBId);
1923 if (!HasKill) {
1924 if (SII == SpillIdxes.end()) {
1925 std::vector<SRInfo> S;
1926 S.push_back(SRInfo(index, NewVReg, true));
1927 SpillIdxes.insert(std::make_pair(MBBId, S));
1928 } else if (SII->second.back().vreg != NewVReg) {
1929 SII->second.push_back(SRInfo(index, NewVReg, true));
1930 } else if (index > SII->second.back().index) {
1931 // If there is an earlier def and this is a two-address
1932 // instruction, then it's not possible to fold the store (which
1933 // would also fold the load).
1934 SRInfo &Info = SII->second.back();
1935 Info.index = index;
1936 Info.canFold = !HasUse;
1937 }
1938 SpillMBBs.set(MBBId);
1939 } else if (SII != SpillIdxes.end() &&
1940 SII->second.back().vreg == NewVReg &&
1941 index > SII->second.back().index) {
1942 // There is an earlier def that's not killed (must be two-address).
1943 // The spill is no longer needed.
1944 SII->second.pop_back();
1945 if (SII->second.empty()) {
1946 SpillIdxes.erase(MBBId);
1947 SpillMBBs.reset(MBBId);
1948 }
1949 }
1950 }
1951 }
1953 if (HasUse) {
1954 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1955 SpillIdxes.find(MBBId);
1956 if (SII != SpillIdxes.end() &&
1957 SII->second.back().vreg == NewVReg &&
1958 index > SII->second.back().index)
1959 // Use(s) follow the last def; it's not safe to fold the spill.
1960 SII->second.back().canFold = false;
1961 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1962 RestoreIdxes.find(MBBId);
1963 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1964 // If we are splitting live intervals, only fold if it's the first
1965 // use and there isn't another use later in the MBB.
1966 RII->second.back().canFold = false;
1967 else if (IsNew) {
1968 // Only need a reload if there isn't an earlier def / use.
1969 if (RII == RestoreIdxes.end()) {
1970 std::vector<SRInfo> Infos;
1971 Infos.push_back(SRInfo(index, NewVReg, true));
1972 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1973 } else {
1974 RII->second.push_back(SRInfo(index, NewVReg, true));
1975 }
1976 RestoreMBBs.set(MBBId);
1977 }
1978 }
1980 // Update spill weight.
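// getSpillWeight grows with loop depth, so intervals that are busy inside
// loops become correspondingly more expensive to spill again.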
1981 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1982 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1983 }
1985 if (NewVReg && TrySplit && AllCanFold) {
1986 // If all of its def / use can be folded, give it a low spill weight.
1987 LiveInterval &nI = getOrCreateInterval(NewVReg);
1988 nI.weight /= 10.0F;
1989 }
1990 }
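/// alsoFoldARestore - Return true if a foldable restore of vr is also
/// recorded at the given index in block Id.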
1992 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
1993 unsigned vr, BitVector &RestoreMBBs,
1994 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1995 if (!RestoreMBBs[Id])
1996 return false;
1997 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1998 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1999 if (Restores[i].index == index &&
2000 Restores[i].vreg == vr &&
2001 Restores[i].canFold)
2002 return true;
2003 return false;
2004 }
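/// eraseRestoreInfo - Clear the restore recorded for vr at the given index
/// in block Id; the reload is no longer needed.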
2006 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2007 unsigned vr, BitVector &RestoreMBBs,
2008 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2009 if (!RestoreMBBs[Id])
2010 return;
2011 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2012 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2013 if (Restores[i].index == index && Restores[i].vreg == vr)
2014 Restores[i].index = MachineInstrIndex();
2015 }
2017 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2018 /// spilled and create empty intervals for their uses.
2019 void
2020 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2021 const TargetRegisterClass* rc,
2022 std::vector<LiveInterval*> &NewLIs) {
2023 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2024 re = mri_->reg_end(); ri != re; ) {
2025 MachineOperand &O = ri.getOperand();
2026 MachineInstr *MI = &*ri;
2027 ++ri;
2028 if (O.isDef()) {
2029 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2030 "Register def was not rewritten?");
2031 RemoveMachineInstrFromMaps(MI);
2032 vrm.RemoveMachineInstrFromMaps(MI);
2033 MI->eraseFromParent();
2034 } else {
2035 // This must be a use of an implicit_def so it's not part of the live
2036 // interval. Create a new empty live interval for it.
2037 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2038 unsigned NewVReg = mri_->createVirtualRegister(rc);
2039 vrm.grow();
2040 vrm.setIsImplicitlyDefined(NewVReg);
2041 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2042 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2043 MachineOperand &MO = MI->getOperand(i);
2044 if (MO.isReg() && MO.getReg() == li.reg) {
2045 MO.setReg(NewVReg);
2046 MO.setIsUndef();
2047 }
2048 }
2049 }
2050 }
2051 }
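/// addIntervalsForSpillsFast - Spill li without splitting: assign it a
/// single stack slot, then rewrite each def / use with a fresh vreg that is
/// live only across that instruction, folding the memory access into the
/// instruction when possible.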
2053 std::vector<LiveInterval*> LiveIntervals::
2054 addIntervalsForSpillsFast(const LiveInterval &li,
2055 const MachineLoopInfo *loopInfo,
2056 VirtRegMap &vrm) {
2057 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2059 std::vector<LiveInterval*> added;
2061 assert(li.weight != HUGE_VALF &&
2062 "attempt to spill already spilled interval!");
2064 DEBUG({
2065 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2066 li.dump();
2067 errs() << '\n';
2068 });
2070 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2072 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2073 while (RI != mri_->reg_end()) {
2074 MachineInstr* MI = &*RI;
2076 SmallVector<unsigned, 2> Indices;
2077 bool HasUse = false;
2078 bool HasDef = false;
2080 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2081 MachineOperand& mop = MI->getOperand(i);
2082 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2084 HasUse |= MI->getOperand(i).isUse();
2085 HasDef |= MI->getOperand(i).isDef();
2087 Indices.push_back(i);
2088 }
2090 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2091 Indices, true, slot, li.reg)) {
2092 unsigned NewVReg = mri_->createVirtualRegister(rc);
2093 vrm.grow();
2094 vrm.assignVirt2StackSlot(NewVReg, slot);
2096 // create a new register for this spill
2097 LiveInterval &nI = getOrCreateInterval(NewVReg);
2099 // the spill weight is now infinity as it
2100 // cannot be spilled again
2101 nI.weight = HUGE_VALF;
2103 // Rewrite register operands to use the new vreg.
2104 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2105 E = Indices.end(); I != E; ++I) {
2106 MI->getOperand(*I).setReg(NewVReg);
2108 if (MI->getOperand(*I).isUse())
2109 MI->getOperand(*I).setIsKill(true);
2110 }
2112 // Fill in the new live interval.
2113 MachineInstrIndex index = getInstructionIndex(MI);
2114 if (HasUse) {
2115 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2116 nI.getNextValue(MachineInstrIndex(), 0, false,
2117 getVNInfoAllocator()));
2118 DEBUG(errs() << " +" << LR);
2119 nI.addRange(LR);
2120 vrm.addRestorePoint(NewVReg, MI);
2121 }
2122 if (HasDef) {
2123 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2124 nI.getNextValue(MachineInstrIndex(), 0, false,
2125 getVNInfoAllocator()));
2126 DEBUG(errs() << " +" << LR);
2127 nI.addRange(LR);
2128 vrm.addSpillPoint(NewVReg, true, MI);
2129 }
2131 added.push_back(&nI);
2133 DEBUG({
2134 errs() << "\t\t\t\tadded new interval: ";
2135 nI.dump();
2136 errs() << '\n';
2137 });
2138 }
2141 RI = mri_->reg_begin(li.reg);
2142 }
2144 return added;
2145 }
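/// addIntervalsForSpills - Spill li by creating new, shorter live intervals
/// for its defs and uses, rematerializing values and splitting at basic
/// block boundaries where profitable, and recording the required spill /
/// restore points.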
2147 std::vector<LiveInterval*> LiveIntervals::
2148 addIntervalsForSpills(const LiveInterval &li,
2149 SmallVectorImpl<LiveInterval*> &SpillIs,
2150 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2152 if (EnableFastSpilling)
2153 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2155 assert(li.weight != HUGE_VALF &&
2156 "attempt to spill already spilled interval!");
2158 DEBUG({
2159 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2160 li.print(errs(), tri_);
2161 errs() << '\n';
2162 });
2164 // Each bit specifies whether a spill is required in the MBB.
2165 BitVector SpillMBBs(mf_->getNumBlockIDs());
2166 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2167 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2168 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2169 DenseMap<unsigned,unsigned> MBBVRegsMap;
2170 std::vector<LiveInterval*> NewLIs;
2171 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2173 unsigned NumValNums = li.getNumValNums();
2174 SmallVector<MachineInstr*, 4> ReMatDefs;
2175 ReMatDefs.resize(NumValNums, NULL);
2176 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2177 ReMatOrigDefs.resize(NumValNums, NULL);
2178 SmallVector<int, 4> ReMatIds;
2179 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2180 BitVector ReMatDelete(NumValNums);
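// All four tables are indexed by val# id: the (cloned) remat def, the
// original def, the assigned remat id, and the can-delete bit.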
2181 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2183 // Spilling a split live interval. It cannot be split any further. Also,
2184 // it's guaranteed to be a single val# / range interval.
2185 if (vrm.getPreSplitReg(li.reg)) {
2186 vrm.setIsSplitFromReg(li.reg, 0);
2187 // Unset the split kill marker on the last use.
2188 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2189 if (KillIdx != MachineInstrIndex()) {
2190 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2191 assert(KillMI && "Last use disappeared?");
2192 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2193 assert(KillOp != -1 && "Last use disappeared?");
2194 KillMI->getOperand(KillOp).setIsKill(false);
2195 }
2196 vrm.removeKillPoint(li.reg);
2197 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2198 Slot = vrm.getStackSlot(li.reg);
2199 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2200 MachineInstr *ReMatDefMI = DefIsReMat ?
2201 vrm.getReMaterializedMI(li.reg) : NULL;
2202 int LdSlot = 0;
2203 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2204 bool isLoad = isLoadSS ||
2205 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2206 bool IsFirstRange = true;
2207 for (LiveInterval::Ranges::const_iterator
2208 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2209 // If this is a split live interval with multiple ranges, it means there
2210 // are two-address instructions that re-defined the value. Only the
2211 // first def can be rematerialized!
2212 if (IsFirstRange) {
2213 // Note ReMatOrigDefMI has already been deleted.
2214 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2215 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2216 false, vrm, rc, ReMatIds, loopInfo,
2217 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2218 MBBVRegsMap, NewLIs);
2219 } else {
2220 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2221 Slot, 0, false, false, false,
2222 false, vrm, rc, ReMatIds, loopInfo,
2223 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2224 MBBVRegsMap, NewLIs);
2225 }
2226 IsFirstRange = false;
2227 }
2229 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2230 return NewLIs;
2231 }
2233 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2234 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2235 TrySplit = false;
2236 if (TrySplit)
2237 ++numSplits;
2238 bool NeedStackSlot = false;
2239 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2240 i != e; ++i) {
2241 const VNInfo *VNI = *i;
2242 unsigned VN = VNI->id;
2243 if (VNI->isUnused())
2244 continue; // Dead val#.
2245 // Is the def for the val# rematerializable?
2246 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2247 ? getInstructionFromIndex(VNI->def) : 0;
2248 bool dummy;
2249 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2250 // Remember how to remat the def of this val#.
2251 ReMatOrigDefs[VN] = ReMatDefMI;
2252 // Original def may be modified so we have to make a copy here.
2253 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2254 ClonedMIs.push_back(Clone);
2255 ReMatDefs[VN] = Clone;
2257 bool CanDelete = true;
2258 if (VNI->hasPHIKill()) {
2259 // A kill is a phi node; not all of its uses can be rematerialized.
2260 // It must not be deleted.
2261 CanDelete = false;
2262 // Need a stack slot if there is any live range where uses cannot be
2263 // rematerialized.
2264 NeedStackSlot = true;
2265 }
2266 if (CanDelete)
2267 ReMatDelete.set(VN);
2268 } else {
2269 // Need a stack slot if there is any live range where uses cannot be
2270 // rematerialized.
2271 NeedStackSlot = true;
2272 }
2273 }
2275 // One stack slot per live interval.
2276 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2277 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2278 Slot = vrm.assignVirt2StackSlot(li.reg);
2280 // This case only occurs when the prealloc splitter has already assigned
2281 // a stack slot to this vreg.
2282 else
2283 Slot = vrm.getStackSlot(li.reg);
2284 }
2286 // Create new intervals and rewrite defs and uses.
2287 for (LiveInterval::Ranges::const_iterator
2288 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2289 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2290 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2291 bool DefIsReMat = ReMatDefMI != NULL;
2292 bool CanDelete = ReMatDelete[I->valno->id];
2293 int LdSlot = 0;
2294 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2295 bool isLoad = isLoadSS ||
2296 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2297 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2298 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2299 CanDelete, vrm, rc, ReMatIds, loopInfo,
2300 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2301 MBBVRegsMap, NewLIs);
2302 }
2304 // Insert spills / restores if we are splitting.
2305 if (!TrySplit) {
2306 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2307 return NewLIs;
2308 }
2310 SmallPtrSet<LiveInterval*, 4> AddedKill;
2311 SmallVector<unsigned, 2> Ops;
2312 if (NeedStackSlot) {
2313 int Id = SpillMBBs.find_first();
2314 while (Id != -1) {
2315 std::vector<SRInfo> &spills = SpillIdxes[Id];
2316 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2317 MachineInstrIndex index = spills[i].index;
2318 unsigned VReg = spills[i].vreg;
2319 LiveInterval &nI = getOrCreateInterval(VReg);
2320 bool isReMat = vrm.isReMaterialized(VReg);
2321 MachineInstr *MI = getInstructionFromIndex(index);
2322 bool CanFold = false;
2323 bool FoundUse = false;
2324 Ops.clear();
2325 if (spills[i].canFold) {
2326 CanFold = true;
2327 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2328 MachineOperand &MO = MI->getOperand(j);
2329 if (!MO.isReg() || MO.getReg() != VReg)
2330 continue;
2332 Ops.push_back(j);
2333 if (MO.isDef())
2334 continue;
2335 if (isReMat ||
2336 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2337 RestoreMBBs, RestoreIdxes))) {
2338 // MI has two-address uses of the same register. If the use
2339 // isn't the first and only use in the BB, then we can't fold
2340 // it. FIXME: Move this to rewriteInstructionsForSpills.
2341 CanFold = false;
2342 break;
2343 }
2344 FoundUse = true;
2345 }
2346 }
2347 // Fold the store into the def if possible.
2348 bool Folded = false;
2349 if (CanFold && !Ops.empty()) {
2350 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2351 Folded = true;
2352 if (FoundUse) {
2353 // Also folded uses, do not issue a load.
2354 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2355 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2356 }
2357 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2358 }
2359 }
2361 // Otherwise tell the spiller to issue a spill.
2362 if (!Folded) {
2363 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
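// The def is a kill if the interval's last range ends exactly at this
// instruction's store slot, i.e. nothing remains live past the spill.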
2364 bool isKill = LR->end == getStoreIndex(index);
2365 if (!MI->registerDefIsDead(nI.reg))
2366 // No need to spill a dead def.
2367 vrm.addSpillPoint(VReg, isKill, MI);
2368 if (isKill)
2369 AddedKill.insert(&nI);
2370 }
2371 }
2372 Id = SpillMBBs.find_next(Id);
2373 }
2374 }
2376 int Id = RestoreMBBs.find_first();
2377 while (Id != -1) {
2378 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2379 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2380 MachineInstrIndex index = restores[i].index;
2381 if (index == MachineInstrIndex())
2382 continue;
2383 unsigned VReg = restores[i].vreg;
2384 LiveInterval &nI = getOrCreateInterval(VReg);
2385 bool isReMat = vrm.isReMaterialized(VReg);
2386 MachineInstr *MI = getInstructionFromIndex(index);
2387 bool CanFold = false;
2388 Ops.clear();
2389 if (restores[i].canFold) {
2390 CanFold = true;
2391 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2392 MachineOperand &MO = MI->getOperand(j);
2393 if (!MO.isReg() || MO.getReg() != VReg)
2394 continue;
2396 if (MO.isDef()) {
2397 // If this restore were to be folded, it would have been folded
2398 // already.
2399 CanFold = false;
2400 break;
2401 }
2402 Ops.push_back(j);
2403 }
2404 }
2406 // Fold the load into the use if possible.
2407 bool Folded = false;
2408 if (CanFold && !Ops.empty()) {
2409 if (!isReMat)
2410 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2411 else {
2412 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2413 int LdSlot = 0;
2414 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2415 // If the rematerializable def is a load, also try to fold it.
2416 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2417 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2418 Ops, isLoadSS, LdSlot, VReg);
2419 if (!Folded) {
2420 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2421 if (ImpUse) {
2422 // Re-matting an instruction with virtual register use. Add the
2423 // register as an implicit use on the use MI and update the register
2424 // interval's spill weight to HUGE_VALF to prevent it from being
2425 // spilled.
2426 LiveInterval &ImpLi = getInterval(ImpUse);
2427 ImpLi.weight = HUGE_VALF;
2428 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2429 }
2430 }
2431 }
2433 // If folding is not possible / failed, then tell the spiller to issue a
2434 // load / rematerialization for us.
2435 if (Folded)
2436 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2437 else
2438 vrm.addRestorePoint(VReg, MI);
2439 }
2440 Id = RestoreMBBs.find_next(Id);
2441 }
2443 // Finalize intervals: add kills, finalize spill weights, and filter out
2444 // dead intervals.
2445 std::vector<LiveInterval*> RetNewLIs;
2446 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2447 LiveInterval *LI = NewLIs[i];
2448 if (!LI->empty()) {
2449 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2450 if (!AddedKill.count(LI)) {
2451 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2452 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2453 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2454 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2455 assert(UseIdx != -1);
2456 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2457 LastUse->getOperand(UseIdx).setIsKill();
2458 vrm.addKillPoint(LI->reg, LastUseIdx);
2459 }
2460 }
2461 RetNewLIs.push_back(LI);
2462 }
2463 }
2465 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2466 return RetNewLIs;
2467 }
2469 /// hasAllocatableSuperReg - Return true if the specified physical register has
2470 /// any super register that's allocatable.
2471 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2472 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2473 if (allocatableRegs_[*AS] && hasInterval(*AS))
2474 return true;
2475 return false;
2476 }
2478 /// getRepresentativeReg - Find the largest super register of the specified
2479 /// physical register.
2480 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2481 // Find the largest super-register that is allocatable.
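// e.g. on X86, the representative of AL would typically be EAX (or RAX in
// 64-bit mode), assuming the widest super-register is allocatable.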
2482 unsigned BestReg = Reg;
2483 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2484 unsigned SuperReg = *AS;
2485 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2486 BestReg = SuperReg;
2487 break;
2488 }
2489 }
2490 return BestReg;
2491 }
2493 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2494 /// specified interval that conflicts with the specified physical register.
2495 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2496 unsigned PhysReg) const {
2497 unsigned NumConflicts = 0;
2498 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2499 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2500 E = mri_->reg_end(); I != E; ++I) {
2501 MachineOperand &O = I.getOperand();
2502 MachineInstr *MI = O.getParent();
2503 MachineInstrIndex Index = getInstructionIndex(MI);
2504 if (pli.liveAt(Index))
2505 ++NumConflicts;
2506 }
2507 return NumConflicts;
2508 }
2510 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2511 /// around all defs and uses of the specified interval. Return true if it
2512 /// was able to cut away part of the physical register's interval.
2513 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2514 unsigned PhysReg, VirtRegMap &vrm) {
2515 unsigned SpillReg = getRepresentativeReg(PhysReg);
2517 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2518 // If there are registers which alias PhysReg but are not sub-registers
2519 // of the chosen representative super register, assert, since we can't
2520 // handle them yet.
2521 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2522 tri_->isSuperRegister(*AS, SpillReg));
2524 bool Cut = false;
2525 LiveInterval &pli = getInterval(SpillReg);
2526 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2527 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2528 E = mri_->reg_end(); I != E; ++I) {
2529 MachineOperand &O = I.getOperand();
2530 MachineInstr *MI = O.getParent();
2531 if (SeenMIs.count(MI))
2532 continue;
2533 SeenMIs.insert(MI);
2534 MachineInstrIndex Index = getInstructionIndex(MI);
2535 if (pli.liveAt(Index)) {
2536 vrm.addEmergencySpill(SpillReg, MI);
2537 MachineInstrIndex StartIdx = getLoadIndex(Index);
2538 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2539 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2540 pli.removeRange(StartIdx, EndIdx);
2541 Cut = true;
2542 } else {
2543 std::string msg;
2544 raw_string_ostream Msg(msg);
2545 Msg << "Ran out of registers during register allocation!";
2546 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2547 Msg << "\nPlease check your inline asm statement for invalid "
2548 << "constraints:\n";
2549 MI->print(Msg, tm_);
2550 }
2551 llvm_report_error(Msg.str());
2552 }
2553 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2554 if (!hasInterval(*AS))
2555 continue;
2556 LiveInterval &spli = getInterval(*AS);
2557 if (spli.liveAt(Index))
2558 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2559 }
2560 }
2561 }
2562 return Cut;
2563 }
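/// addLiveRangeToEndOfBlock - Give reg a new value number defined at
/// startInst and a live range covering everything from there to the end of
/// startInst's basic block.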
2565 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2566 MachineInstr* startInst) {
2567 LiveInterval& Interval = getOrCreateInterval(reg);
2568 VNInfo* VN = Interval.getNextValue(
2569 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2570 startInst, true, getVNInfoAllocator());
2571 VN->setHasPHIKill(true);
2572 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2573 LiveRange LR(
2574 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2575 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2576 Interval.addRange(LR);
2578 return LR;