//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}

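// Illustrative sketch (not part of the pass) of how the predicate above is
// meant to be used; 'degradeToImplicitDef' is a hypothetical caller, and the
// operand-stripping loop mirrors processImplicitDefs() below:
#if 0
static void degradeToImplicitDef(MachineInstr *MI, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  if (!CanTurnIntoImplicitDef(MI, Reg, TII))
    return;
  // Rewrite the copy / subreg instruction as "dst = IMPLICIT_DEF", keeping
  // only the destination operand (operand 0).
  MI->setDesc(TII->get(TargetInstrInfo::IMPLICIT_DEF));
  for (int j = MI->getNumOperands() - 1; j > 0; --j)
    MI->RemoveOperand(j);
}
#endif
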
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register in this instruction
          // are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers, since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's we want to keep are those whose values are
      // live out of their block; erase this one and rewrite its
      // out-of-block uses below.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));

  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = MIIndex.nextIndex();
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex = MIIndex.nextIndex();
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = true; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex = MIIndex.nextIndex();
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = MIIndex.nextIndex();
        i2miMap_.push_back(0);
      }
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex = MIIndex.nextIndex();
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex.prevSlot());
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
             LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I - 1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end.prevSlot()).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second).nextSlot();
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end =
              MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                (idx == index ? offset : MachineInstrIndex::LOAD));
          else
            LI->end =
              MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
             VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I - 1) : I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = vni->kills[i].prevSlot().getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert(false && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIIndex()) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(),
                                 vni->kills[i]);
              --I;
              vni->kills[i] = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i] =
                MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
            }

            /* Stale pre-MachineInstrIndex remapping, superseded by the
               code above:
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

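// Illustrative model (not part of the pass) of the slot arithmetic that the
// numbering above relies on: every instruction owns one index made of four
// consecutive slots. The enum and helpers below are a simplified,
// hypothetical stand-in for the MachineInstrIndex API used in this file:
#if 0
namespace {
  enum Slot { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };
  // Round an index down to the start of its instruction's slot group.
  unsigned getBaseIndex(unsigned Index)  { return Index - (Index % NUM); }
  unsigned getLoadIndex(unsigned Index)  { return getBaseIndex(Index) + LOAD; }
  unsigned getUseIndex(unsigned Index)   { return getBaseIndex(Index) + USE; }
  unsigned getDefIndex(unsigned Index)   { return getBaseIndex(Index) + DEF; }
  unsigned getStoreIndex(unsigned Index) { return getBaseIndex(Index) + STORE; }
}
#endif
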
void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices =
      MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
         TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.clear();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
  }
}

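// Hypothetical call site (not in this file): scaling all indices by a factor
// of two opens an unused index between any two adjacent instructions, for
// example to make room for numbering newly inserted spill code:
#if 0
void makeRoomForSpillCode(LiveIntervals *LIs) {
  LIs->scaleNumbering(2);
  // ...insert spill / reload instructions and give them the freed indices...
}
#endif
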
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
         index = index.nextIndex()) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = index.nextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef, except
/// it can also check uses.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
         index = index.nextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.nextIndex();
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0])).nextSlot();
      else
        killIdx = defIndex.nextSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).nextSlot(), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I).nextSlot(),  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getUseIndex(getInstructionIndex(Kill)).nextSlot();
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.prevSlot());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(
          LiveRange(RedefIndex, RedefIndex.nextSlot(), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
        MachineInstrIndex End =
          getUseIndex(getInstructionIndex(Killer)).nextSlot();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that
        // this value number isn't actually defined by an instruction, weird
        // huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      MachineInstrIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      MachineInstrIndex killIndex = getMBBEndIdx(mbb).nextSlot();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

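// Illustrative trace (invented registers and indices) of the two-address
// case handled above: the tied redefinition splits one value into two.
#if 0
//   d1: %reg1024 = MOV32ri 7             ; was value #0
//   d2: %reg1024 = ADD32ri %reg1024, 5   ; tied redef at index d2
//
// after the split:
//   value #1: [d1, d2)  defined by the MOV (the instruction that had
//                       "claimed" value #0 before)
//   value #0: [d2, ...) now defined by the two-address ADD
#endif
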
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.nextSlot();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.nextIndex();
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = baseIndex.nextIndex();
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex).nextSlot();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.nextSlot();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.nextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction or a dead-defining instruction is if it is live-in to the
  // function and never used. Another possible case is that the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.nextSlot();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno
    : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

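// Illustrative trace (invented instructions and symbolic indices) of the
// physreg intervals built above, using the [defSlot(def), useSlot(kill)+1)
// rule from the comments:
#if 0
//   %EAX = MOV32ri 1       ; def slot d
//   CALL foo<uses %EAX>    ; use slot u  -> interval [d, u+1)
//
// If %EAX were dead on definition, or redefined before being read, the
// interval would collapse to [d, d+1).
#endif
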
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = baseIndex.nextIndex();
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex).nextSlot();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getDefIndex(start).nextSlot();
      SeenDefUse = true;
      break;
    }

    baseIndex = baseIndex.nextIndex();
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = baseIndex.nextIndex();
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx).nextSlot();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = MIIndex.nextIndex();

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;

      while (Slots--)
        MIIndex = MIIndex.nextIndex();

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = MIIndex.nextIndex();
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

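// Hypothetical consumer sketch (not in this file): another machine pass
// requiring this analysis and querying one register's intervals. The pass
// and member names below are illustrative only:
#if 0
void SomePass::useLiveIntervals(unsigned Reg) {
  LiveIntervals &LIs = getAnalysis<LiveIntervals>();
  if (LIs.hasInterval(Reg)) {
    LiveInterval &LI = LIs.getInterval(Reg);
    // e.g. check whether the whole interval stays inside one block:
    bool Local = LIs.intervalIsInOneMBB(LI);
    (void)Local;
  }
}
#endif
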
bool LiveIntervals::findLiveInMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(
                              MachineInstrIndex Start, MachineInstrIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

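// Illustrative cases (invented operands) for the copy-source extraction
// above:
#if 0
//   %reg1026 = EXTRACT_SUBREG %reg1027, 1          --> %reg1027 (or the
//       corresponding sub-register when the source is physical)
//   %reg1028 = INSERT_SUBREG %reg1028, %reg1029, 2 --> %reg1029
//   %reg1030 = MOV32rr %reg1031                    --> %reg1031
#endif
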
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // With assertions disabled, stop at the first register operand; with
    // assertions enabled, keep scanning so the assert above can catch a
    // second one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }

  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
      return false;

  return true;
}

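// Hypothetical caller sketch: a spiller asking whether a value's defining
// instruction can be recomputed at its uses instead of being spilled; it uses
// the simpler overload defined directly below. Names are illustrative only:
#if 0
static bool canReMatValNo(LiveIntervals &LIs, const LiveInterval &li,
                          const VNInfo *VNI) {
  if (!VNI->isDefAccurate())
    return false;
  MachineInstr *DefMI = LIs.getInstructionFromIndex(VNI->def);
  return DefMI && LIs.isReMaterializable(li, VNI, DefMI);
}
#endif
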
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

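// Illustrative outcomes (invented operand layouts) for the filter above:
#if 0
//   fold a plain use (store of the vreg):  Ops = {use}      -> MRInfo = isRef
//   fold a def (reload into the vreg):     Ops = {def}      -> MRInfo = isMod
//   two-address def plus tied use:         Ops = {def, use} -> MRInfo =
//       isModRef, and the tied use is filtered out of FoldOps.
#endif
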
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx.getVecIndex()] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

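// Illustrative before/after (invented x86-like opcodes and frame index) for
// a successful fold of a reload into its use, which is what the routine
// above accomplishes:
#if 0
//   before:  %reg1024 = MOV32rm <fi#3>             ; reload from stack slot
//            %reg1025 = ADD32rr %reg1025, %reg1024
//   after:   %reg1025 = ADD32rm %reg1025, <fi#3>   ; reload folded, one MI
#endif
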
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

1527 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1528 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1529 bool LiveIntervals::
1530 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1531 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1532 MachineInstr *MI,
1533 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1534 unsigned Slot, int LdSlot,
1535 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1536 VirtRegMap &vrm,
1537 const TargetRegisterClass* rc,
1538 SmallVector<int, 4> &ReMatIds,
1539 const MachineLoopInfo *loopInfo,
1540 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1541 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1542 std::vector<LiveInterval*> &NewLIs) {
1543 bool CanFold = false;
1544 RestartInstruction:
1545 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1546 MachineOperand& mop = MI->getOperand(i);
1547 if (!mop.isReg())
1548 continue;
1549 unsigned Reg = mop.getReg();
1550 unsigned RegI = Reg;
1551 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1552 continue;
1553 if (Reg != li.reg)
1554 continue;
1556 bool TryFold = !DefIsReMat;
1557 bool FoldSS = true; // Default behavior unless it's a remat.
1558 int FoldSlot = Slot;
1559 if (DefIsReMat) {
1560 // If this is the rematerializable definition MI itself and
1561 // all of its uses are rematerialized, simply delete it.
1562 if (MI == ReMatOrigDefMI && CanDelete) {
1563 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1564 << MI << '\n');
1565 RemoveMachineInstrFromMaps(MI);
1566 vrm.RemoveMachineInstrFromMaps(MI);
1567 MI->eraseFromParent();
1568 break;
1571 // If def for this use can't be rematerialized, then try folding.
1572 // If def is rematerializable and it's a load, also try folding.
1573 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1574 if (isLoad) {
1575 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1576 FoldSS = isLoadSS;
1577 FoldSlot = LdSlot;
1581 // Scan all of the operands of this instruction rewriting operands
1582 // to use NewVReg instead of li.reg as appropriate. We do this for
1583 // two reasons:
1585 // 1. If the instr reads the same spilled vreg multiple times, we
1586 // want to reuse the NewVReg.
1587 // 2. If the instr is a two-addr instruction, we are required to
1588 // keep the src/dst regs pinned.
1590 // Keep track of whether we replace a use and/or def so that we can
1591 // create the spill interval with the appropriate range.
1593 HasUse = mop.isUse();
1594 HasDef = mop.isDef();
1595 SmallVector<unsigned, 2> Ops;
1596 Ops.push_back(i);
1597 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1598 const MachineOperand &MOj = MI->getOperand(j);
1599 if (!MOj.isReg())
1600 continue;
1601 unsigned RegJ = MOj.getReg();
1602 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1603 continue;
1604 if (RegJ == RegI) {
1605 Ops.push_back(j);
1606 if (!MOj.isUndef()) {
1607 HasUse |= MOj.isUse();
1608 HasDef |= MOj.isDef();
1613 // Create a new virtual register for the spill interval.
1614 // Create the new register now so we can map the fold instruction
1615 // to the new register so when it is unfolded we get the correct
1616 // answer.
1617 bool CreatedNewVReg = false;
1618 if (NewVReg == 0) {
1619 NewVReg = mri_->createVirtualRegister(rc);
1620 vrm.grow();
1621 CreatedNewVReg = true;
    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with a virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index).nextSlot(),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        MachineInstrIndex End = getUseIndex(index).nextSlot();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(MachineInstrIndex(), 0, false,
                                   VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
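/// anyKillInMBBAfterIdx - Return true if the given val# has a kill that falls
/// inside MBB after the given index. Kills at PHI slots are ignored.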
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         MachineInstrIndex Idx) const {
  MachineInstrIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIIndex())
      continue;

    MachineInstrIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    MachineInstrIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
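/// rewriteInstructionsForSpills - Rewrite all defs and uses of li.reg in the
/// given live range for spilling or splitting, and record the candidate
/// spill / restore points of each basic block for the spiller.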
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  MachineInstrIndex start = getBaseIndex(I->start);
  MachineInstrIndex end = getBaseIndex(I->end.prevSlot()).nextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    MachineInstrIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before the
      // INSERT_SUBREG and both end up in target registers that overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    MachineInstrIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with a virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x   = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
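/// alsoFoldARestore - Return true if there is a foldable restore recorded
/// for the given vreg at the given index in block Id.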
bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
                      unsigned vr, BitVector &RestoreMBBs,
                      DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
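/// eraseRestoreInfo - Invalidate the restore recorded for the given vreg at
/// the given index in block Id so that no reload is issued for it.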
void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
                      unsigned vr, BitVector &RestoreMBBs,
                      DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = MachineInstrIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
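/// addIntervalsForSpillsFast - Simpler spilling path used when -fast-spill
/// is enabled: assign the interval one stack slot, then rewrite each
/// instruction that touches the register, folding the memory access where
/// possible and otherwise creating a new interval with restore / spill
/// points around the instruction.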
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      MachineInstrIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(MachineInstrIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
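/// addIntervalsForSpills - Create the new live intervals to be used by the
/// spiller for the given interval: rematerialize defs and fold loads /
/// stores where possible, and split the interval at basic block boundaries
/// when profitable.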
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != MachineInstrIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized,
        // so it must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        MachineInstrIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index).nextSlot());
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      MachineInstrIndex index = restores[i].index;
      if (index == MachineInstrIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add
              // the register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index).nextSlot());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
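      // Normalize the spill weight by the approximate length of the new
      // interval so spill costs are compared on a per-instruction basis.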
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert since we can't
    // handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    MachineInstrIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      MachineInstrIndex StartIdx = getLoadIndex(Index);
      MachineInstrIndex EndIdx = getStoreIndex(Index).nextSlot();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index),
                           getStoreIndex(Index).nextSlot());
      }
    }
  }
  return Cut;
}
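/// addLiveRangeToEndOfBlock - Add a live range to the interval of the given
/// register, running from the def by startInst to the end of its basic
/// block, and mark it as killed by a PHI.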
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(terminatorGaps[startInst->getParent()]);
  LiveRange LR(
    MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
    getMBBEndIdx(startInst->getParent()).nextSlot(), VN);
  Interval.addRange(LR);

  return LR;