1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
43 #include <algorithm>
44 #include <limits>
45 #include <cmath>
46 using namespace llvm;
48 // Hidden options to help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
54 static cl::opt<bool> EnableFastSpilling("fast-spill",
55 cl::init(false), cl::Hidden);
57 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
59 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
62 STATISTIC(numIntervals , "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
65 STATISTIC(numCoalescing, "Number of early coalescing performed");
67 char LiveIntervals::ID = 0;
68 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
71 AU.setPreservesCFG();
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
79 if (!StrongPHIElim) {
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 MachineFunctionPass::getAnalysisUsage(AU);
88 void LiveIntervals::releaseMemory() {
89 // Free the live intervals themselves.
90 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
91 E = r2iMap_.end(); I != E; ++I)
92 delete I->second;
94 MBB2IdxMap.clear();
95 Idx2MBBMap.clear();
96 mi2iMap_.clear();
97 i2miMap_.clear();
98 r2iMap_.clear();
99 terminatorGaps.clear();
100 phiJoinCopies.clear();
102 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
103 VNInfoAllocator.Reset();
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
106 CloneMIs.pop_back();
107 mf_->DeleteMachineInstr(MI);
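/// CanTurnIntoImplicitDef - Return true if MI only forwards the value of
/// Reg (a plain move out of Reg, or a subreg insert/extract whose source
/// is Reg), so a use of an implicit_def can itself become an implicit_def.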
111 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
112 const TargetInstrInfo *tii_) {
113 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
114 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
115 Reg == SrcReg)
116 return true;
118 if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
119 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
120 MI->getOperand(2).getReg() == Reg)
121 return true;
122 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
123 MI->getOperand(1).getReg() == Reg)
124 return true;
125 return false;
128 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
129 /// there is one implicit_def for each use. Add isUndef marker to
130 /// implicit_def defs and their uses.
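/// Blocks are visited in depth-first order from the entry, so each
/// implicit_def is seen before the uses it reaches.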
131 void LiveIntervals::processImplicitDefs() {
132 SmallSet<unsigned, 8> ImpDefRegs;
133 SmallVector<MachineInstr*, 8> ImpDefMIs;
134 MachineBasicBlock *Entry = mf_->begin();
135 SmallPtrSet<MachineBasicBlock*,16> Visited;
136 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
137 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
138 DFI != E; ++DFI) {
139 MachineBasicBlock *MBB = *DFI;
140 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
141 I != E; ) {
142 MachineInstr *MI = &*I;
143 ++I;
144 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
145 unsigned Reg = MI->getOperand(0).getReg();
146 ImpDefRegs.insert(Reg);
147 ImpDefMIs.push_back(MI);
148 continue;
151 bool ChangedToImpDef = false;
152 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
153 MachineOperand& MO = MI->getOperand(i);
154 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
155 continue;
156 unsigned Reg = MO.getReg();
157 if (!Reg)
158 continue;
159 if (!ImpDefRegs.count(Reg))
160 continue;
161 // Use is a copy, just turn it into an implicit_def.
162 if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
163 bool isKill = MO.isKill();
164 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
165 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
166 MI->RemoveOperand(j);
167 if (isKill)
168 ImpDefRegs.erase(Reg);
169 ChangedToImpDef = true;
170 break;
173 MO.setIsUndef();
174 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
175 // Make sure other uses of the same register are also marked undef.
176 for (unsigned j = i+1; j != e; ++j) {
177 MachineOperand &MOJ = MI->getOperand(j);
178 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
179 MOJ.setIsUndef();
181 ImpDefRegs.erase(Reg);
185 if (ChangedToImpDef) {
186 // Backtrack to process this new implicit_def.
187 --I;
188 } else {
189 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
190 MachineOperand& MO = MI->getOperand(i);
191 if (!MO.isReg() || !MO.isDef())
192 continue;
193 ImpDefRegs.erase(MO.getReg());
198 // Any outstanding liveout implicit_def's?
199 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
200 MachineInstr *MI = ImpDefMIs[i];
201 unsigned Reg = MI->getOperand(0).getReg();
202 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
203 !ImpDefRegs.count(Reg)) {
204 // Delete all "local" implicit_def's. That includes those which define
205 // physical registers, since they cannot be live out.
206 MI->eraseFromParent();
207 continue;
210 // If there are multiple defs of the same register and at least one
211 // is not an implicit_def, do not insert implicit_def's before the
212 // uses.
213 bool Skip = false;
214 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
215 DE = mri_->def_end(); DI != DE; ++DI) {
216 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
217 Skip = true;
218 break;
221 if (Skip)
222 continue;
224 // The only implicit_defs we want to keep are those that are live
225 // out of their blocks.
226 MI->eraseFromParent();
228 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
229 UE = mri_->use_end(); UI != UE; ) {
230 MachineOperand &RMO = UI.getOperand();
231 MachineInstr *RMI = &*UI;
232 ++UI;
233 MachineBasicBlock *RMBB = RMI->getParent();
234 if (RMBB == MBB)
235 continue;
237 // Turn a copy use into an implicit_def.
238 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
239 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
240 Reg == SrcReg) {
241 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
242 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
243 RMI->RemoveOperand(j);
244 continue;
247 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
248 unsigned NewVReg = mri_->createVirtualRegister(RC);
249 RMO.setReg(NewVReg);
250 RMO.setIsUndef();
251 RMO.setIsKill();
254 ImpDefRegs.clear();
255 ImpDefMIs.clear();
260 void LiveIntervals::computeNumbering() {
261 Index2MiMap OldI2MI = i2miMap_;
262 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
264 Idx2MBBMap.clear();
265 MBB2IdxMap.clear();
266 mi2iMap_.clear();
267 i2miMap_.clear();
268 terminatorGaps.clear();
269 phiJoinCopies.clear();
271 FunctionSize = 0;
273 // Number MachineInstrs and MachineBasicBlocks.
274 // Initialize MBB indexes to a sentinel.
275 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
276 std::make_pair(MachineInstrIndex(),MachineInstrIndex()));
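// Each machine instruction gets one entry in i2miMap_; a MachineInstrIndex
// combines that vector index with one of four slots (LOAD, USE, DEF,
// STORE), giving distinct, ordered points within a single instruction.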
278 MachineInstrIndex MIIndex;
279 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
280 MBB != E; ++MBB) {
281 MachineInstrIndex StartIdx = MIIndex;
283 // Insert an empty slot at the beginning of each block.
284 MIIndex = getNextIndex(MIIndex);
285 i2miMap_.push_back(0);
287 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
288 I != E; ++I) {
290 if (I == MBB->getFirstTerminator()) {
291 // Leave a gap before the terminators; this is where we will point
292 // PHI kills.
293 MachineInstrIndex tGap(true, MIIndex);
294 bool inserted =
295 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
296 assert(inserted &&
297 "Multiple 'first' terminators encountered during numbering.");
298 inserted = inserted; // Avoid compiler warning if assertions turned off.
299 i2miMap_.push_back(0);
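// No instruction maps to the gap index; it exists only so PHI kills can
// be numbered between the block body and its terminators.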
301 MIIndex = getNextIndex(MIIndex);
304 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
305 assert(inserted && "multiple MachineInstr -> index mappings");
306 inserted = true;
307 i2miMap_.push_back(I);
308 MIIndex = getNextIndex(MIIndex);
309 FunctionSize++;
311 // Insert max(1, numdefs) empty slots after every instruction.
312 unsigned Slots = I->getDesc().getNumDefs();
313 if (Slots == 0)
314 Slots = 1;
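// The empty indices that follow leave room to number instructions
// (e.g. spill and restore code) inserted later without renumbering
// the whole function.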
315 while (Slots--) {
316 MIIndex = getNextIndex(MIIndex);
317 i2miMap_.push_back(0);
322 if (MBB->getFirstTerminator() == MBB->end()) {
323 // Leave a gap before the terminators; this is where we will point
324 // PHI kills.
325 MachineInstrIndex tGap(true, MIIndex);
326 bool inserted =
327 terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
328 assert(inserted &&
329 "Multiple 'first' terminators encountered during numbering.");
330 inserted = inserted; // Avoid compiler warning if assertions turned off.
331 i2miMap_.push_back(0);
333 MIIndex = getNextIndex(MIIndex);
336 // Set the MBB2IdxMap entry for this MBB.
337 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
338 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
341 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
343 if (!OldI2MI.empty())
344 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
345 for (LiveInterval::iterator LI = OI->second->begin(),
346 LE = OI->second->end(); LI != LE; ++LI) {
348 // Remap the start index of the live range to the corresponding new
349 // number, or our best guess at what it _should_ correspond to if the
350 // original instruction has been erased. This is either the following
351 // instruction or its predecessor.
352 unsigned index = LI->start.getVecIndex();
353 MachineInstrIndex::Slot offset = LI->start.getSlot();
354 if (LI->start.isLoad()) {
355 std::vector<IdxMBBPair>::const_iterator I =
356 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
357 // Take the pair containing the index
358 std::vector<IdxMBBPair>::const_iterator J =
359 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
361 LI->start = getMBBStartIdx(J->second);
362 } else {
363 LI->start = MachineInstrIndex(
364 MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
365 (MachineInstrIndex::Slot)offset);
368 // Remap the ending index in the same way that we remapped the start,
369 // except for the final step where we always map to the immediately
370 // following instruction.
371 index = (getPrevSlot(LI->end)).getVecIndex();
372 offset = LI->end.getSlot();
373 if (LI->end.isLoad()) {
374 // VReg dies at end of block.
375 std::vector<IdxMBBPair>::const_iterator I =
376 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
377 --I;
379 LI->end = getNextSlot(getMBBEndIdx(I->second));
380 } else {
381 unsigned idx = index;
382 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
384 if (index != OldI2MI.size())
385 LI->end =
386 MachineInstrIndex(mi2iMap_[OldI2MI[index]],
387 (idx == index ? offset : MachineInstrIndex::LOAD));
388 else
389 LI->end =
390 MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
394 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
395 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
396 VNInfo* vni = *VNI;
398 // Remap the VNInfo def index, which works the same as the
399 // start indices above. VN's with special sentinel defs
400 // don't need to be remapped.
401 if (vni->isDefAccurate() && !vni->isUnused()) {
402 unsigned index = vni->def.getVecIndex();
403 MachineInstrIndex::Slot offset = vni->def.getSlot();
404 if (vni->def.isLoad()) {
405 std::vector<IdxMBBPair>::const_iterator I =
406 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
407 // Take the pair containing the index
408 std::vector<IdxMBBPair>::const_iterator J =
409 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
411 vni->def = getMBBStartIdx(J->second);
412 } else {
413 vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
417 // Remap the VNInfo kill indices, which works the same as
418 // the end indices above.
419 for (size_t i = 0; i < vni->kills.size(); ++i) {
420 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
421 MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
423 if (vni->kills[i].isLoad()) {
424 assert("Value killed at a load slot.");
425 /*std::vector<IdxMBBPair>::const_iterator I =
426 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
427 --I;
429 vni->kills[i] = getMBBEndIdx(I->second);*/
430 } else {
431 if (vni->kills[i].isPHIIndex()) {
432 std::vector<IdxMBBPair>::const_iterator I =
433 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
434 --I;
435 vni->kills[i] = terminatorGaps[I->second];
436 } else {
437 assert(OldI2MI[index] != 0 &&
438 "Kill refers to instruction not present in index maps.");
439 vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
443 /*unsigned idx = index;
444 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
446 if (index != OldI2MI.size())
447 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
448 (idx == index ? offset : 0);
449 else
450 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();*/
458 void LiveIntervals::scaleNumbering(int factor) {
459 // Need to
460 // * scale MBB begin and end points
461 // * scale all ranges.
462 // * Update VNI structures.
463 // * Scale instruction numberings
465 // Scale the MBB indices.
466 Idx2MBBMap.clear();
467 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
468 MBB != MBBE; ++MBB) {
469 std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
470 mbbIndices.first = mbbIndices.first.scale(factor);
471 mbbIndices.second = mbbIndices.second.scale(factor);
472 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
474 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
476 // Scale terminator gaps.
477 for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
478 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
479 TGI != TGE; ++TGI) {
480 terminatorGaps[TGI->first] = TGI->second.scale(factor);
483 // Scale the intervals.
484 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
485 LI->second->scaleNumbering(factor);
488 // Scale MachineInstrs.
489 Mi2IndexMap oldmi2iMap = mi2iMap_;
490 MachineInstrIndex highestSlot;
491 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
492 MI != ME; ++MI) {
493 MachineInstrIndex newSlot = MI->second.scale(factor);
494 mi2iMap_[MI->first] = newSlot;
495 highestSlot = std::max(highestSlot, newSlot);
498 unsigned highestVIndex = highestSlot.getVecIndex();
499 i2miMap_.clear();
500 i2miMap_.resize(highestVIndex + 1);
501 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
502 MI != ME; ++MI) {
503 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
509 /// runOnMachineFunction - Compute live intervals for the whole function.
511 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
512 mf_ = &fn;
513 mri_ = &mf_->getRegInfo();
514 tm_ = &fn.getTarget();
515 tri_ = tm_->getRegisterInfo();
516 tii_ = tm_->getInstrInfo();
517 aa_ = &getAnalysis<AliasAnalysis>();
518 lv_ = &getAnalysis<LiveVariables>();
519 allocatableRegs_ = tri_->getAllocatableSet(fn);
521 processImplicitDefs();
522 computeNumbering();
523 computeIntervals();
524 performEarlyCoalescing();
526 numIntervals += getNumIntervals();
528 DEBUG(dump());
529 return true;
532 /// print - Implement the dump method.
533 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
534 OS << "********** INTERVALS **********\n";
535 for (const_iterator I = begin(), E = end(); I != E; ++I) {
536 I->second->print(OS, tri_);
537 OS << "\n";
540 printInstrs(OS);
543 void LiveIntervals::printInstrs(raw_ostream &OS) const {
544 OS << "********** MACHINEINSTRS **********\n";
546 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
547 mbbi != mbbe; ++mbbi) {
548 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
549 for (MachineBasicBlock::iterator mii = mbbi->begin(),
550 mie = mbbi->end(); mii != mie; ++mii) {
551 OS << getInstructionIndex(mii) << '\t' << *mii;
556 void LiveIntervals::dumpInstrs() const {
557 printInstrs(errs());
560 /// conflictsWithPhysRegDef - Returns true if the specified register
561 /// is defined during the duration of the specified interval.
562 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
563 VirtRegMap &vrm, unsigned reg) {
564 for (LiveInterval::Ranges::const_iterator
565 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
566 for (MachineInstrIndex index = getBaseIndex(I->start),
567 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
568 index = getNextIndex(index)) {
569 // skip deleted instructions
570 while (index != end && !getInstructionFromIndex(index))
571 index = getNextIndex(index);
572 if (index == end) break;
574 MachineInstr *MI = getInstructionFromIndex(index);
575 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
576 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
577 if (SrcReg == li.reg || DstReg == li.reg)
578 continue;
579 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
580 MachineOperand& mop = MI->getOperand(i);
581 if (!mop.isReg())
582 continue;
583 unsigned PhysReg = mop.getReg();
584 if (PhysReg == 0 || PhysReg == li.reg)
585 continue;
586 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
587 if (!vrm.hasPhys(PhysReg))
588 continue;
589 PhysReg = vrm.getPhys(PhysReg);
591 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
592 return true;
597 return false;
600 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
601 /// it can check use as well.
602 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
603 unsigned Reg, bool CheckUse,
604 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
605 for (LiveInterval::Ranges::const_iterator
606 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
607 for (MachineInstrIndex index = getBaseIndex(I->start),
608 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
609 index = getNextIndex(index)) {
610 // Skip deleted instructions.
611 MachineInstr *MI = 0;
612 while (index != end) {
613 MI = getInstructionFromIndex(index);
614 if (MI)
615 break;
616 index = getNextIndex(index);
618 if (index == end) break;
620 if (JoinedCopies.count(MI))
621 continue;
622 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
623 MachineOperand& MO = MI->getOperand(i);
624 if (!MO.isReg())
625 continue;
626 if (MO.isUse() && !CheckUse)
627 continue;
628 unsigned PhysReg = MO.getReg();
629 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
630 continue;
631 if (tri_->isSubRegister(Reg, PhysReg))
632 return true;
637 return false;
640 #ifndef NDEBUG
641 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
642 if (TargetRegisterInfo::isPhysicalRegister(reg))
643 errs() << tri_->getName(reg);
644 else
645 errs() << "%reg" << reg;
647 #endif
649 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
650 MachineBasicBlock::iterator mi,
651 MachineInstrIndex MIIdx,
652 MachineOperand& MO,
653 unsigned MOIdx,
654 LiveInterval &interval) {
655 DEBUG({
656 errs() << "\t\tregister: ";
657 printRegName(interval.reg, tri_);
660 // Virtual registers may be defined multiple times (due to phi
661 // elimination and 2-addr elimination). Much of what we do only has to be
662 // done once for the vreg. We use an empty interval to detect the first
663 // time we see a vreg.
664 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
665 if (interval.empty()) {
666 // Get the Idx of the defining instruction.
667 MachineInstrIndex defIndex = getDefIndex(MIIdx);
668 // Earlyclobbers move back one.
669 if (MO.isEarlyClobber())
670 defIndex = getUseIndex(MIIdx);
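// An early-clobber def is written before the instruction's uses are
// read, so its live range must begin at the use slot to interfere with
// the inputs.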
671 VNInfo *ValNo;
672 MachineInstr *CopyMI = NULL;
673 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
674 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
675 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
676 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
677 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
678 CopyMI = mi;
680 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
682 assert(ValNo->id == 0 && "First value in interval is not 0?");
684 // Loop over all of the blocks that the vreg is defined in. There are
685 // two cases we have to handle here. The most common case is a vreg
686 // whose lifetime is contained within a basic block. In this case there
687 // will be a single kill, in MBB, which comes after the definition.
688 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
689 // FIXME: what about dead vars?
690 MachineInstrIndex killIdx;
691 if (vi.Kills[0] != mi)
692 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
693 else
694 killIdx = getNextSlot(defIndex);
696 // If the kill happens after the definition, we have an intra-block
697 // live range.
698 if (killIdx > defIndex) {
699 assert(vi.AliveBlocks.empty() &&
700 "Shouldn't be alive across any blocks!");
701 LiveRange LR(defIndex, killIdx, ValNo);
702 interval.addRange(LR);
703 DEBUG(errs() << " +" << LR << "\n");
704 ValNo->addKill(killIdx);
705 return;
709 // The other case we handle is when a virtual register lives to the end
710 // of the defining block, potentially live across some blocks, then is
711 // live into some number of blocks, but gets killed. Start by adding a
712 // range that goes from this definition to the end of the defining block.
713 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
714 DEBUG(errs() << " +" << NewLR);
715 interval.addRange(NewLR);
717 // Iterate over all of the blocks that the variable is completely
718 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
719 // live interval.
720 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
721 E = vi.AliveBlocks.end(); I != E; ++I) {
722 LiveRange LR(getMBBStartIdx(*I),
723 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
724 ValNo);
725 interval.addRange(LR);
726 DEBUG(errs() << " +" << LR);
729 // Finally, this virtual register is live from the start of any killing
730 // block to the 'use' slot of the killing instruction.
731 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
732 MachineInstr *Kill = vi.Kills[i];
733 MachineInstrIndex killIdx =
734 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
735 LiveRange LR(getMBBStartIdx(Kill->getParent()),
736 killIdx, ValNo);
737 interval.addRange(LR);
738 ValNo->addKill(killIdx);
739 DEBUG(errs() << " +" << LR);
742 } else {
743 // If this is the second time we see a virtual register definition, it
744 // must be due to phi elimination or two addr elimination. If this is
745 // the result of two address elimination, then the vreg is one of the
746 // def-and-use register operands.
747 if (mi->isRegTiedToUseOperand(MOIdx)) {
748 // If this is a two-address definition, then we have already processed
749 // the live range. The only problem is that we didn't realize there
750 // are actually two values in the live interval. Because of this we
751 // need to take the LiveRange that defines this register and split it
752 // into two values.
753 assert(interval.containsOneValue());
754 MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
755 MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
756 if (MO.isEarlyClobber())
757 RedefIndex = getUseIndex(MIIdx);
759 const LiveRange *OldLR =
760 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
761 VNInfo *OldValNo = OldLR->valno;
763 // Delete the initial value, which should be short and continuous,
764 // because the 2-addr copy must be in the same MBB as the redef.
765 interval.removeRange(DefIndex, RedefIndex);
767 // Two-address vregs should always only be redefined once. This means
768 // that at this point, there should be exactly one value number in it.
769 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
771 // The new value number (#1) is defined by the instruction we claimed
772 // defined value #0.
773 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
774 false, // update at *
775 VNInfoAllocator);
776 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
778 // Value#0 is now defined by the 2-addr instruction.
779 OldValNo->def = RedefIndex;
780 OldValNo->setCopy(0);
781 if (MO.isEarlyClobber())
782 OldValNo->setHasRedefByEC(true);
784 // Add the new live interval which replaces the range for the input copy.
785 LiveRange LR(DefIndex, RedefIndex, ValNo);
786 DEBUG(errs() << " replace range with " << LR);
787 interval.addRange(LR);
788 ValNo->addKill(RedefIndex);
790 // If this redefinition is dead, we need to add a dummy unit live
791 // range covering the def slot.
792 if (MO.isDead())
793 interval.addRange(
794 LiveRange(RedefIndex, getNextSlot(RedefIndex), OldValNo));
796 DEBUG({
797 errs() << " RESULT: ";
798 interval.print(errs(), tri_);
800 } else {
801 // Otherwise, this must be because of phi elimination. If this is the
802 // first redefinition of the vreg that we have seen, go back and change
803 // the live range in the PHI block to be a different value number.
804 if (interval.containsOneValue()) {
805 // Remove the old range that we now know has an incorrect number.
806 VNInfo *VNI = interval.getValNumInfo(0);
807 MachineInstr *Killer = vi.Kills[0];
808 phiJoinCopies.push_back(Killer);
809 MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
810 MachineInstrIndex End =
811 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
812 DEBUG({
813 errs() << " Removing [" << Start << "," << End << "] from: ";
814 interval.print(errs(), tri_);
815 errs() << "\n";
817 interval.removeRange(Start, End);
818 assert(interval.ranges.size() == 1 &&
819 "Newly discovered PHI interval has >1 ranges.");
820 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
821 VNI->addKill(terminatorGaps[killMBB]);
822 VNI->setHasPHIKill(true);
823 DEBUG({
824 errs() << " RESULT: ";
825 interval.print(errs(), tri_);
828 // Replace the interval with one of a NEW value number. Note that this
829 // value number isn't actually defined by an instruction, weird huh? :)
830 LiveRange LR(Start, End,
831 interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
832 0, false, VNInfoAllocator));
833 LR.valno->setIsPHIDef(true);
834 DEBUG(errs() << " replace range with " << LR);
835 interval.addRange(LR);
836 LR.valno->addKill(End);
837 DEBUG({
838 errs() << " RESULT: ";
839 interval.print(errs(), tri_);
843 // In the case of PHI elimination, each variable definition is only
844 // live until the end of the block. We've already taken care of the
845 // rest of the live range.
846 MachineInstrIndex defIndex = getDefIndex(MIIdx);
847 if (MO.isEarlyClobber())
848 defIndex = getUseIndex(MIIdx);
850 VNInfo *ValNo;
851 MachineInstr *CopyMI = NULL;
852 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
853 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
854 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
855 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
856 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
857 CopyMI = mi;
858 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
860 MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
861 LiveRange LR(defIndex, killIndex, ValNo);
862 interval.addRange(LR);
863 ValNo->addKill(terminatorGaps[mbb]);
864 ValNo->setHasPHIKill(true);
865 DEBUG(errs() << " +" << LR);
869 DEBUG(errs() << '\n');
872 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
873 MachineBasicBlock::iterator mi,
874 MachineInstrIndex MIIdx,
875 MachineOperand& MO,
876 LiveInterval &interval,
877 MachineInstr *CopyMI) {
878 // A physical register cannot be live across basic blocks, so its
879 // lifetime must end somewhere in its defining basic block.
880 DEBUG({
881 errs() << "\t\tregister: ";
882 printRegName(interval.reg, tri_);
885 MachineInstrIndex baseIndex = MIIdx;
886 MachineInstrIndex start = getDefIndex(baseIndex);
887 // Earlyclobbers move back one.
888 if (MO.isEarlyClobber())
889 start = getUseIndex(MIIdx);
890 MachineInstrIndex end = start;
892 // If it is not used after definition, it is considered dead at
893 // the instruction defining it. Hence its interval is:
894 // [defSlot(def), defSlot(def)+1)
895 if (MO.isDead()) {
896 DEBUG(errs() << " dead");
897 end = getNextSlot(start);
898 goto exit;
901 // If it is not dead on definition, it must be killed by a
902 // subsequent instruction. Hence its interval is:
903 // [defSlot(def), useSlot(kill)+1)
904 baseIndex = getNextIndex(baseIndex);
905 while (++mi != MBB->end()) {
906 while (baseIndex.getVecIndex() < i2miMap_.size() &&
907 getInstructionFromIndex(baseIndex) == 0)
908 baseIndex = getNextIndex(baseIndex);
909 if (mi->killsRegister(interval.reg, tri_)) {
910 DEBUG(errs() << " killed");
911 end = getNextSlot(getUseIndex(baseIndex));
912 goto exit;
913 } else {
914 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
915 if (DefIdx != -1) {
916 if (mi->isRegTiedToUseOperand(DefIdx)) {
917 // Two-address instruction.
918 end = getDefIndex(baseIndex);
919 if (mi->getOperand(DefIdx).isEarlyClobber())
920 end = getUseIndex(baseIndex);
921 } else {
922 // Another instruction redefines the register before it is ever read.
923 // Then the register is essentially dead at the instruction that defines
924 // it. Hence its interval is:
925 // [defSlot(def), defSlot(def)+1)
926 DEBUG(errs() << " dead");
927 end = getNextSlot(start);
929 goto exit;
933 baseIndex = getNextIndex(baseIndex);
936 // The only case we should see a dead physreg here without a killing
937 // instruction is when it is live-in to the function and never used.
938 // Another possibility is that the implicit use of the physical
939 // register was deleted by the two-address pass.
940 end = getNextSlot(start);
942 exit:
943 assert(start < end && "did not find end of interval?");
945 // Already exists? Extend old live interval.
946 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
947 bool Extend = OldLR != interval.end();
948 VNInfo *ValNo = Extend
949 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
950 if (MO.isEarlyClobber() && Extend)
951 ValNo->setHasRedefByEC(true);
952 LiveRange LR(start, end, ValNo);
953 interval.addRange(LR);
954 LR.valno->addKill(end);
955 DEBUG(errs() << " +" << LR << '\n');
958 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
959 MachineBasicBlock::iterator MI,
960 MachineInstrIndex MIIdx,
961 MachineOperand& MO,
962 unsigned MOIdx) {
963 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
964 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
965 getOrCreateInterval(MO.getReg()));
966 else if (allocatableRegs_[MO.getReg()]) {
967 MachineInstr *CopyMI = NULL;
968 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
969 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
970 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
971 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
972 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
973 CopyMI = MI;
974 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
975 getOrCreateInterval(MO.getReg()), CopyMI);
976 // Def of a register also defines its sub-registers.
977 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
978 // If MI also modifies the sub-register explicitly, avoid processing it
979 // more than once. Do not pass in TRI here so it checks for exact match.
980 if (!MI->modifiesRegister(*AS))
981 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
982 getOrCreateInterval(*AS), 0);
986 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
987 MachineInstrIndex MIIdx,
988 LiveInterval &interval, bool isAlias) {
989 DEBUG({
990 errs() << "\t\tlivein register: ";
991 printRegName(interval.reg, tri_);
994 // Look for kills. If the register reaches a def before it's killed, it
995 // shouldn't be considered a livein.
996 MachineBasicBlock::iterator mi = MBB->begin();
997 MachineInstrIndex baseIndex = MIIdx;
998 MachineInstrIndex start = baseIndex;
999 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1000 getInstructionFromIndex(baseIndex) == 0)
1001 baseIndex = getNextIndex(baseIndex);
1002 MachineInstrIndex end = baseIndex;
1003 bool SeenDefUse = false;
1005 while (mi != MBB->end()) {
1006 if (mi->killsRegister(interval.reg, tri_)) {
1007 DEBUG(errs() << " killed");
1008 end = getNextSlot(getUseIndex(baseIndex));
1009 SeenDefUse = true;
1010 break;
1011 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1012 // Another instruction redefines the register before it is ever read.
1013 // Then the register is essentially dead at the instruction that defines
1014 // it. Hence its interval is:
1015 // [defSlot(def), defSlot(def)+1)
1016 DEBUG(errs() << " dead");
1017 end = getNextSlot(getDefIndex(start));
1018 SeenDefUse = true;
1019 break;
1022 baseIndex = getNextIndex(baseIndex);
1023 ++mi;
1024 if (mi != MBB->end()) {
1025 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1026 getInstructionFromIndex(baseIndex) == 0)
1027 baseIndex = getNextIndex(baseIndex);
1031 // Live-in register might not be used at all.
1032 if (!SeenDefUse) {
1033 if (isAlias) {
1034 DEBUG(errs() << " dead");
1035 end = getNextSlot(getDefIndex(MIIdx));
1036 } else {
1037 DEBUG(errs() << " live through");
1038 end = baseIndex;
1042 VNInfo *vni =
1043 interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
1044 0, false, VNInfoAllocator);
1045 vni->setIsPHIDef(true);
1046 LiveRange LR(start, end, vni);
1048 interval.addRange(LR);
1049 LR.valno->addKill(end);
1050 DEBUG(errs() << " +" << LR << '\n');
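/// isProfitableToCoalesce - Examine all defs of SrcInt's register (each is
/// expected to be a copy). Copies from DstInt's register are "identity"
/// copies; early coalescing pays off only if there is a conflict and the
/// identity copies outnumber the others.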
1053 bool
1054 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1055 SmallVector<MachineInstr*,16> &IdentCopies,
1056 SmallVector<MachineInstr*,16> &OtherCopies) {
1057 bool HaveConflict = false;
1058 unsigned NumIdent = 0;
1059 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
1060 re = mri_->reg_end(); ri != re; ++ri) {
1061 MachineOperand &O = ri.getOperand();
1062 if (!O.isDef())
1063 continue;
1065 MachineInstr *MI = &*ri;
1066 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1067 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1068 return false;
1069 if (SrcReg != DstInt.reg) {
1070 OtherCopies.push_back(MI);
1071 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1072 } else {
1073 IdentCopies.push_back(MI);
1074 ++NumIdent;
1078 if (!HaveConflict)
1079 return false; // Let coalescer handle it
1080 return IdentCopies.size() > OtherCopies.size();
1083 void LiveIntervals::performEarlyCoalescing() {
1084 if (!EarlyCoalescing)
1085 return;
1087 /// Perform early coalescing: eliminate copies which feed into phi joins
1088 /// and whose sources are defined by the phi joins.
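1089 /// Concretely, for a phi join copy "B = A": defs of A that copy from B
1090 /// are deleted as identity copies, other defs of A are rewritten to
1091 /// define B directly, and the join itself is then removed.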
1089 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1090 MachineInstr *Join = phiJoinCopies[i];
1091 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1092 break;
1094 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1095 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1096 #ifndef NDEBUG
1097 assert(isMove && "PHI join instruction must be a move!");
1098 #else
1099 isMove = isMove;
1100 #endif
1102 LiveInterval &DstInt = getInterval(PHIDst);
1103 LiveInterval &SrcInt = getInterval(PHISrc);
1104 SmallVector<MachineInstr*, 16> IdentCopies;
1105 SmallVector<MachineInstr*, 16> OtherCopies;
1106 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1107 continue;
1109 DEBUG(errs() << "PHI Join: " << *Join);
1110 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1111 VNInfo *VNI = DstInt.getValNumInfo(0);
1113 // Change the non-identity copies to directly target the phi destination.
1114 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1115 MachineInstr *PHICopy = OtherCopies[i];
1116 DEBUG(errs() << "Moving: " << *PHICopy);
1118 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1119 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1120 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1121 MachineInstrIndex StartIndex = SLR->start;
1122 MachineInstrIndex EndIndex = SLR->end;
1124 // Delete the val# defined by this copy and transfer its live range to
1125 // the phi destination's interval.
1126 SrcInt.removeValNo(SLR->valno);
1127 DEBUG(errs() << " added range [" << StartIndex << ','
1128 << EndIndex << "] to reg" << DstInt.reg << '\n');
1129 if (DstInt.liveAt(StartIndex))
1130 DstInt.removeRange(StartIndex, EndIndex);
1131 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1132 VNInfoAllocator);
1133 NewVNI->setHasPHIKill(true);
1134 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
1135 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1136 MachineOperand &MO = PHICopy->getOperand(j);
1137 if (!MO.isReg() || MO.getReg() != PHISrc)
1138 continue;
1139 MO.setReg(PHIDst);
1143 // Now let's eliminate all the would-be identity copies.
1144 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1145 MachineInstr *PHICopy = IdentCopies[i];
1146 DEBUG(errs() << "Coalescing: " << *PHICopy);
1148 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1149 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1150 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1151 MachineInstrIndex StartIndex = SLR->start;
1152 MachineInstrIndex EndIndex = SLR->end;
1154 // Delete the val# defined by the now-identity copy and transfer its
1155 // live range to the phi destination's interval.
1156 SrcInt.removeValNo(SLR->valno);
1157 RemoveMachineInstrFromMaps(PHICopy);
1158 PHICopy->eraseFromParent();
1159 DEBUG(errs() << " added range [" << StartIndex << ','
1160 << EndIndex << "] to reg" << DstInt.reg << '\n');
1161 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1164 // Remove the phi join and update the phi block liveness.
1165 MachineInstrIndex MIIndex = getInstructionIndex(Join);
1166 MachineInstrIndex UseIndex = getUseIndex(MIIndex);
1167 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1168 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1169 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
1170 DLR->valno->setCopy(0);
1171 DLR->valno->setIsDefAccurate(false);
1172 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1173 SrcInt.removeRange(SLR->start, SLR->end);
1174 assert(SrcInt.empty());
1175 removeInterval(PHISrc);
1176 RemoveMachineInstrFromMaps(Join);
1177 Join->eraseFromParent();
1179 ++numCoalescing;
1183 /// computeIntervals - computes the live intervals for virtual
1184 /// registers. For some ordering of the machine instructions [1,N], a
1185 /// live interval is an interval [i, j), where 1 <= i <= j < N, for
1186 /// which a variable is live.
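/// For example, a virtual register defined by instruction i and last read
/// by instruction j lives over [i's def slot, j's use slot + 1).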
1187 void LiveIntervals::computeIntervals() {
1188 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1189 << "********** Function: "
1190 << ((Value*)mf_->getFunction())->getName() << '\n');
1192 SmallVector<unsigned, 8> UndefUses;
1193 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1194 MBBI != E; ++MBBI) {
1195 MachineBasicBlock *MBB = MBBI;
1196 // Track the index of the current machine instr.
1197 MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
1198 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1200 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1202 // Create intervals for live-ins to this BB first.
1203 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1204 LE = MBB->livein_end(); LI != LE; ++LI) {
1205 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1206 // Multiple live-ins can alias the same register.
1207 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1208 if (!hasInterval(*AS))
1209 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1210 true);
1213 // Skip over empty initial indices.
1214 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1215 getInstructionFromIndex(MIIndex) == 0)
1216 MIIndex = getNextIndex(MIIndex);
1218 for (; MI != miEnd; ++MI) {
1219 DEBUG(errs() << MIIndex << "\t" << *MI);
1221 // Handle defs.
1222 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1223 MachineOperand &MO = MI->getOperand(i);
1224 if (!MO.isReg() || !MO.getReg())
1225 continue;
1227 // handle register defs - build intervals
1228 if (MO.isDef())
1229 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1230 else if (MO.isUndef())
1231 UndefUses.push_back(MO.getReg());
1234 // Skip over the empty slots after each instruction.
1235 unsigned Slots = MI->getDesc().getNumDefs();
1236 if (Slots == 0)
1237 Slots = 1;
1239 while (Slots--)
1240 MIIndex = getNextIndex(MIIndex);
1242 // Skip over empty indices.
1243 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1244 getInstructionFromIndex(MIIndex) == 0)
1245 MIIndex = getNextIndex(MIIndex);
1249 // Create empty intervals for registers defined by implicit_def's (except
1250 // for those implicit_defs that define values which are live out of
1251 // their blocks).
1252 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1253 unsigned UndefReg = UndefUses[i];
1254 (void)getOrCreateInterval(UndefReg);
1258 bool LiveIntervals::findLiveInMBBs(
1259 MachineInstrIndex Start, MachineInstrIndex End,
1260 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1261 std::vector<IdxMBBPair>::const_iterator I =
1262 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1264 bool ResVal = false;
1265 while (I != Idx2MBBMap.end()) {
1266 if (I->first >= End)
1267 break;
1268 MBBs.push_back(I->second);
1269 ResVal = true;
1270 ++I;
1272 return ResVal;
1275 bool LiveIntervals::findReachableMBBs(
1276 MachineInstrIndex Start, MachineInstrIndex End,
1277 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1278 std::vector<IdxMBBPair>::const_iterator I =
1279 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1281 bool ResVal = false;
1282 while (I != Idx2MBBMap.end()) {
1283 if (I->first > End)
1284 break;
1285 MachineBasicBlock *MBB = I->second;
1286 if (getMBBEndIdx(MBB) > End)
1287 break;
1288 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1289 SE = MBB->succ_end(); SI != SE; ++SI)
1290 MBBs.push_back(*SI);
1291 ResVal = true;
1292 ++I;
1294 return ResVal;
1297 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1298 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1299 return new LiveInterval(reg, Weight);
1302 /// dupInterval - Duplicate a live interval. The caller is responsible for
1303 /// managing the allocated memory.
1304 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1305 LiveInterval *NewLI = createInterval(li->reg);
1306 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1307 return NewLI;
1310 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1311 /// copy field and returns the source register that defines it.
1312 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1313 if (!VNI->getCopy())
1314 return 0;
1316 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1317 // If it's extracting out of a physical register, return the sub-register.
1318 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1319 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1320 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1321 return Reg;
1322 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1323 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1324 return VNI->getCopy()->getOperand(2).getReg();
1326 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1327 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1328 return SrcReg;
1329 llvm_unreachable("Unrecognized copy instruction!");
1330 return 0;
1333 //===----------------------------------------------------------------------===//
1334 // Register allocator hooks.
1337 /// getReMatImplicitUse - If the remat definition MI has one (for now, we
1338 /// only allow one) virtual register use operand, then rematerializing it
1339 /// implicitly uses that register. Returns the virtual register.
1340 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1341 MachineInstr *MI) const {
1342 unsigned RegOp = 0;
1343 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1344 MachineOperand &MO = MI->getOperand(i);
1345 if (!MO.isReg() || !MO.isUse())
1346 continue;
1347 unsigned Reg = MO.getReg();
1348 if (Reg == 0 || Reg == li.reg)
1349 continue;
1351 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1352 !allocatableRegs_[Reg])
1353 continue;
1354 // FIXME: For now, only remat MI with at most one register operand.
1355 assert(!RegOp &&
1356 "Can't rematerialize instruction with multiple register operand!");
1357 RegOp = MO.getReg();
1358 #ifndef NDEBUG
1359 break;
1360 #endif
1362 return RegOp;
1365 /// isValNoAvailableAt - Return true if the val# of the specified interval
1366 /// which reaches the given instruction also reaches the specified use index.
1367 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1368 MachineInstrIndex UseIdx) const {
1369 MachineInstrIndex Index = getInstructionIndex(MI);
1370 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1371 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1372 return UI != li.end() && UI->valno == ValNo;
1375 /// isReMaterializable - Returns true if the definition MI of the specified
1376 /// val# of the specified interval is re-materializable.
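/// (Typical candidates are constant materializations and loads from
/// immutable stack slots, which can be recomputed at each use point
/// instead of being spilled and reloaded.)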
1377 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1378 const VNInfo *ValNo, MachineInstr *MI,
1379 SmallVectorImpl<LiveInterval*> &SpillIs,
1380 bool &isLoad) {
1381 if (DisableReMat)
1382 return false;
1384 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1385 return true;
1387 int FrameIdx = 0;
1388 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1389 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1390 // FIXME: Let the target-specific isReallyTriviallyReMaterializable
1391 //        determine this, but remember it is not safe to fold into a
1392 //        two-address instruction.
1393 // This is a load from a fixed stack slot. It can be rematerialized.
1394 return true;
1396 // If the target-specific rules don't identify an instruction as
1397 // being trivially rematerializable, use some target-independent
1398 // rules.
1399 if (!MI->getDesc().isRematerializable() ||
1400 !tii_->isTriviallyReMaterializable(MI)) {
1401 if (!EnableAggressiveRemat)
1402 return false;
1404 // If the instruction accesses memory but the memoperands have been lost,
1405 // we can't analyze it.
1406 const TargetInstrDesc &TID = MI->getDesc();
1407 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1408 return false;
1410 // Avoid instructions obviously unsafe for remat.
1411 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1412 return false;
1414 // If the instruction accesses memory and the memory could be non-constant,
1415 // assume the instruction is not rematerializable.
1416 for (std::list<MachineMemOperand>::const_iterator
1417 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1418 const MachineMemOperand &MMO = *I;
1419 if (MMO.isVolatile() || MMO.isStore())
1420 return false;
1421 const Value *V = MMO.getValue();
1422 if (!V)
1423 return false;
1424 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1425 if (!PSV->isConstant(mf_->getFrameInfo()))
1426 return false;
1427 } else if (!aa_->pointsToConstantMemory(V))
1428 return false;
1431 // If any of the registers accessed are non-constant, conservatively assume
1432 // the instruction is not rematerializable.
1433 unsigned ImpUse = 0;
1434 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1435 const MachineOperand &MO = MI->getOperand(i);
1436 if (MO.isReg()) {
1437 unsigned Reg = MO.getReg();
1438 if (Reg == 0)
1439 continue;
1440 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1441 return false;
1443 // Only allow one def, and that in the first operand.
1444 if (MO.isDef() != (i == 0))
1445 return false;
1447 // Only allow constant-valued registers.
1448 bool IsLiveIn = mri_->isLiveIn(Reg);
1449 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1450 E = mri_->def_end();
1452 // For the def, it should be the only def of that register.
1453 if (MO.isDef() && (next(I) != E || IsLiveIn))
1454 return false;
1456 if (MO.isUse()) {
1457 // Only allow one other register use, as that's all the
1458 // remat mechanisms currently support.
1459 if (Reg != li.reg) {
1460 if (ImpUse == 0)
1461 ImpUse = Reg;
1462 else if (Reg != ImpUse)
1463 return false;
1465 // For the use, there should be only one associated def.
1466 if (I != E && (next(I) != E || IsLiveIn))
1467 return false;
1473 unsigned ImpUse = getReMatImplicitUse(li, MI);
1474 if (ImpUse) {
1475 const LiveInterval &ImpLi = getInterval(ImpUse);
1476 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1477 re = mri_->use_end(); ri != re; ++ri) {
1478 MachineInstr *UseMI = &*ri;
1479 MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
1480 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1481 continue;
1482 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1483 return false;
1486 // If a register operand of the re-materialized instruction is going to
1487 // be spilled next, then it's not legal to re-materialize this instruction.
1488 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1489 if (ImpUse == SpillIs[i]->reg)
1490 return false;
1492 return true;
1495 /// isReMaterializable - Returns true if the definition MI of the specified
1496 /// val# of the specified interval is re-materializable.
1497 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1498 const VNInfo *ValNo, MachineInstr *MI) {
1499 SmallVector<LiveInterval*, 4> Dummy1;
1500 bool Dummy2;
1501 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1504 /// isReMaterializable - Returns true if the defining MI of every val# of
1505 /// the specified interval is re-materializable.
1506 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1507 SmallVectorImpl<LiveInterval*> &SpillIs,
1508 bool &isLoad) {
1509 isLoad = false;
1510 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1511 i != e; ++i) {
1512 const VNInfo *VNI = *i;
1513 if (VNI->isUnused())
1514 continue; // Dead val#.
1515 // Is the def for the val# rematerializable?
1516 if (!VNI->isDefAccurate())
1517 return false;
1518 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1519 bool DefIsLoad = false;
1520 if (!ReMatDefMI ||
1521 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1522 return false;
1523 isLoad |= DefIsLoad;
1525 return true;
1528 /// FilterFoldedOps - Filter out two-address use operands. Return
1529 /// true if it finds any issue with the operands that ought to prevent
1530 /// folding.
1531 static bool FilterFoldedOps(MachineInstr *MI,
1532 SmallVector<unsigned, 2> &Ops,
1533 unsigned &MRInfo,
1534 SmallVector<unsigned, 2> &FoldOps) {
1535 MRInfo = 0;
1536 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1537 unsigned OpIdx = Ops[i];
1538 MachineOperand &MO = MI->getOperand(OpIdx);
1539 // FIXME: fold subreg use.
1540 if (MO.getSubReg())
1541 return true;
1542 if (MO.isDef())
1543 MRInfo |= (unsigned)VirtRegMap::isMod;
1544 else {
1545 // Filter out two-address use operand(s).
1546 if (MI->isRegTiedToDefOperand(OpIdx)) {
1547 MRInfo = VirtRegMap::isModRef;
1548 continue;
1550 MRInfo |= (unsigned)VirtRegMap::isRef;
1552 FoldOps.push_back(OpIdx);
1554 return false;
1558 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1559 /// slot / to reg or any rematerialized load into ith operand of specified
1560 /// MI. If it is successful, MI is updated with the newly created MI and
1561 /// true is returned.
1562 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1563 VirtRegMap &vrm, MachineInstr *DefMI,
1564 MachineInstrIndex InstrIdx,
1565 SmallVector<unsigned, 2> &Ops,
1566 bool isSS, int Slot, unsigned Reg) {
1567 // If it is an implicit def instruction, just delete it.
1568 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1569 RemoveMachineInstrFromMaps(MI);
1570 vrm.RemoveMachineInstrFromMaps(MI);
1571 MI->eraseFromParent();
1572 ++numFolds;
1573 return true;
1576 // Filter the list of operand indexes that are to be folded. Abort if
1577 // any operand will prevent folding.
1578 unsigned MRInfo = 0;
1579 SmallVector<unsigned, 2> FoldOps;
1580 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1581 return false;
1583 // The only time it's safe to fold into a two address instruction is when
1584 // it's folding reload and spill from / into a spill stack slot.
1585 if (DefMI && (MRInfo & VirtRegMap::isMod))
1586 return false;
1588 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1589 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1590 if (fmi) {
1591 // Remember this instruction uses the spill slot.
1592 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1594 // Attempt to fold the memory reference into the instruction. If
1595 // we can do this, we don't need to insert spill code.
1596 MachineBasicBlock &MBB = *MI->getParent();
1597 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1598 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1599 vrm.transferSpillPts(MI, fmi);
1600 vrm.transferRestorePts(MI, fmi);
1601 vrm.transferEmergencySpills(MI, fmi);
1602 mi2iMap_.erase(MI);
1603 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1604 mi2iMap_[fmi] = InstrIdx;
1605 MI = MBB.insert(MBB.erase(MI), fmi);
1606 ++numFolds;
1607 return true;
1609 return false;
1612 /// canFoldMemoryOperand - Returns true if the specified load / store
1613 /// folding is possible.
1614 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1615 SmallVector<unsigned, 2> &Ops,
1616 bool ReMat) const {
1617 // Filter the list of operand indexes that are to be folded. Abort if
1618 // any operand will prevent folding.
1619 unsigned MRInfo = 0;
1620 SmallVector<unsigned, 2> FoldOps;
1621 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1622 return false;
1624 // It's only legal to remat for a use, not a def.
1625 if (ReMat && (MRInfo & VirtRegMap::isMod))
1626 return false;
1628 return tii_->canFoldMemoryOperand(MI, FoldOps);
1631 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1632 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1633 for (LiveInterval::Ranges::const_iterator
1634 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1635 std::vector<IdxMBBPair>::const_iterator II =
1636 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1637 if (II == Idx2MBBMap.end())
1638 continue;
1639 if (I->end > II->first) // crossing a MBB.
1640 return false;
1641 MBBs.insert(II->second);
1642 if (MBBs.size() > 1)
1643 return false;
1644 }
1645 return true;
1646 }
1648 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1649 /// interval on to-be re-materialized operands of MI) with new register.
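///
/// Illustrative sketch (hypothetical registers and opcode): suppose an
/// operand of MI is being remat'ed from a def such as
///   %reg1025 = LEA32r %reg1024, ...
/// where li.reg is %reg1024. The cached remat instruction still names
/// %reg1024, so its use operand must be rewritten to NewVReg as well.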
1650 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1651 MachineInstr *MI, unsigned NewVReg,
1652 VirtRegMap &vrm) {
1653 // There is an implicit use. That means one of the other operands is
1654 // being remat'ed and the remat'ed instruction has li.reg as a
1655 // use operand. Make sure we rewrite that as well.
1656 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1657 MachineOperand &MO = MI->getOperand(i);
1658 if (!MO.isReg())
1659 continue;
1660 unsigned Reg = MO.getReg();
1661 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1662 continue;
1663 if (!vrm.isReMaterialized(Reg))
1664 continue;
1665 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1666 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1667 if (UseMO)
1668 UseMO->setReg(NewVReg);
1669 }
1670 }
1672 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1673 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1674 bool LiveIntervals::
1675 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1676 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1677 MachineInstr *MI,
1678 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1679 unsigned Slot, int LdSlot,
1680 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1681 VirtRegMap &vrm,
1682 const TargetRegisterClass* rc,
1683 SmallVector<int, 4> &ReMatIds,
1684 const MachineLoopInfo *loopInfo,
1685 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1686 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1687 std::vector<LiveInterval*> &NewLIs) {
1688 bool CanFold = false;
1689 RestartInstruction:
1690 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1691 MachineOperand& mop = MI->getOperand(i);
1692 if (!mop.isReg())
1693 continue;
1694 unsigned Reg = mop.getReg();
1695 unsigned RegI = Reg;
1696 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1697 continue;
1698 if (Reg != li.reg)
1699 continue;
1701 bool TryFold = !DefIsReMat;
1702 bool FoldSS = true; // Default behavior unless it's a remat.
1703 int FoldSlot = Slot;
1704 if (DefIsReMat) {
1705 // If this is the rematerializable definition MI itself and
1706 // all of its uses are rematerialized, simply delete it.
1707 if (MI == ReMatOrigDefMI && CanDelete) {
1708 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1709 << MI << '\n');
1710 RemoveMachineInstrFromMaps(MI);
1711 vrm.RemoveMachineInstrFromMaps(MI);
1712 MI->eraseFromParent();
1713 break;
1714 }
1716 // If def for this use can't be rematerialized, then try folding.
1717 // If def is rematerializable and it's a load, also try folding.
1718 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1719 if (isLoad) {
1720 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1721 FoldSS = isLoadSS;
1722 FoldSlot = LdSlot;
1723 }
1724 }
1726 // Scan all of the operands of this instruction rewriting operands
1727 // to use NewVReg instead of li.reg as appropriate. We do this for
1728 // two reasons:
1730 // 1. If the instr reads the same spilled vreg multiple times, we
1731 // want to reuse the NewVReg.
1732 // 2. If the instr is a two-addr instruction, we are required to
1733 // keep the src/dst regs pinned.
1735 // Keep track of whether we replace a use and/or def so that we can
1736 // create the spill interval with the appropriate range.
1738 HasUse = mop.isUse();
1739 HasDef = mop.isDef();
1740 SmallVector<unsigned, 2> Ops;
1741 Ops.push_back(i);
1742 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1743 const MachineOperand &MOj = MI->getOperand(j);
1744 if (!MOj.isReg())
1745 continue;
1746 unsigned RegJ = MOj.getReg();
1747 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1748 continue;
1749 if (RegJ == RegI) {
1750 Ops.push_back(j);
1751 if (!MOj.isUndef()) {
1752 HasUse |= MOj.isUse();
1753 HasDef |= MOj.isDef();
1754 }
1755 }
1756 }
1758 // Create a new virtual register for the spill interval.
1759 // Create the new register now so we can map the fold instruction
1760 // to the new register so when it is unfolded we get the correct
1761 // answer.
1762 bool CreatedNewVReg = false;
1763 if (NewVReg == 0) {
1764 NewVReg = mri_->createVirtualRegister(rc);
1765 vrm.grow();
1766 CreatedNewVReg = true;
1767 }
1769 if (!TryFold)
1770 CanFold = false;
1771 else {
1772 // Do not fold load / store here if we are splitting. We'll find an
1773 // optimal point to insert a load / store later.
1774 if (!TrySplit) {
1775 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1776 Ops, FoldSS, FoldSlot, NewVReg)) {
1777 // Folding the load/store can completely change the instruction in
1778 // unpredictable ways, rescan it from the beginning.
1780 if (FoldSS) {
1781 // We need to give the new vreg the same stack slot as the
1782 // spilled interval.
1783 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1784 }
1786 HasUse = false;
1787 HasDef = false;
1788 CanFold = false;
1789 if (isNotInMIMap(MI))
1790 break;
1791 goto RestartInstruction;
1792 }
1793 } else {
1794 // We'll try to fold it later if it's profitable.
1795 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1796 }
1797 }
1799 mop.setReg(NewVReg);
1800 if (mop.isImplicit())
1801 rewriteImplicitOps(li, MI, NewVReg, vrm);
1803 // Reuse NewVReg for other reads.
1804 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1805 MachineOperand &mopj = MI->getOperand(Ops[j]);
1806 mopj.setReg(NewVReg);
1807 if (mopj.isImplicit())
1808 rewriteImplicitOps(li, MI, NewVReg, vrm);
1809 }
1811 if (CreatedNewVReg) {
1812 if (DefIsReMat) {
1813 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1814 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1815 // Each valnum may have its own remat id.
1816 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1817 } else {
1818 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1819 }
1820 if (!CanDelete || (HasUse && HasDef)) {
1821 // If this is a two-addr instruction then its use operands are
1822 // rematerializable but its def is not. It should be assigned a
1823 // stack slot.
1824 vrm.assignVirt2StackSlot(NewVReg, Slot);
1825 }
1826 } else {
1827 vrm.assignVirt2StackSlot(NewVReg, Slot);
1828 }
1829 } else if (HasUse && HasDef &&
1830 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1831 // If this interval hasn't been assigned a stack slot (because earlier
1832 // def is a deleted remat def), do it now.
1833 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1834 vrm.assignVirt2StackSlot(NewVReg, Slot);
1835 }
1837 // Re-matting an instruction with virtual register use. Add the
1838 // register as an implicit use on the use MI.
1839 if (DefIsReMat && ImpUse)
1840 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1842 // Create a new register interval for this spill / remat.
1843 LiveInterval &nI = getOrCreateInterval(NewVReg);
1844 if (CreatedNewVReg) {
1845 NewLIs.push_back(&nI);
1846 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1847 if (TrySplit)
1848 vrm.setIsSplitFromReg(NewVReg, li.reg);
1849 }
1851 if (HasUse) {
1852 if (CreatedNewVReg) {
1853 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1854 nI.getNextValue(MachineInstrIndex(), 0, false,
1855 VNInfoAllocator));
1856 DEBUG(errs() << " +" << LR);
1857 nI.addRange(LR);
1858 } else {
1859 // Extend the split live interval to this def / use.
1860 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1861 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1862 nI.getValNumInfo(nI.getNumValNums()-1));
1863 DEBUG(errs() << " +" << LR);
1864 nI.addRange(LR);
1865 }
1866 }
1867 if (HasDef) {
1868 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1869 nI.getNextValue(MachineInstrIndex(), 0, false,
1870 VNInfoAllocator));
1871 DEBUG(errs() << " +" << LR);
1872 nI.addRange(LR);
1873 }
1875 DEBUG({
1876 errs() << "\t\t\t\tAdded new interval: ";
1877 nI.print(errs(), tri_);
1878 errs() << '\n';
1879 });
1881 return CanFold;
1882 }
1883 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1884 const VNInfo *VNI,
1885 MachineBasicBlock *MBB,
1886 MachineInstrIndex Idx) const {
1887 MachineInstrIndex End = getMBBEndIdx(MBB);
1888 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1889 if (VNI->kills[j].isPHIIndex())
1890 continue;
1892 MachineInstrIndex KillIdx = VNI->kills[j];
1893 if (KillIdx > Idx && KillIdx < End)
1894 return true;
1895 }
1896 return false;
1897 }
1899 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1900 /// during spilling.
1901 namespace {
1902 struct RewriteInfo {
1903 MachineInstrIndex Index;
1904 MachineInstr *MI;
1905 bool HasUse;
1906 bool HasDef;
1907 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1908 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1909 };
1911 struct RewriteInfoCompare {
1912 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1913 return LHS.Index < RHS.Index;
1914 }
1915 };
1916 }
1918 void LiveIntervals::
1919 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1920 LiveInterval::Ranges::const_iterator &I,
1921 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1922 unsigned Slot, int LdSlot,
1923 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1924 VirtRegMap &vrm,
1925 const TargetRegisterClass* rc,
1926 SmallVector<int, 4> &ReMatIds,
1927 const MachineLoopInfo *loopInfo,
1928 BitVector &SpillMBBs,
1929 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1930 BitVector &RestoreMBBs,
1931 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1932 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1933 std::vector<LiveInterval*> &NewLIs) {
1934 bool AllCanFold = true;
1935 unsigned NewVReg = 0;
1936 MachineInstrIndex start = getBaseIndex(I->start);
1937 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1939 // First collect all the defs / uses in this live range that will be rewritten.
1940 // Make sure they are sorted according to instruction index.
1941 std::vector<RewriteInfo> RewriteMIs;
1942 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1943 re = mri_->reg_end(); ri != re; ) {
1944 MachineInstr *MI = &*ri;
1945 MachineOperand &O = ri.getOperand();
1946 ++ri;
1947 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1948 MachineInstrIndex index = getInstructionIndex(MI);
1949 if (index < start || index >= end)
1950 continue;
1952 if (O.isUndef())
1953 // Must be defined by an implicit def. It should not be spilled. Note,
1954 // this is for correctness reasons, e.g.
1955 // 8 %reg1024<def> = IMPLICIT_DEF
1956 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1957 // The live range [12, 14) is not part of the r1024 live interval since
1958 // it's defined by an implicit def. It will not conflict with the live
1959 // interval of r1025. Now suppose both registers are spilled: you can
1960 // easily see a situation where both registers are reloaded before
1961 // the INSERT_SUBREG and both target registers would overlap.
1962 continue;
1963 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1964 }
1965 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
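// Entries that refer to the same MI share one instruction index, so
// after the sort they sit next to each other; the rewrite loop below
// relies on this to handle all entries for an MI in a single pass.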
1967 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1968 // Now rewrite the defs and uses.
1969 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1970 RewriteInfo &rwi = RewriteMIs[i];
1971 ++i;
1972 MachineInstrIndex index = rwi.Index;
1973 bool MIHasUse = rwi.HasUse;
1974 bool MIHasDef = rwi.HasDef;
1975 MachineInstr *MI = rwi.MI;
1976 // If MI defs and/or uses the same register multiple times, then there
1977 // are multiple entries.
1978 unsigned NumUses = MIHasUse;
1979 while (i != e && RewriteMIs[i].MI == MI) {
1980 assert(RewriteMIs[i].Index == index);
1981 bool isUse = RewriteMIs[i].HasUse;
1982 if (isUse) ++NumUses;
1983 MIHasUse |= isUse;
1984 MIHasDef |= RewriteMIs[i].HasDef;
1985 ++i;
1986 }
1987 MachineBasicBlock *MBB = MI->getParent();
1989 if (ImpUse && MI != ReMatDefMI) {
1990 // Re-matting an instruction with virtual register use. Update the
1991 // register interval's spill weight to HUGE_VALF to prevent it from
1992 // being spilled.
1993 LiveInterval &ImpLi = getInterval(ImpUse);
1994 ImpLi.weight = HUGE_VALF;
1995 }
1997 unsigned MBBId = MBB->getNumber();
1998 unsigned ThisVReg = 0;
1999 if (TrySplit) {
2000 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2001 if (NVI != MBBVRegsMap.end()) {
2002 ThisVReg = NVI->second;
2003 // One common case:
2004 // x = use
2005 // ...
2006 // ...
2007 // def = ...
2008 // = use
2009 // It's better to start a new interval to avoid artificially
2010 // extending the current one.
2011 if (MIHasDef && !MIHasUse) {
2012 MBBVRegsMap.erase(MBB->getNumber());
2013 ThisVReg = 0;
2014 }
2015 }
2016 }
2018 bool IsNew = ThisVReg == 0;
2019 if (IsNew) {
2020 // This ends the previous live interval. If all of its def / use
2021 // can be folded, give it a low spill weight.
2022 if (NewVReg && TrySplit && AllCanFold) {
2023 LiveInterval &nI = getOrCreateInterval(NewVReg);
2024 nI.weight /= 10.0F;
2025 }
2026 AllCanFold = true;
2027 }
2028 NewVReg = ThisVReg;
2030 bool HasDef = false;
2031 bool HasUse = false;
2032 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2033 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2034 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2035 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2036 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2037 if (!HasDef && !HasUse)
2038 continue;
2040 AllCanFold &= CanFold;
2042 // Update weight of spill interval.
2043 LiveInterval &nI = getOrCreateInterval(NewVReg);
2044 if (!TrySplit) {
2045 // The spill weight is now infinity as it cannot be spilled again.
2046 nI.weight = HUGE_VALF;
2047 continue;
2048 }
2050 // Keep track of the last def and first use in each MBB.
2051 if (HasDef) {
2052 if (MI != ReMatOrigDefMI || !CanDelete) {
2053 bool HasKill = false;
2054 if (!HasUse)
2055 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2056 else {
2057 // If this is a two-address code, then this index starts a new VNInfo.
2058 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2059 if (VNI)
2060 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2061 }
2062 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2063 SpillIdxes.find(MBBId);
2064 if (!HasKill) {
2065 if (SII == SpillIdxes.end()) {
2066 std::vector<SRInfo> S;
2067 S.push_back(SRInfo(index, NewVReg, true));
2068 SpillIdxes.insert(std::make_pair(MBBId, S));
2069 } else if (SII->second.back().vreg != NewVReg) {
2070 SII->second.push_back(SRInfo(index, NewVReg, true));
2071 } else if (index > SII->second.back().index) {
2072 // If there is an earlier def and this is a two-address
2073 // instruction, then it's not possible to fold the store (which
2074 // would also fold the load).
2075 SRInfo &Info = SII->second.back();
2076 Info.index = index;
2077 Info.canFold = !HasUse;
2078 }
2079 SpillMBBs.set(MBBId);
2080 } else if (SII != SpillIdxes.end() &&
2081 SII->second.back().vreg == NewVReg &&
2082 index > SII->second.back().index) {
2083 // There is an earlier def that's not killed (must be two-address).
2084 // The spill is no longer needed.
2085 SII->second.pop_back();
2086 if (SII->second.empty()) {
2087 SpillIdxes.erase(MBBId);
2088 SpillMBBs.reset(MBBId);
2089 }
2090 }
2091 }
2092 }
2094 if (HasUse) {
2095 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2096 SpillIdxes.find(MBBId);
2097 if (SII != SpillIdxes.end() &&
2098 SII->second.back().vreg == NewVReg &&
2099 index > SII->second.back().index)
2100 // There are use(s) following the last def; it's not safe to fold the spill.
2101 SII->second.back().canFold = false;
2102 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2103 RestoreIdxes.find(MBBId);
2104 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2105 // If we are splitting live intervals, only fold if it's the first
2106 // use and there isn't another use later in the MBB.
2107 RII->second.back().canFold = false;
2108 else if (IsNew) {
2109 // Only need a reload if there isn't an earlier def / use.
2110 if (RII == RestoreIdxes.end()) {
2111 std::vector<SRInfo> Infos;
2112 Infos.push_back(SRInfo(index, NewVReg, true));
2113 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2114 } else {
2115 RII->second.push_back(SRInfo(index, NewVReg, true));
2116 }
2117 RestoreMBBs.set(MBBId);
2118 }
2119 }
2121 // Update spill weight.
2122 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
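// Note: the weight heuristic grows steeply with loop nesting (roughly
// a power of ten per loop level), so defs and uses inside deep loops
// make the interval much more expensive to spill again.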
2123 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2124 }
2126 if (NewVReg && TrySplit && AllCanFold) {
2127 // If all of its def / use can be folded, give it a low spill weight.
2128 LiveInterval &nI = getOrCreateInterval(NewVReg);
2129 nI.weight /= 10.0F;
2130 }
2131 }
2133 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2134 unsigned vr, BitVector &RestoreMBBs,
2135 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2136 if (!RestoreMBBs[Id])
2137 return false;
2138 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2139 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2140 if (Restores[i].index == index &&
2141 Restores[i].vreg == vr &&
2142 Restores[i].canFold)
2143 return true;
2144 return false;
2145 }
2147 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2148 unsigned vr, BitVector &RestoreMBBs,
2149 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2150 if (!RestoreMBBs[Id])
2151 return;
2152 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2153 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2154 if (Restores[i].index == index && Restores[i].vreg == vr)
2155 Restores[i].index = MachineInstrIndex();
2156 }
2158 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2159 /// spilled and create empty intervals for their uses.
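///
/// E.g. (illustrative):
///   %reg1024<def> = IMPLICIT_DEF
///   ...           = OP %reg1024
/// The IMPLICIT_DEF is erased, and each use is rewritten to a fresh
/// vreg with an empty interval and marked <undef>.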
2160 void
2161 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2162 const TargetRegisterClass* rc,
2163 std::vector<LiveInterval*> &NewLIs) {
2164 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2165 re = mri_->reg_end(); ri != re; ) {
2166 MachineOperand &O = ri.getOperand();
2167 MachineInstr *MI = &*ri;
2168 ++ri;
2169 if (O.isDef()) {
2170 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2171 "Register def was not rewritten?");
2172 RemoveMachineInstrFromMaps(MI);
2173 vrm.RemoveMachineInstrFromMaps(MI);
2174 MI->eraseFromParent();
2175 } else {
2176 // This must be a use of an implicit_def so it's not part of the live
2177 // interval. Create a new empty live interval for it.
2178 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2179 unsigned NewVReg = mri_->createVirtualRegister(rc);
2180 vrm.grow();
2181 vrm.setIsImplicitlyDefined(NewVReg);
2182 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2183 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2184 MachineOperand &MO = MI->getOperand(i);
2185 if (MO.isReg() && MO.getReg() == li.reg) {
2186 MO.setReg(NewVReg);
2187 MO.setIsUndef();
2188 }
2189 }
2190 }
2191 }
2192 }
2194 std::vector<LiveInterval*> LiveIntervals::
2195 addIntervalsForSpillsFast(const LiveInterval &li,
2196 const MachineLoopInfo *loopInfo,
2197 VirtRegMap &vrm) {
2198 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2200 std::vector<LiveInterval*> added;
2202 assert(li.weight != HUGE_VALF &&
2203 "attempt to spill already spilled interval!");
2205 DEBUG({
2206 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2207 li.dump();
2208 errs() << '\n';
2209 });
2211 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2213 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2214 while (RI != mri_->reg_end()) {
2215 MachineInstr* MI = &*RI;
2217 SmallVector<unsigned, 2> Indices;
2218 bool HasUse = false;
2219 bool HasDef = false;
2221 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2222 MachineOperand& mop = MI->getOperand(i);
2223 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2225 HasUse |= MI->getOperand(i).isUse();
2226 HasDef |= MI->getOperand(i).isDef();
2228 Indices.push_back(i);
2229 }
2231 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2232 Indices, true, slot, li.reg)) {
2233 unsigned NewVReg = mri_->createVirtualRegister(rc);
2234 vrm.grow();
2235 vrm.assignVirt2StackSlot(NewVReg, slot);
2237 // create a new register for this spill
2238 LiveInterval &nI = getOrCreateInterval(NewVReg);
2240 // the spill weight is now infinity as it
2241 // cannot be spilled again
2242 nI.weight = HUGE_VALF;
2244 // Rewrite register operands to use the new vreg.
2245 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2246 E = Indices.end(); I != E; ++I) {
2247 MI->getOperand(*I).setReg(NewVReg);
2249 if (MI->getOperand(*I).isUse())
2250 MI->getOperand(*I).setIsKill(true);
2251 }
2253 // Fill in the new live interval.
2254 MachineInstrIndex index = getInstructionIndex(MI);
2255 if (HasUse) {
2256 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2257 nI.getNextValue(MachineInstrIndex(), 0, false,
2258 getVNInfoAllocator()));
2259 DEBUG(errs() << " +" << LR);
2260 nI.addRange(LR);
2261 vrm.addRestorePoint(NewVReg, MI);
2262 }
2263 if (HasDef) {
2264 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2265 nI.getNextValue(MachineInstrIndex(), 0, false,
2266 getVNInfoAllocator()));
2267 DEBUG(errs() << " +" << LR);
2268 nI.addRange(LR);
2269 vrm.addSpillPoint(NewVReg, true, MI);
2270 }
2272 added.push_back(&nI);
2274 DEBUG({
2275 errs() << "\t\t\t\tadded new interval: ";
2276 nI.dump();
2277 errs() << '\n';
2278 });
2279 }
2282 RI = mri_->reg_begin(li.reg);
2283 }
2285 return added;
2286 }
2288 std::vector<LiveInterval*> LiveIntervals::
2289 addIntervalsForSpills(const LiveInterval &li,
2290 SmallVectorImpl<LiveInterval*> &SpillIs,
2291 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2293 if (EnableFastSpilling)
2294 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2296 assert(li.weight != HUGE_VALF &&
2297 "attempt to spill already spilled interval!");
2299 DEBUG({
2300 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2301 li.print(errs(), tri_);
2302 errs() << '\n';
2303 });
2305 // Each bit specifies whether a spill is required in the MBB.
2306 BitVector SpillMBBs(mf_->getNumBlockIDs());
2307 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2308 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2309 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2310 DenseMap<unsigned,unsigned> MBBVRegsMap;
2311 std::vector<LiveInterval*> NewLIs;
2312 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2314 unsigned NumValNums = li.getNumValNums();
2315 SmallVector<MachineInstr*, 4> ReMatDefs;
2316 ReMatDefs.resize(NumValNums, NULL);
2317 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2318 ReMatOrigDefs.resize(NumValNums, NULL);
2319 SmallVector<int, 4> ReMatIds;
2320 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2321 BitVector ReMatDelete(NumValNums);
2322 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2324 // Spilling a split live interval. It cannot be split any further. Also,
2325 // it's guaranteed to be a single val# / range interval.
2326 if (vrm.getPreSplitReg(li.reg)) {
2327 vrm.setIsSplitFromReg(li.reg, 0);
2328 // Unset the split kill marker on the last use.
2329 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2330 if (KillIdx != MachineInstrIndex()) {
2331 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2332 assert(KillMI && "Last use disappeared?");
2333 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2334 assert(KillOp != -1 && "Last use disappeared?");
2335 KillMI->getOperand(KillOp).setIsKill(false);
2336 }
2337 vrm.removeKillPoint(li.reg);
2338 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2339 Slot = vrm.getStackSlot(li.reg);
2340 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2341 MachineInstr *ReMatDefMI = DefIsReMat ?
2342 vrm.getReMaterializedMI(li.reg) : NULL;
2343 int LdSlot = 0;
2344 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2345 bool isLoad = isLoadSS ||
2346 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2347 bool IsFirstRange = true;
2348 for (LiveInterval::Ranges::const_iterator
2349 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2350 // If this is a split live interval with multiple ranges, it means there
2351 // are two-address instructions that re-defined the value. Only the
2352 // first def can be rematerialized!
2353 if (IsFirstRange) {
2354 // Note ReMatOrigDefMI has already been deleted.
2355 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2356 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2357 false, vrm, rc, ReMatIds, loopInfo,
2358 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2359 MBBVRegsMap, NewLIs);
2360 } else {
2361 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2362 Slot, 0, false, false, false,
2363 false, vrm, rc, ReMatIds, loopInfo,
2364 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2365 MBBVRegsMap, NewLIs);
2366 }
2367 IsFirstRange = false;
2368 }
2370 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2371 return NewLIs;
2372 }
2374 bool TrySplit = !intervalIsInOneMBB(li);
2375 if (TrySplit)
2376 ++numSplits;
2377 bool NeedStackSlot = false;
2378 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2379 i != e; ++i) {
2380 const VNInfo *VNI = *i;
2381 unsigned VN = VNI->id;
2382 if (VNI->isUnused())
2383 continue; // Dead val#.
2384 // Is the def for the val# rematerializable?
2385 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2386 ? getInstructionFromIndex(VNI->def) : 0;
2387 bool dummy;
2388 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2389 // Remember how to remat the def of this val#.
2390 ReMatOrigDefs[VN] = ReMatDefMI;
2391 // Original def may be modified so we have to make a copy here.
2392 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2393 CloneMIs.push_back(Clone);
2394 ReMatDefs[VN] = Clone;
2396 bool CanDelete = true;
2397 if (VNI->hasPHIKill()) {
2398 // A kill is a phi node, so not all of its uses can be rematerialized.
2399 // It must not be deleted.
2400 CanDelete = false;
2401 // Need a stack slot if there is any live range where uses cannot be
2402 // rematerialized.
2403 NeedStackSlot = true;
2404 }
2405 if (CanDelete)
2406 ReMatDelete.set(VN);
2407 } else {
2408 // Need a stack slot if there is any live range where uses cannot be
2409 // rematerialized.
2410 NeedStackSlot = true;
2411 }
2412 }
2414 // One stack slot per live interval.
2415 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2416 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2417 Slot = vrm.assignVirt2StackSlot(li.reg);
2419 // This case only occurs when the prealloc splitter has already assigned
2420 // a stack slot to this vreg.
2421 else
2422 Slot = vrm.getStackSlot(li.reg);
2423 }
2425 // Create new intervals and rewrite defs and uses.
2426 for (LiveInterval::Ranges::const_iterator
2427 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2428 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2429 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2430 bool DefIsReMat = ReMatDefMI != NULL;
2431 bool CanDelete = ReMatDelete[I->valno->id];
2432 int LdSlot = 0;
2433 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2434 bool isLoad = isLoadSS ||
2435 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2436 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2437 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2438 CanDelete, vrm, rc, ReMatIds, loopInfo,
2439 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2440 MBBVRegsMap, NewLIs);
2441 }
2443 // Insert spills / restores if we are splitting.
2444 if (!TrySplit) {
2445 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2446 return NewLIs;
2447 }
2449 SmallPtrSet<LiveInterval*, 4> AddedKill;
2450 SmallVector<unsigned, 2> Ops;
2451 if (NeedStackSlot) {
2452 int Id = SpillMBBs.find_first();
2453 while (Id != -1) {
2454 std::vector<SRInfo> &spills = SpillIdxes[Id];
2455 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2456 MachineInstrIndex index = spills[i].index;
2457 unsigned VReg = spills[i].vreg;
2458 LiveInterval &nI = getOrCreateInterval(VReg);
2459 bool isReMat = vrm.isReMaterialized(VReg);
2460 MachineInstr *MI = getInstructionFromIndex(index);
2461 bool CanFold = false;
2462 bool FoundUse = false;
2463 Ops.clear();
2464 if (spills[i].canFold) {
2465 CanFold = true;
2466 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2467 MachineOperand &MO = MI->getOperand(j);
2468 if (!MO.isReg() || MO.getReg() != VReg)
2469 continue;
2471 Ops.push_back(j);
2472 if (MO.isDef())
2473 continue;
2474 if (isReMat ||
2475 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2476 RestoreMBBs, RestoreIdxes))) {
2477 // MI has two-address uses of the same register. If the use
2478 // isn't the first and only use in the BB, then we can't fold
2479 // it. FIXME: Move this to rewriteInstructionsForSpills.
2480 CanFold = false;
2481 break;
2482 }
2483 FoundUse = true;
2484 }
2485 }
2486 // Fold the store into the def if possible.
2487 bool Folded = false;
2488 if (CanFold && !Ops.empty()) {
2489 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2490 Folded = true;
2491 if (FoundUse) {
2492 // Also folded uses, do not issue a load.
2493 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2494 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2495 }
2496 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2497 }
2498 }
2500 // Otherwise tell the spiller to issue a spill.
2501 if (!Folded) {
2502 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2503 bool isKill = LR->end == getStoreIndex(index);
2504 if (!MI->registerDefIsDead(nI.reg))
2505 // No need to spill a dead def.
2506 vrm.addSpillPoint(VReg, isKill, MI);
2507 if (isKill)
2508 AddedKill.insert(&nI);
2509 }
2510 }
2511 Id = SpillMBBs.find_next(Id);
2512 }
2513 }
2515 int Id = RestoreMBBs.find_first();
2516 while (Id != -1) {
2517 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2518 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2519 MachineInstrIndex index = restores[i].index;
2520 if (index == MachineInstrIndex())
2521 continue;
2522 unsigned VReg = restores[i].vreg;
2523 LiveInterval &nI = getOrCreateInterval(VReg);
2524 bool isReMat = vrm.isReMaterialized(VReg);
2525 MachineInstr *MI = getInstructionFromIndex(index);
2526 bool CanFold = false;
2527 Ops.clear();
2528 if (restores[i].canFold) {
2529 CanFold = true;
2530 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2531 MachineOperand &MO = MI->getOperand(j);
2532 if (!MO.isReg() || MO.getReg() != VReg)
2533 continue;
2535 if (MO.isDef()) {
2536 // If this restore were to be folded, it would have been folded
2537 // already.
2538 CanFold = false;
2539 break;
2540 }
2541 Ops.push_back(j);
2542 }
2543 }
2545 // Fold the load into the use if possible.
2546 bool Folded = false;
2547 if (CanFold && !Ops.empty()) {
2548 if (!isReMat)
2549 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2550 else {
2551 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2552 int LdSlot = 0;
2553 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2554 // If the rematerializable def is a load, also try to fold it.
2555 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2556 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2557 Ops, isLoadSS, LdSlot, VReg);
2558 if (!Folded) {
2559 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2560 if (ImpUse) {
2561 // Re-matting an instruction with virtual register use. Add the
2562 // register as an implicit use on the use MI and update the register
2563 // interval's spill weight to HUGE_VALF to prevent it from being
2564 // spilled.
2565 LiveInterval &ImpLi = getInterval(ImpUse);
2566 ImpLi.weight = HUGE_VALF;
2567 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2568 }
2569 }
2570 }
2572 // If folding is not possible / failed, then tell the spiller to issue a
2573 // load / rematerialization for us.
2574 if (Folded)
2575 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2576 else
2577 vrm.addRestorePoint(VReg, MI);
2578 }
2579 Id = RestoreMBBs.find_next(Id);
2580 }
2582 // Finalize intervals: add kills, finalize spill weights, and filter out
2583 // dead intervals.
2584 std::vector<LiveInterval*> RetNewLIs;
2585 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2586 LiveInterval *LI = NewLIs[i];
2587 if (!LI->empty()) {
2588 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2589 if (!AddedKill.count(LI)) {
2590 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2591 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2592 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2593 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2594 assert(UseIdx != -1);
2595 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2596 LastUse->getOperand(UseIdx).setIsKill();
2597 vrm.addKillPoint(LI->reg, LastUseIdx);
2598 }
2599 }
2600 RetNewLIs.push_back(LI);
2601 }
2602 }
2604 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2605 return RetNewLIs;
2606 }
2608 /// hasAllocatableSuperReg - Return true if the specified physical register has
2609 /// any super register that's allocatable.
2610 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2611 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2612 if (allocatableRegs_[*AS] && hasInterval(*AS))
2613 return true;
2614 return false;
2615 }
2617 /// getRepresentativeReg - Find the largest super register of the specified
2618 /// physical register.
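///
/// E.g. on 32-bit X86 (assuming the usual register hierarchy), asking
/// for AX would typically yield EAX, its largest super-register.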
2619 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2620 // Find the largest super-register that is allocatable.
2621 unsigned BestReg = Reg;
2622 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2623 unsigned SuperReg = *AS;
2624 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2625 BestReg = SuperReg;
2626 break;
2627 }
2628 }
2629 return BestReg;
2630 }
2632 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2633 /// specified interval that conflict with the specified physical register.
2634 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2635 unsigned PhysReg) const {
2636 unsigned NumConflicts = 0;
2637 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2638 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2639 E = mri_->reg_end(); I != E; ++I) {
2640 MachineOperand &O = I.getOperand();
2641 MachineInstr *MI = O.getParent();
2642 MachineInstrIndex Index = getInstructionIndex(MI);
2643 if (pli.liveAt(Index))
2644 ++NumConflicts;
2645 }
2646 return NumConflicts;
2647 }
2649 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2650 /// around all defs and uses of the specified interval. Return true if it
2651 /// was able to cut its interval.
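///
/// E.g. (illustrative): if li must end up in EAX but EAX is already
/// live across one of li's defs or uses, EAX is emergency-spilled
/// around that instruction and the overlapping slice of EAX's interval
/// is removed so allocation can proceed.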
2652 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2653 unsigned PhysReg, VirtRegMap &vrm) {
2654 unsigned SpillReg = getRepresentativeReg(PhysReg);
2656 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2657 // There may be registers which alias PhysReg but which are not
2658 // sub-registers of the chosen representative super register; assert
2659 // since we can't handle that yet.
2660 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2661 tri_->isSuperRegister(*AS, SpillReg));
2663 bool Cut = false;
2664 LiveInterval &pli = getInterval(SpillReg);
2665 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2666 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2667 E = mri_->reg_end(); I != E; ++I) {
2668 MachineOperand &O = I.getOperand();
2669 MachineInstr *MI = O.getParent();
2670 if (SeenMIs.count(MI))
2671 continue;
2672 SeenMIs.insert(MI);
2673 MachineInstrIndex Index = getInstructionIndex(MI);
2674 if (pli.liveAt(Index)) {
2675 vrm.addEmergencySpill(SpillReg, MI);
2676 MachineInstrIndex StartIdx = getLoadIndex(Index);
2677 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2678 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2679 pli.removeRange(StartIdx, EndIdx);
2680 Cut = true;
2681 } else {
2682 std::string msg;
2683 raw_string_ostream Msg(msg);
2684 Msg << "Ran out of registers during register allocation!";
2685 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2686 Msg << "\nPlease check your inline asm statement for invalid "
2687 << "constraints:\n";
2688 MI->print(Msg, tm_);
2689 }
2690 llvm_report_error(Msg.str());
2691 }
2692 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2693 if (!hasInterval(*AS))
2694 continue;
2695 LiveInterval &spli = getInterval(*AS);
2696 if (spli.liveAt(Index))
2697 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2698 }
2699 }
2700 }
2701 return Cut;
2702 }
2704 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2705 MachineInstr* startInst) {
2706 LiveInterval& Interval = getOrCreateInterval(reg);
2707 VNInfo* VN = Interval.getNextValue(
2708 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2709 startInst, true, getVNInfoAllocator());
2710 VN->setHasPHIKill(true);
2711 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2712 LiveRange LR(
2713 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2714 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2715 Interval.addRange(LR);
2717 return LR;
2718 }