//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = true;
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
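
  // Note on the index encoding used below: each instruction owns a block of
  // InstrSlots::NUM consecutive indices, one per slot. As a sketch, assuming
  // the slot order LOAD, USE, DEF, STORE from LiveIntervalAnalysis.h with
  // InstrSlots::NUM == 4, an instruction at base index 8 owns indices
  // 8 (load), 9 (use), 10 (def) and 11 (store); index % InstrSlots::NUM
  // recovers the slot and index / InstrSlots::NUM the instruction.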

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
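        // Concretely: a start that sat on a LOAD slot marked a block entry,
        // so it is remapped to the (new) start of the block that contained
        // it; any other start still has its defining instruction in OldI2MI
        // and simply gets that instruction's new index plus the old slot
        // offset.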
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                   std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
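        // (The sentinels in question: a def of ~0U marks a value with no
        // known defining instruction, e.g. a live-in; ~1U marks a dead
        // val#. See the DefIdx checks in isReMaterializable below.)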
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
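    // For instance, with InstrSlots::NUM == 4 (indices illustrative):
    //   8   %reg1024 = ...          ; def slot 10
    //   16  ...      = %reg1024     ; use slot 17, so killIdx is 18
    // yields the single intra-block range [10, 18) handled just below.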
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
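      // Sketch of the situation being repaired (opcodes illustrative):
      //   4   %reg1024 = MOV %reg1025      ; originally value #0
      //   12  %reg1024 = ADD %reg1024, 4   ; tied redef of the same vreg
      // After the split the copy's range carries a new value #1 and the
      // ADD's redef becomes the (renumbered) value #0.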
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
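      // Sketch: after PHI elimination the same vreg is copy-defined once in
      // each predecessor block, e.g.
      //   bb1:  %reg1024 = MOV %reg1025   ; first def, already processed
      //   bb2:  %reg1024 = MOV %reg1026   ; this redefinition
      // Each such def is live only to the end of its block (see below), so
      // the first def's range must get its own value number.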
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
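  // (Numerically, assuming InstrSlots::NUM == 4: a dead def at base index
  // 8 occupies def slot 10 and gets the one-slot interval [10, 11).)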
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

exit:
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
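///
/// A concrete (illustrative) reading: if %reg1024 is first defined at
/// instruction index 6 and last read at index 17, its live interval is
/// written [6, 18) -- half-open, so the end index is the first point at
/// which the register is no longer live.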
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
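/// (e.g. if %reg1026 was defined by "%reg1026 = LEA %reg1025, ...", then
/// rematerializing %reg1026 elsewhere implicitly keeps %reg1025 alive; the
/// opcode here is only illustrative.)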
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In release builds, stop at the first register operand; with assertions
    // enabled, keep scanning so a second operand trips the assert above.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
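/// (Typical candidates are instructions whose value can be recomputed from
/// nothing but their operands -- say, materializing a constant into a
/// register, or the fixed-stack-slot load special-cased below.)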
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember that it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill or a restore from
/// a stack slot, or a rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
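/// (For instance, on x86 a reload of %reg1024 followed by
/// "%reg1025 = ADD32rr %reg1025, %reg1024" can be folded into a single
/// "%reg1025 = ADD32rm %reg1025, <fi#n>" -- opcodes are illustrative.)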
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      //   8   %reg1024<def> = IMPLICIT_DEF
      //   12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
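    // (getSpillWeight grows roughly as (def + use) * 10^loopDepth, so
    // references buried in loops dominate the weight.)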
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      //   8   %reg1024<def> = IMPLICIT_DEF
      //   12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;
1597 // Update weight of spill interval.
1598 LiveInterval &nI = getOrCreateInterval(NewVReg);
1599 if (!TrySplit) {
1600 // The spill weight is now infinity as it cannot be spilled again.
1601 nI.weight = HUGE_VALF;
1602 continue;
1605 // Keep track of the last def and first use in each MBB.
1606 if (HasDef) {
1607 if (MI != ReMatOrigDefMI || !CanDelete) {
1608 bool HasKill = false;
1609 if (!HasUse)
1610 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1611 else {
1612 // If this is a two-address code, then this index starts a new VNInfo.
1613 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1614 if (VNI)
1615 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1617 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1618 SpillIdxes.find(MBBId);
1619 if (!HasKill) {
1620 if (SII == SpillIdxes.end()) {
1621 std::vector<SRInfo> S;
1622 S.push_back(SRInfo(index, NewVReg, true));
1623 SpillIdxes.insert(std::make_pair(MBBId, S));
1624 } else if (SII->second.back().vreg != NewVReg) {
1625 SII->second.push_back(SRInfo(index, NewVReg, true));
1626 } else if ((int)index > SII->second.back().index) {
1627 // If there is an earlier def and this is a two-address
1628 // instruction, then it's not possible to fold the store (which
1629 // would also fold the load).
1630 SRInfo &Info = SII->second.back();
1631 Info.index = index;
1632 Info.canFold = !HasUse;
1633 }
1634 SpillMBBs.set(MBBId);
1635 } else if (SII != SpillIdxes.end() &&
1636 SII->second.back().vreg == NewVReg &&
1637 (int)index > SII->second.back().index) {
1638 // There is an earlier def that's not killed (must be two-address).
1639 // The spill is no longer needed.
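// Illustrative two-address sequence (hypothetical opcodes and indices):
//   8:  r1024 = MOV32ri 7         ; spill recorded after the def at 8
//   12: r1024 = ADD32ri r1024, 5  ; re-def; only the store after 12 is needed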
1640 SII->second.pop_back();
1641 if (SII->second.empty()) {
1642 SpillIdxes.erase(MBBId);
1643 SpillMBBs.reset(MBBId);
1644 }
1645 }
1646 }
1647 }
1649 if (HasUse) {
1650 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1651 SpillIdxes.find(MBBId);
1652 if (SII != SpillIdxes.end() &&
1653 SII->second.back().vreg == NewVReg &&
1654 (int)index > SII->second.back().index)
1655 // There are use(s) following the last def; it's not safe to fold the spill.
1656 SII->second.back().canFold = false;
1657 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1658 RestoreIdxes.find(MBBId);
1659 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1660 // If we are splitting live intervals, only fold if it's the first
1661 // use and there isn't another use later in the MBB.
1662 RII->second.back().canFold = false;
1663 else if (IsNew) {
1664 // Only need a reload if there isn't an earlier def / use.
1665 if (RII == RestoreIdxes.end()) {
1666 std::vector<SRInfo> Infos;
1667 Infos.push_back(SRInfo(index, NewVReg, true));
1668 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1669 } else {
1670 RII->second.push_back(SRInfo(index, NewVReg, true));
1671 }
1672 RestoreMBBs.set(MBBId);
1673 }
1674 }
1676 // Update spill weight.
1677 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1678 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1679 }
1681 if (NewVReg && TrySplit && AllCanFold) {
1682 // If all of its def / use can be folded, give it a low spill weight.
1683 LiveInterval &nI = getOrCreateInterval(NewVReg);
1684 nI.weight /= 10.0F;
1685 }
1686 }
1688 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1689 BitVector &RestoreMBBs,
1690 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1691 if (!RestoreMBBs[Id])
1692 return false;
1693 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1694 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1695 if (Restores[i].index == index &&
1696 Restores[i].vreg == vr &&
1697 Restores[i].canFold)
1698 return true;
1699 return false;
1700 }
1702 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1703 BitVector &RestoreMBBs,
1704 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1705 if (!RestoreMBBs[Id])
1706 return;
1707 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1708 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1709 if (Restores[i].index == index && Restores[i].vreg == vr)
1710 Restores[i].index = -1;
1711 }
1713 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1714 /// spilled and create empty intervals for their uses.
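/// A sketch of the rewrite (hypothetical registers):
///   r1024 = IMPLICIT_DEF   ; erased below
///   ...   = use r1024      ; r1024 is replaced with a fresh vreg, e.g.
///                          ; r2048, which gets an empty live interval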
1715 void
1716 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1717 const TargetRegisterClass* rc,
1718 std::vector<LiveInterval*> &NewLIs) {
1719 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1720 re = mri_->reg_end(); ri != re; ) {
1721 MachineOperand &O = ri.getOperand();
1722 MachineInstr *MI = &*ri;
1723 ++ri;
1724 if (O.isDef()) {
1725 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1726 "Register def was not rewritten?");
1727 RemoveMachineInstrFromMaps(MI);
1728 vrm.RemoveMachineInstrFromMaps(MI);
1729 MI->eraseFromParent();
1730 } else {
1731 // This must be a use of an implicit_def, so it's not part of the live
1732 // interval. Create a new empty live interval for it.
1733 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1734 unsigned NewVReg = mri_->createVirtualRegister(rc);
1735 vrm.grow();
1736 vrm.setIsImplicitlyDefined(NewVReg);
1737 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1738 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1739 MachineOperand &MO = MI->getOperand(i);
1740 if (MO.isReg() && MO.getReg() == li.reg)
1741 MO.setReg(NewVReg);
1742 }
1743 }
1744 }
1745 }
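/// addIntervalsForSpillsFast - A simpler spilling path used when -fast-spill
/// is enabled: no interval splitting is attempted; if folding the memory
/// operand fails, each def / use of the spilled vreg simply gets a fresh
/// vreg with a tiny interval around the instruction, plus a spill or
/// restore point (see the body below).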
1747 std::vector<LiveInterval*> LiveIntervals::
1748 addIntervalsForSpillsFast(const LiveInterval &li,
1749 const MachineLoopInfo *loopInfo,
1750 VirtRegMap &vrm, float& SSWeight) {
1751 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1753 std::vector<LiveInterval*> added;
1755 assert(li.weight != HUGE_VALF &&
1756 "attempt to spill already spilled interval!");
1758 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1759 DEBUG(li.dump());
1760 DOUT << '\n';
1762 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1764 SSWeight = 0.0f;
1766 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1767 while (RI != mri_->reg_end()) {
1768 MachineInstr* MI = &*RI;
1770 SmallVector<unsigned, 2> Indices;
1771 bool HasUse = false;
1772 bool HasDef = false;
1774 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1775 MachineOperand& mop = MI->getOperand(i);
1776 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1778 HasUse |= MI->getOperand(i).isUse();
1779 HasDef |= MI->getOperand(i).isDef();
1781 Indices.push_back(i);
1782 }
1784 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1785 Indices, true, slot, li.reg)) {
1786 unsigned NewVReg = mri_->createVirtualRegister(rc);
1787 vrm.grow();
1788 vrm.assignVirt2StackSlot(NewVReg, slot);
1790 // Create a new register for this spill.
1791 LiveInterval &nI = getOrCreateInterval(NewVReg);
1793 // The spill weight is now infinity as it
1794 // cannot be spilled again.
1795 nI.weight = HUGE_VALF;
1797 // Rewrite register operands to use the new vreg.
1798 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1799 E = Indices.end(); I != E; ++I) {
1800 MI->getOperand(*I).setReg(NewVReg);
1802 if (MI->getOperand(*I).isUse())
1803 MI->getOperand(*I).setIsKill(true);
1804 }
1806 // Fill in the new live interval.
1807 unsigned index = getInstructionIndex(MI);
1808 if (HasUse) {
1809 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1810 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1811 DOUT << " +" << LR;
1812 nI.addRange(LR);
1813 vrm.addRestorePoint(NewVReg, MI);
1814 }
1815 if (HasDef) {
1816 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1817 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1818 DOUT << " +" << LR;
1819 nI.addRange(LR);
1820 vrm.addSpillPoint(NewVReg, true, MI);
1821 }
1823 added.push_back(&nI);
1825 DOUT << "\t\t\t\tadded new interval: ";
1826 DEBUG(nI.dump());
1827 DOUT << '\n';
1829 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1830 if (HasUse) {
1831 if (HasDef)
1832 SSWeight += getSpillWeight(true, true, loopDepth);
1833 else
1834 SSWeight += getSpillWeight(false, true, loopDepth);
1835 } else
1836 SSWeight += getSpillWeight(true, false, loopDepth);
1837 }
1840 RI = mri_->reg_begin(li.reg);
1841 }
1843 return added;
1844 }
1846 std::vector<LiveInterval*> LiveIntervals::
1847 addIntervalsForSpills(const LiveInterval &li,
1848 SmallVectorImpl<LiveInterval*> &SpillIs,
1849 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1850 float &SSWeight) {
1852 if (EnableFastSpilling)
1853 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1855 assert(li.weight != HUGE_VALF &&
1856 "attempt to spill already spilled interval!");
1858 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1859 li.print(DOUT, tri_);
1860 DOUT << '\n';
1862 // Spill slot weight.
1863 SSWeight = 0.0f;
1865 // Each bit specifies whether a spill is required in the MBB.
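// Illustrative contents (hypothetical numbers): if MBB #3 needs a store
// after a def at index 52 rewritten to new vreg r2048, then SpillMBBs[3]
// is set and SpillIdxes[3] holds SRInfo(52, r2048, canFold).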
1866 BitVector SpillMBBs(mf_->getNumBlockIDs());
1867 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1868 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1869 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1870 DenseMap<unsigned,unsigned> MBBVRegsMap;
1871 std::vector<LiveInterval*> NewLIs;
1872 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1874 unsigned NumValNums = li.getNumValNums();
1875 SmallVector<MachineInstr*, 4> ReMatDefs;
1876 ReMatDefs.resize(NumValNums, NULL);
1877 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1878 ReMatOrigDefs.resize(NumValNums, NULL);
1879 SmallVector<int, 4> ReMatIds;
1880 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1881 BitVector ReMatDelete(NumValNums);
1882 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1884 // Spilling a split live interval. It cannot be split any further. Also,
1885 // it's guaranteed to be a single val# / range interval.
1886 if (vrm.getPreSplitReg(li.reg)) {
1887 vrm.setIsSplitFromReg(li.reg, 0);
1888 // Unset the split kill marker on the last use.
1889 unsigned KillIdx = vrm.getKillPoint(li.reg);
1890 if (KillIdx) {
1891 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1892 assert(KillMI && "Last use disappeared?");
1893 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1894 assert(KillOp != -1 && "Last use disappeared?");
1895 KillMI->getOperand(KillOp).setIsKill(false);
1896 }
1897 vrm.removeKillPoint(li.reg);
1898 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1899 Slot = vrm.getStackSlot(li.reg);
1900 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1901 MachineInstr *ReMatDefMI = DefIsReMat ?
1902 vrm.getReMaterializedMI(li.reg) : NULL;
1903 int LdSlot = 0;
1904 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1905 bool isLoad = isLoadSS ||
1906 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1907 bool IsFirstRange = true;
1908 for (LiveInterval::Ranges::const_iterator
1909 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1910 // If this is a split live interval with multiple ranges, it means there
1911 // are two-address instructions that re-defined the value. Only the
1912 // first def can be rematerialized!
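// E.g. (hypothetical): for a split interval re-defined by a two-address op,
//   r1024 = MOV32ri 42        ; first range: def can be re-materialized
//   r1024 = ADD32ri r1024, 1  ; later range: must be reloaded from the slot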
1913 if (IsFirstRange) {
1914 // Note ReMatOrigDefMI has already been deleted.
1915 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1916 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1917 false, vrm, rc, ReMatIds, loopInfo,
1918 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1919 MBBVRegsMap, NewLIs, SSWeight);
1920 } else {
1921 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1922 Slot, 0, false, false, false,
1923 false, vrm, rc, ReMatIds, loopInfo,
1924 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1925 MBBVRegsMap, NewLIs, SSWeight);
1926 }
1927 IsFirstRange = false;
1928 }
1930 SSWeight = 0.0f; // Already accounted for when split.
1931 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1932 return NewLIs;
1933 }
1935 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1936 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1937 TrySplit = false;
1938 if (TrySplit)
1939 ++numSplits;
1940 bool NeedStackSlot = false;
1941 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1942 i != e; ++i) {
1943 const VNInfo *VNI = *i;
1944 unsigned VN = VNI->id;
1945 unsigned DefIdx = VNI->def;
1946 if (DefIdx == ~1U)
1947 continue; // Dead val#.
1948 // Is the def for the val# rematerializable?
1949 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1950 ? 0 : getInstructionFromIndex(DefIdx);
1951 bool dummy;
1952 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1953 // Remember how to remat the def of this val#.
1954 ReMatOrigDefs[VN] = ReMatDefMI;
1955 // Original def may be modified so we have to make a copy here.
1956 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1957 ClonedMIs.push_back(Clone);
1958 ReMatDefs[VN] = Clone;
1960 bool CanDelete = true;
1961 if (VNI->hasPHIKill) {
1962 // A kill is a phi node, so not all of its uses can be rematerialized.
1963 // It must not be deleted.
1964 CanDelete = false;
1965 // Need a stack slot if there is any live range where uses cannot be
1966 // rematerialized.
1967 NeedStackSlot = true;
1968 }
1969 if (CanDelete)
1970 ReMatDelete.set(VN);
1971 } else {
1972 // Need a stack slot if there is any live range where uses cannot be
1973 // rematerialized.
1974 NeedStackSlot = true;
1975 }
1976 }
1978 // One stack slot per live interval.
1979 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1980 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1981 Slot = vrm.assignVirt2StackSlot(li.reg);
1983 // This case only occurs when the prealloc splitter has already assigned
1984 // a stack slot to this vreg.
1985 else
1986 Slot = vrm.getStackSlot(li.reg);
1987 }
1989 // Create new intervals and rewrite defs and uses.
1990 for (LiveInterval::Ranges::const_iterator
1991 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1992 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1993 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1994 bool DefIsReMat = ReMatDefMI != NULL;
1995 bool CanDelete = ReMatDelete[I->valno->id];
1996 int LdSlot = 0;
1997 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1998 bool isLoad = isLoadSS ||
1999 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2000 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2001 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2002 CanDelete, vrm, rc, ReMatIds, loopInfo,
2003 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2004 MBBVRegsMap, NewLIs, SSWeight);
2005 }
2007 // Insert spills / restores if we are splitting.
2008 if (!TrySplit) {
2009 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2010 return NewLIs;
2011 }
2013 SmallPtrSet<LiveInterval*, 4> AddedKill;
2014 SmallVector<unsigned, 2> Ops;
2015 if (NeedStackSlot) {
2016 int Id = SpillMBBs.find_first();
2017 while (Id != -1) {
2018 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2019 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2020 std::vector<SRInfo> &spills = SpillIdxes[Id];
2021 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2022 int index = spills[i].index;
2023 unsigned VReg = spills[i].vreg;
2024 LiveInterval &nI = getOrCreateInterval(VReg);
2025 bool isReMat = vrm.isReMaterialized(VReg);
2026 MachineInstr *MI = getInstructionFromIndex(index);
2027 bool CanFold = false;
2028 bool FoundUse = false;
2029 Ops.clear();
2030 if (spills[i].canFold) {
2031 CanFold = true;
2032 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2033 MachineOperand &MO = MI->getOperand(j);
2034 if (!MO.isReg() || MO.getReg() != VReg)
2035 continue;
2037 Ops.push_back(j);
2038 if (MO.isDef())
2039 continue;
2040 if (isReMat ||
2041 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2042 RestoreMBBs, RestoreIdxes))) {
2043 // MI has two-address uses of the same register. If the use
2044 // isn't the first and only use in the BB, then we can't fold
2045 // it. FIXME: Move this to rewriteInstructionsForSpills.
2046 CanFold = false;
2047 break;
2048 }
2049 FoundUse = true;
2050 }
2051 }
2052 // Fold the store into the def if possible.
2053 bool Folded = false;
2054 if (CanFold && !Ops.empty()) {
2055 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2056 Folded = true;
2057 if (FoundUse) {
2058 // Also folded the use(s), so do not issue a load.
2059 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2060 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2061 }
2062 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2063 }
2064 }
2066 // Otherwise tell the spiller to issue a spill.
2067 if (!Folded) {
2068 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2069 bool isKill = LR->end == getStoreIndex(index);
2070 if (!MI->registerDefIsDead(nI.reg))
2071 // No need to spill a dead def.
2072 vrm.addSpillPoint(VReg, isKill, MI);
2073 if (isKill)
2074 AddedKill.insert(&nI);
2075 }
2077 // Update spill slot weight.
2078 if (!isReMat)
2079 SSWeight += getSpillWeight(true, false, loopDepth);
2080 }
2081 Id = SpillMBBs.find_next(Id);
2082 }
2083 }
2085 int Id = RestoreMBBs.find_first();
2086 while (Id != -1) {
2087 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2088 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2090 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2091 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2092 int index = restores[i].index;
2093 if (index == -1)
2094 continue;
2095 unsigned VReg = restores[i].vreg;
2096 LiveInterval &nI = getOrCreateInterval(VReg);
2097 bool isReMat = vrm.isReMaterialized(VReg);
2098 MachineInstr *MI = getInstructionFromIndex(index);
2099 bool CanFold = false;
2100 Ops.clear();
2101 if (restores[i].canFold) {
2102 CanFold = true;
2103 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2104 MachineOperand &MO = MI->getOperand(j);
2105 if (!MO.isReg() || MO.getReg() != VReg)
2106 continue;
2108 if (MO.isDef()) {
2109 // If this restore were to be folded, it would have been folded
2110 // already.
2111 CanFold = false;
2112 break;
2113 }
2114 Ops.push_back(j);
2115 }
2116 }
2118 // Fold the load into the use if possible.
2119 bool Folded = false;
2120 if (CanFold && !Ops.empty()) {
2121 if (!isReMat)
2122 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2123 else {
2124 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2125 int LdSlot = 0;
2126 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2127 // If the rematerializable def is a load, also try to fold it.
2128 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2129 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2130 Ops, isLoadSS, LdSlot, VReg);
2131 if (!Folded) {
2132 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2133 if (ImpUse) {
2134 // Re-matting an instruction with virtual register use. Add the
2135 // register as an implicit use on the use MI and update the register
2136 // interval's spill weight to HUGE_VALF to prevent it from being
2137 // spilled.
2138 LiveInterval &ImpLi = getInterval(ImpUse);
2139 ImpLi.weight = HUGE_VALF;
2140 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2141 }
2142 }
2143 }
2144 }
2145 // If folding is not possible / failed, then tell the spiller to issue a
2146 // load / rematerialization for us.
2147 if (Folded)
2148 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2149 else
2150 vrm.addRestorePoint(VReg, MI);
2152 // Update spill slot weight.
2153 if (!isReMat)
2154 SSWeight += getSpillWeight(false, true, loopDepth);
2155 }
2156 Id = RestoreMBBs.find_next(Id);
2157 }
2159 // Finalize intervals: add kills, finalize spill weights, and filter out
2160 // dead intervals.
2161 std::vector<LiveInterval*> RetNewLIs;
2162 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2163 LiveInterval *LI = NewLIs[i];
2164 if (!LI->empty()) {
2165 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2166 if (!AddedKill.count(LI)) {
2167 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2168 unsigned LastUseIdx = getBaseIndex(LR->end);
2169 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2170 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2171 assert(UseIdx != -1);
2172 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2173 LastUse->getOperand(UseIdx).setIsKill();
2174 vrm.addKillPoint(LI->reg, LastUseIdx);
2175 }
2176 }
2177 RetNewLIs.push_back(LI);
2178 }
2179 }
2181 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2182 return RetNewLIs;
2183 }
2185 /// hasAllocatableSuperReg - Return true if the specified physical register has
2186 /// any super register that's allocatable.
2187 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2188 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2189 if (allocatableRegs_[*AS] && hasInterval(*AS))
2190 return true;
2191 return false;
2192 }
2194 /// getRepresentativeReg - Find the largest super register of the specified
2195 /// physical register.
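/// For example, on X86 the representative of AX would typically be EAX
/// (or RAX in 64-bit mode), assuming that super-register has a live
/// interval and no allocatable super-register of its own.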
2196 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2197 // Find the largest super-register that is allocatable.
2198 unsigned BestReg = Reg;
2199 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2200 unsigned SuperReg = *AS;
2201 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2202 BestReg = SuperReg;
2203 break;
2204 }
2205 }
2206 return BestReg;
2207 }
2209 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2210 /// specified interval that conflict with the specified physical register.
2211 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2212 unsigned PhysReg) const {
2213 unsigned NumConflicts = 0;
2214 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2215 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2216 E = mri_->reg_end(); I != E; ++I) {
2217 MachineOperand &O = I.getOperand();
2218 MachineInstr *MI = O.getParent();
2219 unsigned Index = getInstructionIndex(MI);
2220 if (pli.liveAt(Index))
2221 ++NumConflicts;
2222 }
2223 return NumConflicts;
2224 }
2226 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2227 /// around all defs and uses of the specified interval. Return true if it
2228 /// was able to cut its interval.
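/// A sketch (hypothetical): if EAX is live across an instruction that uses
/// or defines li's vreg, an emergency spill of EAX is recorded at that
/// instruction and the [load, store+1) slice is carved out of EAX's
/// interval so the vreg can be assigned EAX there.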
2229 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2230 unsigned PhysReg, VirtRegMap &vrm) {
2231 unsigned SpillReg = getRepresentativeReg(PhysReg);
2233 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2234 // If there are registers which alias PhysReg but are not sub-registers
2235 // of the chosen representative super register, assert, since we can't
2236 // handle it yet.
2237 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2238 tri_->isSuperRegister(*AS, SpillReg));
2240 bool Cut = false;
2241 LiveInterval &pli = getInterval(SpillReg);
2242 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2243 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2244 E = mri_->reg_end(); I != E; ++I) {
2245 MachineOperand &O = I.getOperand();
2246 MachineInstr *MI = O.getParent();
2247 if (SeenMIs.count(MI))
2248 continue;
2249 SeenMIs.insert(MI);
2250 unsigned Index = getInstructionIndex(MI);
2251 if (pli.liveAt(Index)) {
2252 vrm.addEmergencySpill(SpillReg, MI);
2253 unsigned StartIdx = getLoadIndex(Index);
2254 unsigned EndIdx = getStoreIndex(Index)+1;
2255 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2256 pli.removeRange(StartIdx, EndIdx);
2257 Cut = true;
2258 } else {
2259 cerr << "Ran out of registers during register allocation!\n";
2260 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2261 cerr << "Please check your inline asm statement for invalid "
2262 << "constraints:\n";
2263 MI->print(cerr.stream(), tm_);
2264 }
2265 exit(1);
2266 }
2267 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2268 if (!hasInterval(*AS))
2269 continue;
2270 LiveInterval &spli = getInterval(*AS);
2271 if (spli.liveAt(Index))
2272 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2273 }
2274 }
2275 }
2276 return Cut;
2277 }
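/// addLiveRangeToEndOfBlock - Give 'reg' a new value defined at startInst
/// and extend it to the end of startInst's block. The value is marked as
/// killed by a PHI at the block end, which suits PHI-lowering clients; a
/// rough picture (hypothetical):
///   [defIndex(startInst), endOfMBB+1) with hasPHIKill = true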
2279 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2280 MachineInstr* startInst) {
2281 LiveInterval& Interval = getOrCreateInterval(reg);
2282 VNInfo* VN = Interval.getNextValue(
2283 getInstructionIndex(startInst) + InstrSlots::DEF,
2284 startInst, getVNInfoAllocator());
2285 VN->hasPHIKill = true;
2286 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2287 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2288 getMBBEndIdx(startInst->getParent()) + 1, VN);
2289 Interval.addRange(LR);
2291 return LR;