//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
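
// Illustrative caller sketch (hypothetical; the real callers sit in the
// register allocator): before assigning physreg PhysR to interval CurLI,
// check that nothing inside CurLI's single range already touches PhysR.
//
//   if (!LIS->conflictsWithPhysReg(CurLI, VRM, PhysR)) {
//     // PhysR is a safe candidate for CurLI.
//   }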
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
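
// Illustration of the two-address split above (hypothetical instructions and
// slot indexes):
//   4  %reg1024<def> = ...
//   8  %reg1024<def> = ADD %reg1024<kill>, %reg1025
// The first def built one value covering [defSlot(4), ...). On the redef,
// the sub-range [defSlot(4), defSlot(8)) is re-assigned to a new value #1,
// so the tied use at index 8 reads value #1, while value #0 is moved to
// start at the redef slot of index 8.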
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
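
// A minimal sketch of the slot ordering assumed above (one SlotIndex per
// instruction, with LOAD < USE < DEF < STORE sub-slots); these are the
// accessors used throughout this file:
//
//   SlotIndex idx = getInstructionIndex(mi); // base index of mi
//   idx.getUseIndex();   // 'use' slot: where inputs are read
//   idx.getDefIndex();   // 'def' slot: where normal results are written
//   idx.getStoreIndex(); // one past the def slot, so a dead def gets the
//                        // unit range [defSlot(def), defSlot(def)+1)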
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
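
// For illustration only (hypothetical instructions and indexes): in a block
//   4   %reg1024<def> = MOVi 7
//   8   %reg1025<def> = ADDrr %reg1024<kill>, ...
//   12  STRr %reg1025<kill>, ...
// computeIntervals would build %reg1024 = [defSlot(4), useSlot(8)+1) and
// %reg1025 = [defSlot(8), useSlot(12)+1), in the notation used by the
// comments above.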
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    }
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    // We may eliminate PHI values, so recompute PHIKill flags.
    VNI->setHasPHIKill(false);
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

    // A use tied to an early-clobber def ends at the load slot and isn't
    // caught above. Catch it here instead. This probably only ever happens
    // for inline assembly.
    if (VNI->def.isUse())
      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        VNInfo *PVNI = li->getVNInfoAt(Stop);
        // A predecessor is not required to have a live-out value for a PHI.
        if (PVNI) {
          PVNI->setHasPHIKill(true);
          WorkList.push_back(std::make_pair(Stop, PVNI));
        }
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
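
// A minimal usage sketch (hypothetical caller): after deleting some uses of
// a virtual register, shrink its interval and sweep up the now-dead defs:
//
//   SmallVector<MachineInstr*, 8> Dead;
//   if (LIS->shrinkToUses(&LIS->getInterval(Reg), &Dead)) {
//     // The interval may now consist of multiple connected components.
//   }
//   for (unsigned i = 0, e = Dead.size(); i != e; ++i) {
//     LIS->RemoveMachineInstrFromMaps(Dead[i]);
//     Dead[i]->eraseFromParent();
//   }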
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call
  // instruction that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}
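
// Illustrative caller sketch (hypothetical; real callers live in the spill
// and split code): spill code for LI in MBB must be inserted no later than
// this point.
//
//   MachineBasicBlock::iterator IP = LIS->getLastSplitPoint(LI, MBB);
//   // ... insert spill/reload instructions before IP ...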
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}
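
// Example of the effect (illustrative instruction, not from a real target):
// if %reg1024's live range ends at the use slot of
//   %reg1025<def> = ADDrr %reg1024, %reg1026
// then addRegisterKilled turns that operand into %reg1024<kill>, so the kill
// flags match the recomputed liveness.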
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    if (SpillIs)
      for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
        if (ImpUse == (*SpillIs)[i]->reg)
          return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, 0, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
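
// Sketch of the filtering (illustrative operands, not from a real target):
// for MI "%reg1024<def> = ADDrr %reg1024, %reg1025" with Ops = {0, 1} (the
// tied def and use of %reg1024), the loop sets MRInfo to
// VirtRegMap::isModRef and leaves only operand 0 in FoldOps; the tied use is
// filtered out so it is never folded separately from its def.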
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
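
// Illustrative use (hypothetical caller): intervals confined to a single
// block are cheap to handle locally, e.g.
//
//   if (LIS->intervalIsInOneMBB(LI)) {
//     // Prefer a purely local spill/split strategy for LI.
//   }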
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1404 void LiveIntervals::
1405 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1406 LiveInterval::Ranges::const_iterator &I,
1407 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1408 unsigned Slot, int LdSlot,
1409 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1410 VirtRegMap &vrm,
1411 const TargetRegisterClass* rc,
1412 SmallVector<int, 4> &ReMatIds,
1413 const MachineLoopInfo *loopInfo,
1414 BitVector &SpillMBBs,
1415 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1416 BitVector &RestoreMBBs,
1417 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1418 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1419 std::vector<LiveInterval*> &NewLIs) {
1420 bool AllCanFold = true;
1421 unsigned NewVReg = 0;
1422 SlotIndex start = I->start.getBaseIndex();
1423 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1425 // First collect all the def / use in this live range that will be rewritten.
1426 // Make sure they are sorted according to instruction index.
1427 std::vector<RewriteInfo> RewriteMIs;
1428 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1429 re = mri_->reg_end(); ri != re; ) {
1430 MachineInstr *MI = &*ri;
1431 MachineOperand &O = ri.getOperand();
1432 ++ri;
1433 if (MI->isDebugValue()) {
1434 // Modify DBG_VALUE now that the value is in a spill slot.
1435 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1436 uint64_t Offset = MI->getOperand(1).getImm();
1437 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1438 DebugLoc DL = MI->getDebugLoc();
1439 int FI = isLoadSS ? LdSlot : (int)Slot;
1440 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1441 Offset, MDPtr, DL)) {
1442 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1443 ReplaceMachineInstrInMaps(MI, NewDV);
1444 MachineBasicBlock *MBB = MI->getParent();
1445 MBB->insert(MBB->erase(MI), NewDV);
1446 continue;
1450 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1451 RemoveMachineInstrFromMaps(MI);
1452 vrm.RemoveMachineInstrFromMaps(MI);
1453 MI->eraseFromParent();
1454 continue;
1456 assert(!(O.isImplicit() && O.isUse()) &&
1457 "Spilling register that's used as implicit use?");
1458 SlotIndex index = getInstructionIndex(MI);
1459 if (index < start || index >= end)
1460 continue;
1462 if (O.isUndef())
1463 // Must be defined by an implicit def. It should not be spilled. Note,
1464 // this is for correctness reason. e.g.
1465 // 8 %reg1024<def> = IMPLICIT_DEF
1466 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1467 // The live range [12, 14) are not part of the r1024 live interval since
1468 // it's defined by an implicit def. It will not conflicts with live
1469 // interval of r1025. Now suppose both registers are spilled, you can
1470 // easily see a situation where both registers are reloaded before
1471 // the INSERT_SUBREG and both target registers that would overlap.
1472 continue;
1473 RewriteMIs.push_back(RewriteInfo(index, MI));
1475 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1477 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1478 // Now rewrite the defs and uses.
1479 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1480 RewriteInfo &rwi = RewriteMIs[i];
1481 ++i;
1482 SlotIndex index = rwi.Index;
1483 MachineInstr *MI = rwi.MI;
1484 // If MI def and/or use the same register multiple times, then there
1485 // are multiple entries.
1486 while (i != e && RewriteMIs[i].MI == MI) {
1487 assert(RewriteMIs[i].Index == index);
1488 ++i;
1490 MachineBasicBlock *MBB = MI->getParent();
1492 if (ImpUse && MI != ReMatDefMI) {
1493 // Re-matting an instruction with virtual register use. Prevent interval
1494 // from being spilled.
1495 getInterval(ImpUse).markNotSpillable();
1498 unsigned MBBId = MBB->getNumber();
1499 unsigned ThisVReg = 0;
1500 if (TrySplit) {
1501 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1502 if (NVI != MBBVRegsMap.end()) {
1503 ThisVReg = NVI->second;
1504 // One common case:
1505 // x = use
1506 // ...
1507 // ...
1508 // def = ...
1509 // = use
1510 // It's better to start a new interval to avoid artificially
1511 // extend the new interval.
1512 if (MI->readsWritesVirtualRegister(li.reg) ==
1513 std::make_pair(false,true)) {
1514 MBBVRegsMap.erase(MBB->getNumber());
1515 ThisVReg = 0;
1520 bool IsNew = ThisVReg == 0;
1521 if (IsNew) {
1522 // This ends the previous live interval. If all of its def / use
1523 // can be folded, give it a low spill weight.
1524 if (NewVReg && TrySplit && AllCanFold) {
1525 LiveInterval &nI = getOrCreateInterval(NewVReg);
1526 nI.weight /= 10.0F;
1528 AllCanFold = true;
1530 NewVReg = ThisVReg;
1532 bool HasDef = false;
1533 bool HasUse = false;
1534 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1535 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1536 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1537 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1538 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1539 if (!HasDef && !HasUse)
1540 continue;
1542 AllCanFold &= CanFold;
1544 // Update weight of spill interval.
1545 LiveInterval &nI = getOrCreateInterval(NewVReg);
1546 if (!TrySplit) {
1547 // The spill weight is now infinity as it cannot be spilled again.
1548 nI.markNotSpillable();
1549 continue;
1552 // Keep track of the last def and first use in each MBB.
1553 if (HasDef) {
1554 if (MI != ReMatOrigDefMI || !CanDelete) {
1555 bool HasKill = false;
1556 if (!HasUse)
1557 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1558 else {
1559 // If this is a two-address code, then this index starts a new VNInfo.
1560 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1561 if (VNI)
1562 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1564 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1565 SpillIdxes.find(MBBId);
1566 if (!HasKill) {
1567 if (SII == SpillIdxes.end()) {
1568 std::vector<SRInfo> S;
1569 S.push_back(SRInfo(index, NewVReg, true));
1570 SpillIdxes.insert(std::make_pair(MBBId, S));
1571 } else if (SII->second.back().vreg != NewVReg) {
1572 SII->second.push_back(SRInfo(index, NewVReg, true));
1573 } else if (index > SII->second.back().index) {
1574 // If there is an earlier def and this is a two-address
1575 // instruction, then it's not possible to fold the store (which
1576 // would also fold the load).
1577 SRInfo &Info = SII->second.back();
1578 Info.index = index;
1579 Info.canFold = !HasUse;
1581 SpillMBBs.set(MBBId);
1582 } else if (SII != SpillIdxes.end() &&
1583 SII->second.back().vreg == NewVReg &&
1584 index > SII->second.back().index) {
1585 // There is an earlier def that's not killed (must be two-address).
1586 // The spill is no longer needed.
1587 SII->second.pop_back();
1588 if (SII->second.empty()) {
1589 SpillIdxes.erase(MBBId);
1590 SpillMBBs.reset(MBBId);
1596 if (HasUse) {
1597 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1598 SpillIdxes.find(MBBId);
1599 if (SII != SpillIdxes.end() &&
1600 SII->second.back().vreg == NewVReg &&
1601 index > SII->second.back().index)
1602 // Use(s) following the last def, it's not safe to fold the spill.
1603 SII->second.back().canFold = false;
1604 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1605 RestoreIdxes.find(MBBId);
1606 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1607 // If we are splitting live intervals, only fold if it's the first
1608 // use and there isn't another use later in the MBB.
1609 RII->second.back().canFold = false;
1610 else if (IsNew) {
1611 // Only need a reload if there isn't an earlier def / use.
1612 if (RII == RestoreIdxes.end()) {
1613 std::vector<SRInfo> Infos;
1614 Infos.push_back(SRInfo(index, NewVReg, true));
1615 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1616 } else {
1617 RII->second.push_back(SRInfo(index, NewVReg, true));
1619 RestoreMBBs.set(MBBId);
1623 // Update spill weight.
1624 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1625 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1628 if (NewVReg && TrySplit && AllCanFold) {
1629 // If all of its def / use can be folded, give it a low spill weight.
1630 LiveInterval &nI = getOrCreateInterval(NewVReg);
1631 nI.weight /= 10.0F;
1632 }
1633 }
1635 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1636 unsigned vr, BitVector &RestoreMBBs,
1637 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
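// Returns true if a foldable restore is recorded for vr at exactly this
// index in block Id, i.e. the spill and the restore can be folded together.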
1638 if (!RestoreMBBs[Id])
1639 return false;
1640 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1641 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1642 if (Restores[i].index == index &&
1643 Restores[i].vreg == vr &&
1644 Restores[i].canFold)
1645 return true;
1646 return false;
1647 }
1649 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1650 unsigned vr, BitVector &RestoreMBBs,
1651 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
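// Rather than erasing the vector entry, invalidate it by clearing its index;
// the restore-insertion loop in addIntervalsForSpills skips entries whose
// index is the default-constructed SlotIndex().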
1652 if (!RestoreMBBs[Id])
1653 return;
1654 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1655 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1656 if (Restores[i].index == index && Restores[i].vreg == vr)
1657 Restores[i].index = SlotIndex();
1658 }
1660 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1661 /// spilled and create empty intervals for their uses.
1662 void
1663 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1664 const TargetRegisterClass* rc,
1665 std::vector<LiveInterval*> &NewLIs) {
1666 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1667 re = mri_->reg_end(); ri != re; ) {
1668 MachineOperand &O = ri.getOperand();
1669 MachineInstr *MI = &*ri;
1670 ++ri;
1671 if (MI->isDebugValue()) {
1672 // Remove debug info for now.
1673 O.setReg(0U);
1674 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1675 continue;
1676 }
1677 if (O.isDef()) {
1678 assert(MI->isImplicitDef() &&
1679 "Register def was not rewritten?");
1680 RemoveMachineInstrFromMaps(MI);
1681 vrm.RemoveMachineInstrFromMaps(MI);
1682 MI->eraseFromParent();
1683 } else {
1684 // This must be a use of an implicit_def so it's not part of the live
1685 // interval. Create a new empty live interval for it.
1686 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
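// The empty interval leaves the allocator free to pick any register for the
// use; the operand is marked undef below so no actual reload is required.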
1687 unsigned NewVReg = mri_->createVirtualRegister(rc);
1688 vrm.grow();
1689 vrm.setIsImplicitlyDefined(NewVReg);
1690 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1691 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1692 MachineOperand &MO = MI->getOperand(i);
1693 if (MO.isReg() && MO.getReg() == li.reg) {
1694 MO.setReg(NewVReg);
1695 MO.setIsUndef();
1696 }
1697 }
1698 }
1699 }
1700 }
1702 float
1703 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1704 // Limit the loop depth ridiculousness.
1705 if (loopDepth > 200)
1706 loopDepth = 200;
1708 // The loop depth is used to roughly estimate the number of times the
1709 // instruction is executed. Something like 10^d is simple, but will quickly
1710 // overflow a float. This expression behaves like 10^d for small d, but is
1711 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1712 // headroom before overflow.
1713 // Note that powf() may not be available on all targets; for consistency,
1714 // use pow(double, double) here.
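// As a worked check of the formula below: d=0 gives lc=1, d=1 gives ~10.1,
// d=3 gives ~657 (still close to 10^d), while d=200 only reaches ~6.7e33.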
1715 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
1717 return (isDef + isUse) * lc;
1718 }
1720 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
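// normalizeSpillWeight() (from CalcSpillWeights.h) divides the accumulated
// use/def weight by a function of the interval's size, so that for the same
// amount of activity a long interval looks cheaper to spill than a short,
// hot one.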
1721 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1722 NewLIs[i]->weight =
1723 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
1724 }
1726 std::vector<LiveInterval*> LiveIntervals::
1727 addIntervalsForSpills(const LiveInterval &li,
1728 const SmallVectorImpl<LiveInterval*> *SpillIs,
1729 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1730 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1732 DEBUG({
1733 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1734 li.print(dbgs(), tri_);
1735 dbgs() << '\n';
1736 });
1738 // Each bit specifies whether a spill is required in the corresponding MBB.
1739 BitVector SpillMBBs(mf_->getNumBlockIDs());
1740 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1741 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1742 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1743 DenseMap<unsigned,unsigned> MBBVRegsMap;
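// RestoreIdxes / RestoreMBBs mirror SpillIdxes / SpillMBBs, but for reload
// points; MBBVRegsMap tracks which new vreg is currently in use per block.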
1744 std::vector<LiveInterval*> NewLIs;
1745 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1747 unsigned NumValNums = li.getNumValNums();
1748 SmallVector<MachineInstr*, 4> ReMatDefs;
1749 ReMatDefs.resize(NumValNums, NULL);
1750 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1751 ReMatOrigDefs.resize(NumValNums, NULL);
1752 SmallVector<int, 4> ReMatIds;
1753 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1754 BitVector ReMatDelete(NumValNums);
1755 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1757 // Spilling a split live interval. It cannot be split any further. It is
1758 // also guaranteed to be a single val# / range interval.
1759 if (vrm.getPreSplitReg(li.reg)) {
1760 vrm.setIsSplitFromReg(li.reg, 0);
1761 // Unset the split kill marker on the last use.
1762 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1763 if (KillIdx != SlotIndex()) {
1764 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1765 assert(KillMI && "Last use disappeared?");
1766 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1767 assert(KillOp != -1 && "Last use disappeared?");
1768 KillMI->getOperand(KillOp).setIsKill(false);
1769 }
1770 vrm.removeKillPoint(li.reg);
1771 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1772 Slot = vrm.getStackSlot(li.reg);
1773 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1774 MachineInstr *ReMatDefMI = DefIsReMat ?
1775 vrm.getReMaterializedMI(li.reg) : NULL;
1776 int LdSlot = 0;
1777 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1778 bool isLoad = isLoadSS ||
1779 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1780 bool IsFirstRange = true;
1781 for (LiveInterval::Ranges::const_iterator
1782 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1783 // If this is a split live interval with multiple ranges, it means there
1784 // are two-address instructions that re-defined the value. Only the
1785 // first def can be rematerialized!
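// Illustrative sketch (not from the source): after such a split one may see
//   %v = ...           ; first range: def is rematerializable
//   %v = add %v, 4     ; two-address redef: must go through the stack slot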
1786 if (IsFirstRange) {
1787 // Note ReMatOrigDefMI has already been deleted.
1788 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1789 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1790 false, vrm, rc, ReMatIds, loopInfo,
1791 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1792 MBBVRegsMap, NewLIs);
1793 } else {
1794 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1795 Slot, 0, false, false, false,
1796 false, vrm, rc, ReMatIds, loopInfo,
1797 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1798 MBBVRegsMap, NewLIs);
1799 }
1800 IsFirstRange = false;
1801 }
1803 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1804 normalizeSpillWeights(NewLIs);
1805 return NewLIs;
1806 }
1808 bool TrySplit = !intervalIsInOneMBB(li);
1809 if (TrySplit)
1810 ++numSplits;
1811 bool NeedStackSlot = false;
1812 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1813 i != e; ++i) {
1814 const VNInfo *VNI = *i;
1815 unsigned VN = VNI->id;
1816 if (VNI->isUnused())
1817 continue; // Dead val#.
1818 // Is the def for the val# rematerializable?
1819 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1820 bool dummy;
1821 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1822 // Remember how to remat the def of this val#.
1823 ReMatOrigDefs[VN] = ReMatDefMI;
1824 // Original def may be modified so we have to make a copy here.
1825 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1826 CloneMIs.push_back(Clone);
1827 ReMatDefs[VN] = Clone;
1829 bool CanDelete = true;
1830 if (VNI->hasPHIKill()) {
1831 // A kill is a phi node, so not all of its uses can be rematerialized.
1832 // It must not be deleted.
1833 CanDelete = false;
1834 // Need a stack slot if there is any live range where uses cannot be
1835 // rematerialized.
1836 NeedStackSlot = true;
1837 }
1838 if (CanDelete)
1839 ReMatDelete.set(VN);
1840 } else {
1841 // Need a stack slot if there is any live range where uses cannot be
1842 // rematerialized.
1843 NeedStackSlot = true;
1844 }
1845 }
1847 // One stack slot per live interval.
1848 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1849 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1850 Slot = vrm.assignVirt2StackSlot(li.reg);
1852 // This case only occurs when the prealloc splitter has already assigned
1853 // a stack slot to this vreg.
1854 else
1855 Slot = vrm.getStackSlot(li.reg);
1856 }
1858 // Create new intervals and rewrite defs and uses.
1859 for (LiveInterval::Ranges::const_iterator
1860 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1861 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1862 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1863 bool DefIsReMat = ReMatDefMI != NULL;
1864 bool CanDelete = ReMatDelete[I->valno->id];
1865 int LdSlot = 0;
1866 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1867 bool isLoad = isLoadSS ||
1868 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1869 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1870 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1871 CanDelete, vrm, rc, ReMatIds, loopInfo,
1872 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1873 MBBVRegsMap, NewLIs);
1874 }
1876 // Insert spills / restores if we are splitting.
1877 if (!TrySplit) {
1878 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1879 normalizeSpillWeights(NewLIs);
1880 return NewLIs;
1881 }
1883 SmallPtrSet<LiveInterval*, 4> AddedKill;
1884 SmallVector<unsigned, 2> Ops;
1885 if (NeedStackSlot) {
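// Walk the blocks flagged in SpillMBBs: try to fold each recorded store into
// the instruction itself; failing that, record a spill point so the spiller
// inserts an explicit store after the def.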
1886 int Id = SpillMBBs.find_first();
1887 while (Id != -1) {
1888 std::vector<SRInfo> &spills = SpillIdxes[Id];
1889 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1890 SlotIndex index = spills[i].index;
1891 unsigned VReg = spills[i].vreg;
1892 LiveInterval &nI = getOrCreateInterval(VReg);
1893 bool isReMat = vrm.isReMaterialized(VReg);
1894 MachineInstr *MI = getInstructionFromIndex(index);
1895 bool CanFold = false;
1896 bool FoundUse = false;
1897 Ops.clear();
1898 if (spills[i].canFold) {
1899 CanFold = true;
1900 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1901 MachineOperand &MO = MI->getOperand(j);
1902 if (!MO.isReg() || MO.getReg() != VReg)
1903 continue;
1905 Ops.push_back(j);
1906 if (MO.isDef())
1907 continue;
1908 if (isReMat ||
1909 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1910 RestoreMBBs, RestoreIdxes))) {
1911 // MI has two-address uses of the same register. If the use
1912 // isn't the first and only use in the BB, then we can't fold
1913 // it. FIXME: Move this to rewriteInstructionsForSpills.
1914 CanFold = false;
1915 break;
1916 }
1917 FoundUse = true;
1918 }
1919 }
1920 // Fold the store into the def if possible.
1921 bool Folded = false;
1922 if (CanFold && !Ops.empty()) {
1923 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1924 Folded = true;
1925 if (FoundUse) {
1926 // Also folded uses, do not issue a load.
1927 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1928 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1929 }
1930 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1931 }
1932 }
1934 // Otherwise tell the spiller to issue a spill.
1935 if (!Folded) {
1936 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1937 bool isKill = LR->end == index.getStoreIndex();
1938 if (!MI->registerDefIsDead(nI.reg))
1939 // No need to spill a dead def.
1940 vrm.addSpillPoint(VReg, isKill, MI);
1941 if (isKill)
1942 AddedKill.insert(&nI);
1943 }
1944 }
1945 Id = SpillMBBs.find_next(Id);
1946 }
1947 }
1949 int Id = RestoreMBBs.find_first();
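// Likewise for reloads: entries that eraseRestoreInfo invalidated (index
// reset to the default SlotIndex) are skipped below.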
1950 while (Id != -1) {
1951 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1952 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1953 SlotIndex index = restores[i].index;
1954 if (index == SlotIndex())
1955 continue;
1956 unsigned VReg = restores[i].vreg;
1957 LiveInterval &nI = getOrCreateInterval(VReg);
1958 bool isReMat = vrm.isReMaterialized(VReg);
1959 MachineInstr *MI = getInstructionFromIndex(index);
1960 bool CanFold = false;
1961 Ops.clear();
1962 if (restores[i].canFold) {
1963 CanFold = true;
1964 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1965 MachineOperand &MO = MI->getOperand(j);
1966 if (!MO.isReg() || MO.getReg() != VReg)
1967 continue;
1969 if (MO.isDef()) {
1970 // If this restore were to be folded, it would have been folded
1971 // already.
1972 CanFold = false;
1973 break;
1974 }
1975 Ops.push_back(j);
1976 }
1977 }
1979 // Fold the load into the use if possible.
1980 bool Folded = false;
1981 if (CanFold && !Ops.empty()) {
1982 if (!isReMat)
1983 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1984 else {
1985 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1986 int LdSlot = 0;
1987 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1988 // If the rematerializable def is a load, also try to fold it.
1989 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1990 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1991 Ops, isLoadSS, LdSlot, VReg);
1992 if (!Folded) {
1993 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1994 if (ImpUse) {
1995 // Re-matting an instruction with virtual register use. Add the
1996 // register as an implicit use on the use MI and mark the register
1997 // interval as unspillable.
1998 LiveInterval &ImpLi = getInterval(ImpUse);
1999 ImpLi.markNotSpillable();
2000 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2001 }
2002 }
2003 }
2004 }
2005 // If folding is not possible / failed, then tell the spiller to issue a
2006 // load / rematerialization for us.
2007 if (Folded)
2008 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2009 else
2010 vrm.addRestorePoint(VReg, MI);
2011 }
2012 Id = RestoreMBBs.find_next(Id);
2013 }
2015 // Finalize intervals: add kills, finalize spill weights, and filter out
2016 // dead intervals.
2017 std::vector<LiveInterval*> RetNewLIs;
2018 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2019 LiveInterval *LI = NewLIs[i];
2020 if (!LI->empty()) {
2021 if (!AddedKill.count(LI)) {
2022 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2023 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2024 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2025 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2026 assert(UseIdx != -1);
2027 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2028 LastUse->getOperand(UseIdx).setIsKill();
2029 vrm.addKillPoint(LI->reg, LastUseIdx);
2030 }
2031 }
2032 RetNewLIs.push_back(LI);
2033 }
2034 }
2036 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2037 normalizeSpillWeights(RetNewLIs);
2038 return RetNewLIs;
2039 }
2041 /// hasAllocatableSuperReg - Return true if the specified physical register has
2042 /// any super register that's allocatable.
2043 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2044 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2045 if (allocatableRegs_[*AS] && hasInterval(*AS))
2046 return true;
2047 return false;
2048 }
2050 /// getRepresentativeReg - Find the largest super register of the specified
2051 /// physical register.
2052 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2053 // Find the largest super-register that is allocatable.
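// (Illustrative, assuming an X86-like target: for AX this would prefer EAX,
// provided EAX has an interval and no allocatable super-register of its own.)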
2054 unsigned BestReg = Reg;
2055 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2056 unsigned SuperReg = *AS;
2057 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2058 BestReg = SuperReg;
2059 break;
2060 }
2061 }
2062 return BestReg;
2063 }
2065 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2066 /// specified interval that conflict with the specified physical register.
2067 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2068 unsigned PhysReg) const {
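// Count operands of li (uses and defs alike) whose instruction's index falls
// inside the live interval of the representative physical register.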
2069 unsigned NumConflicts = 0;
2070 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2071 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2072 E = mri_->reg_end(); I != E; ++I) {
2073 MachineOperand &O = I.getOperand();
2074 MachineInstr *MI = O.getParent();
2075 if (MI->isDebugValue())
2076 continue;
2077 SlotIndex Index = getInstructionIndex(MI);
2078 if (pli.liveAt(Index))
2079 ++NumConflicts;
2080 }
2081 return NumConflicts;
2082 }
2084 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2085 /// around all defs and uses of the specified interval. Return true if it
2086 /// was able to cut its interval.
2087 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2088 unsigned PhysReg, VirtRegMap &vrm) {
2089 unsigned SpillReg = getRepresentativeReg(PhysReg);
2091 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2092 << " represented by " << tri_->getName(SpillReg) << '\n');
2094 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2095 // Assert if there are registers which alias PhysReg but are not
2096 // sub-registers of the chosen representative super register, since
2097 // we can't handle that case yet.
2098 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2099 tri_->isSuperRegister(*AS, SpillReg));
2101 bool Cut = false;
2102 SmallVector<unsigned, 4> PRegs;
2103 if (hasInterval(SpillReg))
2104 PRegs.push_back(SpillReg);
2105 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2106 if (hasInterval(*SR))
2107 PRegs.push_back(*SR);
2109 DEBUG({
2110 dbgs() << "Trying to spill:";
2111 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2112 dbgs() << ' ' << tri_->getName(PRegs[i]);
2113 dbgs() << '\n';
2114 });
2116 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2117 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2118 E = mri_->reg_end(); I != E; ++I) {
2119 MachineOperand &O = I.getOperand();
2120 MachineInstr *MI = O.getParent();
2121 if (MI->isDebugValue() || SeenMIs.count(MI))
2122 continue;
2123 SeenMIs.insert(MI);
2124 SlotIndex Index = getInstructionIndex(MI);
2125 bool LiveReg = false;
2126 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2127 unsigned PReg = PRegs[i];
2128 LiveInterval &pli = getInterval(PReg);
2129 if (!pli.liveAt(Index))
2130 continue;
2131 LiveReg = true;
2132 SlotIndex StartIdx = Index.getLoadIndex();
2133 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
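// [StartIdx, EndIdx) spans every slot of this instruction, so clearing that
// range frees the physical register exactly around MI.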
2134 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2135 std::string msg;
2136 raw_string_ostream Msg(msg);
2137 Msg << "Ran out of registers during register allocation!";
2138 if (MI->isInlineAsm()) {
2139 Msg << "\nPlease check your inline asm statement for invalid "
2140 << "constraints:\n";
2141 MI->print(Msg, tm_);
2142 }
2143 report_fatal_error(Msg.str());
2144 }
2145 pli.removeRange(StartIdx, EndIdx);
2146 LiveReg = true;
2147 }
2148 if (!LiveReg)
2149 continue;
2150 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2151 vrm.addEmergencySpill(SpillReg, MI);
2152 Cut = true;
2153 }
2154 return Cut;
2155 }
2157 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2158 MachineInstr* startInst) {
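// Define a new value at startInst and cover the remainder of its basic block
// with it; the PHI-kill flag is set because callers (e.g. PHI elimination)
// extend such values into successor blocks' phis.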
2159 LiveInterval& Interval = getOrCreateInterval(reg);
2160 VNInfo* VN = Interval.getNextValue(
2161 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2162 startInst, getVNInfoAllocator());
2163 VN->setHasPHIKill(true);
2164 LiveRange LR(
2165 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2166 getMBBEndIdx(startInst->getParent()), VN);
2167 Interval.addRange(LR);
2169 return LR;