//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
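
// A note on SlotIndex as used throughout this file: in this revision every
// instruction gets four slots, ordered Load < Use < Def < Store.
// getUseIndex()/getDefIndex()/getStoreIndex() pick a slot within one
// instruction, getBaseIndex() rounds down to the Load slot, and
// getNextIndex()/getPrevIndex() step a whole instruction at a time while
// getNextSlot()/getPrevSlot() step a single slot. This is why a dead def
// below gets the unit range [defSlot(def), defSlot(def)+1).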

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
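
  // (idx and end above are rounded to instruction base indexes: idx is the
  // first slot of the first instruction in the range, and end is the base
  // index just past the last covered instruction, so the scans below can
  // step one instruction at a time.)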

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue; // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif
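
/// MultipleDefsBySameMI - Return true if MI defines the given register again
/// in an operand past MOIdx, as happens when a single instruction defines
/// several sub-registers of the same virtual register.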
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [getMBBStartIdx(MBB), getMBBEndIdx(MBB)) for each
      // one to the live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or one where we know it's dead, is if it is live-in to the
  // function and never used. Another possible case is that the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
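///
/// The intervals are half-open: [i, j) covers slot i up to, but not
/// including, slot j, so adjacent intervals [a, b) and [b, c) do not overlap.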
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
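
// The factory below gives physical registers an infinite spill weight
// (HUGE_VALF), which marks their intervals as not spillable; virtual
// registers start at weight 0 and accumulate weight through getSpillWeight()
// as they are rewritten for spilling.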
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
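
// In short: FilterFoldedOps records VirtRegMap::isMod for def operands and
// VirtRegMap::isRef for ordinary uses; a use tied to a def marks the access
// as isModRef and is left out of FoldOps. The callers below use the mod bit
// to refuse folding a rematerialized def into an instruction that also
// modifies the register.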

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
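
// addIntervalsForSpills uses this test to decide whether to try splitting:
// only intervals spanning more than one basic block are split candidates
// (see TrySplit below).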

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
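
/// anyKillInMBBAfterIdx - Return true if the live interval has a kill in MBB
/// after the given index. The spill bookkeeping below uses this to detect
/// defs whose value is live out of the block (no later kill), which must be
/// spilled back to the stack slot.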
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
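
// Note: std::sort is not stable, but entries comparing equal here always
// belong to the same instruction (one entry per operand referencing the
// register), and the rewrite loop below skips those duplicates explicitly,
// so the unspecified relative order is harmless.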

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
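
// Worked example of the weight formula: at depth 0 the factor is 1; at depth
// 1 it is 1 + 100/11 ~ 10.1; at depth 2, (1 + 100/12)^2 ~ 87; at depth 3,
// (1 + 100/13)^3 ~ 657. So a def-and-use (isDef + isUse == 2) inside a
// doubly nested loop adds roughly 2 * 87 ~ 174 to the interval's weight.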

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
1557 std::vector<LiveInterval*> LiveIntervals::
1558 addIntervalsForSpills(const LiveInterval &li,
1559 SmallVectorImpl<LiveInterval*> &SpillIs,
1560 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1561 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1563 DEBUG({
1564 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1565 li.print(dbgs(), tri_);
1566 dbgs() << '\n';
1569 // Each bit specify whether a spill is required in the MBB.
1570 BitVector SpillMBBs(mf_->getNumBlockIDs());
1571 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1572 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1573 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1574 DenseMap<unsigned,unsigned> MBBVRegsMap;
1575 std::vector<LiveInterval*> NewLIs;
1576 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1578 unsigned NumValNums = li.getNumValNums();
1579 SmallVector<MachineInstr*, 4> ReMatDefs;
1580 ReMatDefs.resize(NumValNums, NULL);
1581 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1582 ReMatOrigDefs.resize(NumValNums, NULL);
1583 SmallVector<int, 4> ReMatIds;
1584 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1585 BitVector ReMatDelete(NumValNums);
1586 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1588 // Spilling a split live interval. It cannot be split any further. Also,
1589 // it's also guaranteed to be a single val# / range interval.
1590 if (vrm.getPreSplitReg(li.reg)) {
1591 vrm.setIsSplitFromReg(li.reg, 0);
1592 // Unset the split kill marker on the last use.
1593 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1594 if (KillIdx != SlotIndex()) {
1595 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1596 assert(KillMI && "Last use disappeared?");
1597 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1598 assert(KillOp != -1 && "Last use disappeared?");
1599 KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The value is killed by a PHI node, so not all of its uses can be
        // rematerialized. The def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
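
  // Example of the PHI-kill case above: a val# whose range ends at a PHI
  // join must be available in memory (or a register) at the block boundary;
  // it cannot simply be re-created at each use, so the interval needs a
  // stack slot and the original def is kept.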

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
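
  // Invariant after this block (in terms of the VirtRegMap helpers above):
  // when NeedStackSlot is true, Slot names this interval's unique stack
  // slot, whether freshly assigned by assignVirt2StackSlot() or chosen
  // earlier by the prealloc splitter; otherwise Slot stays MAX_STACK_SLOT.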

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores below only if we are splitting; if not, we are
  // done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // The uses were folded as well; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }
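        // A SlotIndex addresses four sub-slots of one instruction (load,
        // use, def and store). In the folded case above, removing
        // [load, def) drops the reload part of the new interval and removing
        // [def, store) drops the stored def that was folded into MI.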

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
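      // Note: MachineOperand::CreateReg(ImpUse, /*isDef=*/false,
      // /*isImp=*/true) above adds the rematerialized instruction's register
      // operand as an implicit use of MI, keeping ImpUse live across the
      // rematerialization point.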
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
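
// A minimal, hypothetical caller sketch (not part of this file): a linear
// scan allocator that decides to spill the current interval would do roughly
//
//   SmallVector<LiveInterval*, 8> SpillIs;  // other intervals being spilled
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, SpillIs, loopInfo, *vrm);
//   // ...push the returned intervals back onto the unhandled queue...
//
// where lis, cur, loopInfo and vrm name the allocator's own state.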

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
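
// For example (illustrative; exact registers are target-specific): querying
// x86's AX would scan its super-registers EAX and RAX and return true as
// soon as one of them is allocatable and has a live interval.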

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
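
// Illustrative walk, assuming x86-style register nesting: starting from AL,
// the loop inspects AX, EAX, ... and settles on the first super-register
// that has an interval but no allocatable super-register of its own, i.e.
// the widest register actually tracked for this location.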

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
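
// Callers can use this count as a heuristic: the more defs and uses of li
// overlap PhysReg's live interval, the more expensive it is to free PhysReg
// by spilling it around those instructions (see
// spillPhysRegAroundRegDefsUses below).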

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
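
// Note on the mechanism above: addEmergencySpill() records that PReg must be
// saved and restored around MI by the rewriter, which is what makes it safe
// for removeRange() to carve MI's slot out of the physical register's (and
// its sub-registers') live intervals.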

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
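
// Typical use (an assumption based on the PHI-kill flag set above): a pass
// that inserts a new def late in a block, e.g. a PHI-elimination copy, calls
// this to extend reg's live range from the new def to the block's end
// without recomputing all intervals.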