//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
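
// Note the pairing above: each analysis that this pass merely keeps up to
// date is both addRequired<> and addPreserved<>, so the pass manager will
// neither recompute it before this pass nor invalidate it afterwards.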
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
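
// A sketch of the SlotIndex layout assumed by the index arithmetic above
// (and throughout this file): every instruction owns four consecutive
// slots, roughly
//
//   Load  - reloads; also the boundary slot at the start of a block
//   Use   - operand reads
//   Def   - operand writes
//   Store - spills; also where a dead def's one-slot range ends
//
// getBaseIndex() rounds any slot down to the instruction's first slot,
// which is why the loops above walk instruction-by-instruction with
// getNextIndex() / getPrevIndex().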
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
/// MultipleDefsBySameMI - Return true if the instruction has further defs of
/// the same register after operand MOIdx (e.g. several sub-register defs by
/// one instruction).
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specified index
/// is partially re-defining the specified live interval. A common case of
/// this is a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
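
// For instance (hypothetical indexes and registers):
//   4    %reg1041:6<def> = ...     ; defines the interval
//   12   %reg1041:5<def> = ...     ; partial redef: only sub-reg 5 is
//                                  ; overwritten, the rest flows through
// The def at index 12 is a partial redefinition of %reg1041's interval.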
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create the interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination. If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
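
// A worked example of the two-address split above (hypothetical indexes):
//
//   4    %reg1024<def> = COPY %reg1025                 ; originally value #0
//   12   %reg1024<def> = ADD %reg1024<use,kill>, ...   ; tied redef
//
// removeRange(4d, 12d) trims the old value, createValueCopy() makes a new
// value #1 covering [4d, 12d) for the input copy, and value #0 is re-anchored
// at the 2-addr instruction itself, leaving two values in one interval.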
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case in which we should reach this point without finding a kill
  // or a dead def is when the register is live-in to the function and never
  // used. Another possibility is that the implicit use of the physical
  // register was deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
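
// For example, a physreg def that is never read, such as the implicit
// EFLAGS def of an x86 ADD whose flags are unused, ends up with the
// one-slot interval [defSlot(def), defSlot(def)+1) computed above.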
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
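
// On x86, for instance, a def of %EAX also defines the sub-registers %AX,
// %AH and %AL, so each of them gets its own interval here unless the
// instruction explicitly redefines that sub-register itself.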
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a live
/// interval is an interval [i, j), where 1 <= i <= j < N, over which a
/// variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
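
// A minimal sketch of how a client pass would consume the result (the
// enclosing pass is hypothetical; the LiveIntervals calls are real):
//
//   void MyRegAllocPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//   ...
//   LiveIntervals &LIS = getAnalysis<LiveIntervals>();
//   LiveInterval &LI = LIS.getInterval(SomeVirtReg);   // computed above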
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    }
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    // We may eliminate PHI values, so recompute PHIKill flags.
    VNI->setHasPHIKill(false);
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

    // A use tied to an early-clobber def ends at the load slot and isn't
    // caught above. Catch it here instead. This probably only ever happens
    // for inline assembly.
    if (VNI->def.isUse())
      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        VNInfo *PVNI = li->getVNInfoAt(Stop);
        // A predecessor is not required to have a live-out value for a PHI.
        if (PVNI) {
          PVNI->setHasPHIKill(true);
          WorkList.push_back(std::make_pair(Stop, PVNI));
        }
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
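
// Typical usage (hypothetical caller): after deleting or rewriting some uses
// of a virtual register, trim the interval and clean up newly dead defs:
//
//   SmallVector<MachineInstr*, 8> Dead;
//   LIS.shrinkToUses(&LIS.getInterval(Reg), &Dead);
//   for (unsigned i = 0, e = Dead.size(); i != e; ++i) {
//     LIS.RemoveMachineInstrFromMaps(Dead[i]);
//     Dead[i]->eraseFromParent();
//   }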
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call
  // instruction that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}
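
// In other words, for a block that ends in an invoke lowered roughly as
//
//   ...                 <- spills for values live into the landing pad go here
//   CALL foo            <- last split point returned above
//   JMP normal.dest     <- first terminator
//
// spill code must be inserted above the call, not merely above the jump.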
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}
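
// After this runs, the final use of each value carries a kill marker, e.g.:
//
//   %reg1024<def> = MOV32ri 7
//   ...
//   %reg1025<def> = ADD32rr %reg1025, %reg1024<kill>   ; last use of %reg1024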
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }

  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  if (SpillIs)
    for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
      if (ImpUse == (*SpillIs)[i]->reg)
        return false;

  return true;
}
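
// Example of the implicit-use restriction above (hypothetical X86 PIC code):
//
//   %reg1024<def> = MOV32rm %reg1030, 1, %reg0, <cp#0>  ; %reg1030 = PIC base
//
// This load is marked rematerializable, but it can only be rematerialized at
// points where %reg1030 still holds the value it had at the original def,
// which is what isValNoAvailableAt() verifies for every use.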
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, 0, Dummy2);
}
/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
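
// For example, when folding around a two-address instruction like
//   %reg1024<def,tied> = ADD32rr %reg1024<use,tied>, %reg1025
// the tied use of %reg1024 is filtered out, MRInfo becomes isModRef, and only
// the remaining operand indexes are passed on to the folding hooks below.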
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot or a rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
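
// The classic x86 case the code above handles: a reload feeding a use,
//
//   %reg1024<def> = MOV32rm <fi#0>              ; restore from stack slot
//   %reg1025<def> = ADD32rr %reg1025, %reg1024
//
// folds into a single memory-operand instruction:
//
//   %reg1025<def> = ADD32rm %reg1025, <fi#0>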
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
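
// Note how small the intervals created above are: a reload use covers only
// [index.getLoadIndex(), index.getDefIndex()) and a spill def only
// [index.getDefIndex(), index.getStoreIndex()), so each new vreg is live
// across at most one instruction.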
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1411 void LiveIntervals::
1412 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1413 LiveInterval::Ranges::const_iterator &I,
1414 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1415 unsigned Slot, int LdSlot,
1416 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1417 VirtRegMap &vrm,
1418 const TargetRegisterClass* rc,
1419 SmallVector<int, 4> &ReMatIds,
1420 const MachineLoopInfo *loopInfo,
1421 BitVector &SpillMBBs,
1422 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1423 BitVector &RestoreMBBs,
1424 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1425 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1426 std::vector<LiveInterval*> &NewLIs) {
1427 bool AllCanFold = true;
1428 unsigned NewVReg = 0;
1429 SlotIndex start = I->start.getBaseIndex();
1430 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1432 // First collect all the def / use in this live range that will be rewritten.
1433 // Make sure they are sorted according to instruction index.
1434 std::vector<RewriteInfo> RewriteMIs;
1435 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1436 re = mri_->reg_end(); ri != re; ) {
1437 MachineInstr *MI = &*ri;
1438 MachineOperand &O = ri.getOperand();
1439 ++ri;
1440 if (MI->isDebugValue()) {
1441 // Modify DBG_VALUE now that the value is in a spill slot.
1442 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1443 uint64_t Offset = MI->getOperand(1).getImm();
1444 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1445 DebugLoc DL = MI->getDebugLoc();
1446 int FI = isLoadSS ? LdSlot : (int)Slot;
1447 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1448 Offset, MDPtr, DL)) {
1449 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1450 ReplaceMachineInstrInMaps(MI, NewDV);
1451 MachineBasicBlock *MBB = MI->getParent();
1452 MBB->insert(MBB->erase(MI), NewDV);
1453 continue;
1457 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1458 RemoveMachineInstrFromMaps(MI);
1459 vrm.RemoveMachineInstrFromMaps(MI);
1460 MI->eraseFromParent();
1461 continue;
1463 assert(!(O.isImplicit() && O.isUse()) &&
1464 "Spilling register that's used as implicit use?");
1465 SlotIndex index = getInstructionIndex(MI);
1466 if (index < start || index >= end)
1467 continue;
1469 if (O.isUndef())
1470 // Must be defined by an implicit def. It should not be spilled. Note,
1471 // this is for correctness reason. e.g.
1472 // 8 %reg1024<def> = IMPLICIT_DEF
1473 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1474 // The live range [12, 14) are not part of the r1024 live interval since
1475 // it's defined by an implicit def. It will not conflicts with live
1476 // interval of r1025. Now suppose both registers are spilled, you can
1477 // easily see a situation where both registers are reloaded before
1478 // the INSERT_SUBREG and both target registers that would overlap.
1479 continue;
1480 RewriteMIs.push_back(RewriteInfo(index, MI));
1482 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1484 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1485 // Now rewrite the defs and uses.
1486 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1487 RewriteInfo &rwi = RewriteMIs[i];
1488 ++i;
1489 SlotIndex index = rwi.Index;
1490 MachineInstr *MI = rwi.MI;
1491 // If MI def and/or use the same register multiple times, then there
1492 // are multiple entries.
1493 while (i != e && RewriteMIs[i].MI == MI) {
1494 assert(RewriteMIs[i].Index == index);
1495 ++i;
1497 MachineBasicBlock *MBB = MI->getParent();
1499 if (ImpUse && MI != ReMatDefMI) {
1500 // Re-matting an instruction with virtual register use. Prevent interval
1501 // from being spilled.
1502 getInterval(ImpUse).markNotSpillable();
1505 unsigned MBBId = MBB->getNumber();
1506 unsigned ThisVReg = 0;
1507 if (TrySplit) {
1508 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1509 if (NVI != MBBVRegsMap.end()) {
1510 ThisVReg = NVI->second;
1511 // One common case:
1512 // x = use
1513 // ...
1514 // ...
1515 // def = ...
1516 // = use
1517 // It's better to start a new interval to avoid artifically
1518 // extend the new interval.
1519 if (MI->readsWritesVirtualRegister(li.reg) ==
1520 std::make_pair(false,true)) {
1521 MBBVRegsMap.erase(MBB->getNumber());
1522 ThisVReg = 0;
1527 bool IsNew = ThisVReg == 0;
1528 if (IsNew) {
1529 // This ends the previous live interval. If all of its def / use
1530 // can be folded, give it a low spill weight.
1531 if (NewVReg && TrySplit && AllCanFold) {
1532 LiveInterval &nI = getOrCreateInterval(NewVReg);
1533 nI.weight /= 10.0F;
1535 AllCanFold = true;
1537 NewVReg = ThisVReg;
1539 bool HasDef = false;
1540 bool HasUse = false;
1541 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1542 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1543 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1544 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1545 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1546 if (!HasDef && !HasUse)
1547 continue;
1549 AllCanFold &= CanFold;
1551 // Update weight of spill interval.
1552 LiveInterval &nI = getOrCreateInterval(NewVReg);
1553 if (!TrySplit) {
1554 // The spill weight is now infinity as it cannot be spilled again.
1555 nI.markNotSpillable();
1556 continue;
1559 // Keep track of the last def and first use in each MBB.
1560 if (HasDef) {
1561 if (MI != ReMatOrigDefMI || !CanDelete) {
1562 bool HasKill = false;
1563 if (!HasUse)
1564 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1565 else {
1566 // If this is a two-address code, then this index starts a new VNInfo.
1567 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1568 if (VNI)
1569 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1571 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1572 SpillIdxes.find(MBBId);
1573 if (!HasKill) {
1574 if (SII == SpillIdxes.end()) {
1575 std::vector<SRInfo> S;
1576 S.push_back(SRInfo(index, NewVReg, true));
1577 SpillIdxes.insert(std::make_pair(MBBId, S));
1578 } else if (SII->second.back().vreg != NewVReg) {
1579 SII->second.push_back(SRInfo(index, NewVReg, true));
1580 } else if (index > SII->second.back().index) {
1581 // If there is an earlier def and this is a two-address
1582 // instruction, then it's not possible to fold the store (which
1583 // would also fold the load).
1584 SRInfo &Info = SII->second.back();
1585 Info.index = index;
1586 Info.canFold = !HasUse;
1588 SpillMBBs.set(MBBId);
1589 } else if (SII != SpillIdxes.end() &&
1590 SII->second.back().vreg == NewVReg &&
1591 index > SII->second.back().index) {
1592 // There is an earlier def that's not killed (must be two-address).
1593 // The spill is no longer needed.
1594 SII->second.pop_back();
1595 if (SII->second.empty()) {
1596 SpillIdxes.erase(MBBId);
1597 SpillMBBs.reset(MBBId);
1603 if (HasUse) {
1604 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1605 SpillIdxes.find(MBBId);
1606 if (SII != SpillIdxes.end() &&
1607 SII->second.back().vreg == NewVReg &&
1608 index > SII->second.back().index)
1609 // Use(s) following the last def, it's not safe to fold the spill.
1610 SII->second.back().canFold = false;
1611 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1612 RestoreIdxes.find(MBBId);
1613 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1614 // If we are splitting live intervals, only fold if it's the first
1615 // use and there isn't another use later in the MBB.
1616 RII->second.back().canFold = false;
1617 else if (IsNew) {
1618 // Only need a reload if there isn't an earlier def / use.
1619 if (RII == RestoreIdxes.end()) {
1620 std::vector<SRInfo> Infos;
1621 Infos.push_back(SRInfo(index, NewVReg, true));
1622 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1623 } else {
1624 RII->second.push_back(SRInfo(index, NewVReg, true));
1625 }
1626 RestoreMBBs.set(MBBId);
1627 }
1628 }
1630 // Update spill weight.
1631 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1632 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1633 }
1635 if (NewVReg && TrySplit && AllCanFold) {
1636 // If all of its defs / uses can be folded, give it a low spill weight.
1637 LiveInterval &nI = getOrCreateInterval(NewVReg);
1638 nI.weight /= 10.0F;
1639 }
1640 }
1642 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1643 unsigned vr, BitVector &RestoreMBBs,
1644 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1645 if (!RestoreMBBs[Id])
1646 return false;
1647 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1648 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1649 if (Restores[i].index == index &&
1650 Restores[i].vreg == vr &&
1651 Restores[i].canFold)
1652 return true;
1653 return false;
1654 }
1656 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1657 unsigned vr, BitVector &RestoreMBBs,
1658 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1659 if (!RestoreMBBs[Id])
1660 return;
1661 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1662 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1663 if (Restores[i].index == index && Restores[i].vreg == vr)
1664 Restores[i].index = SlotIndex();
1665 }
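// Illustrative sketch (not part of this pass): alsoFoldARestore and
// eraseRestoreInfo above answer point queries with a linear scan over small
// per-block vectors, and "erase" entries by invalidating their index rather
// than removing them, so outstanding iteration stays stable. A minimal
// standalone model, with hypothetical names and int indexes standing in for
// SlotIndex:
#include <map>
#include <vector>

struct SRInfoSketch {
  int index;       // stand-in for SlotIndex; -1 plays the invalid value
  unsigned vreg;
  bool canFold;
};

static bool sketchCanFoldRestore(std::map<unsigned, std::vector<SRInfoSketch> > &Idxes,
                                 unsigned MBBId, int Index, unsigned VR) {
  // Mirrors alsoFoldARestore: foldable only if this exact (index, vreg)
  // restore was recorded and is still marked canFold.
  std::vector<SRInfoSketch> &Rs = Idxes[MBBId];
  for (unsigned i = 0, e = Rs.size(); i != e; ++i)
    if (Rs[i].index == Index && Rs[i].vreg == VR && Rs[i].canFold)
      return true;
  return false;
}

static void sketchEraseRestore(std::map<unsigned, std::vector<SRInfoSketch> > &Idxes,
                               unsigned MBBId, int Index, unsigned VR) {
  // Mirrors eraseRestoreInfo: tombstone the entry by clearing its index.
  std::vector<SRInfoSketch> &Rs = Idxes[MBBId];
  for (unsigned i = 0, e = Rs.size(); i != e; ++i)
    if (Rs[i].index == Index && Rs[i].vreg == VR)
      Rs[i].index = -1;
}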
1667 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1668 /// spilled and create empty intervals for their uses.
1669 void
1670 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1671 const TargetRegisterClass* rc,
1672 std::vector<LiveInterval*> &NewLIs) {
1673 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1674 re = mri_->reg_end(); ri != re; ) {
1675 MachineOperand &O = ri.getOperand();
1676 MachineInstr *MI = &*ri;
1677 ++ri;
1678 if (MI->isDebugValue()) {
1679 // Remove debug info for now.
1680 O.setReg(0U);
1681 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1682 continue;
1683 }
1684 if (O.isDef()) {
1685 assert(MI->isImplicitDef() &&
1686 "Register def was not rewritten?");
1687 RemoveMachineInstrFromMaps(MI);
1688 vrm.RemoveMachineInstrFromMaps(MI);
1689 MI->eraseFromParent();
1690 } else {
1691 // This must be a use of an implicit_def, so it's not part of the live
1692 // interval. Create a new empty live interval for it.
1693 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1694 unsigned NewVReg = mri_->createVirtualRegister(rc);
1695 vrm.grow();
1696 vrm.setIsImplicitlyDefined(NewVReg);
1697 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1698 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1699 MachineOperand &MO = MI->getOperand(i);
1700 if (MO.isReg() && MO.getReg() == li.reg) {
1701 MO.setReg(NewVReg);
1702 MO.setIsUndef();
1703 }
1704 }
1705 }
1706 }
1707 }
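// Illustrative sketch (not part of this pass) of the operand rewrite above:
// every use of the spilled implicit_def is redirected to a fresh vreg and
// marked undef, so later passes see an undef use rather than a stale spill.
// Hypothetical toy types, modelling only MO.setReg() / MO.setIsUndef():
#include <vector>

struct ToyOperand {
  unsigned Reg;
  bool IsUndef;
};

static void toyRewriteImpDefUses(std::vector<ToyOperand> &Ops,
                                 unsigned OldReg, unsigned NewReg) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (Ops[i].Reg == OldReg) {
      Ops[i].Reg = NewReg;    // retarget the operand, as MO.setReg() does
      Ops[i].IsUndef = true;  // flag it undef, as MO.setIsUndef() does
    }
}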
1709 float
1710 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1711 // Limit the loop depth ridiculousness.
1712 if (loopDepth > 200)
1713 loopDepth = 200;
1715 // The loop depth is used to roughly estimate the number of times the
1716 // instruction is executed. Something like 10^d is simple, but will quickly
1717 // overflow a float. This expression behaves like 10^d for small d, but is
1718 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1719 // headroom before overflow.
1720 // Note that powf() may not be available on all hosts, so for
1721 // consistency we use pow(double, double) instead.
1722 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
1724 return (isDef + isUse) * lc;
1725 }
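// Illustrative check (not part of this pass): the tempered expression above
// tracks 10^d closely for shallow loops while staying finite at the clamp.
// This standalone snippet, with a hypothetical helper name, prints the two
// curves side by side:
#include <cmath>
#include <cstdio>

static void printSpillWeightCurve() {
  for (unsigned d = 0; d <= 5; ++d) {
    double lc = std::pow(1 + (100.0 / (d + 10)), (double)d);
    std::printf("d=%u  tempered=%.3g  naive=%.3g\n",
                d, lc, std::pow(10.0, (double)d));
  }
  // At the clamp, d=200, the tempered curve is ~6.7e33: large, but still
  // comfortably inside float range, exactly as the comment above says.
  std::printf("d=200 tempered=%.3g\n", std::pow(1 + (100.0 / 210), 200.0));
}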
1727 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1728 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1729 NewLIs[i]->weight =
1730 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
1731 }
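// Illustrative sketch (not part of this pass): normalizeSpillWeight() is
// defined in CalcSpillWeights.h; the idea is to divide the accumulated
// use/def frequency by the interval's size, padded by a constant so short
// intervals are judged mostly by their use count rather than by accidental
// index gaps. A model of that idea only; the pad value here is assumed:
static float sketchNormalizeSpillWeight(float UseDefFreq, unsigned Size) {
  const unsigned AssumedPad = 25; // hypothetical pad, for illustration only
  return UseDefFreq / (Size + AssumedPad);
}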
1733 std::vector<LiveInterval*> LiveIntervals::
1734 addIntervalsForSpills(const LiveInterval &li,
1735 const SmallVectorImpl<LiveInterval*> *SpillIs,
1736 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1737 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1739 DEBUG({
1740 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1741 li.print(dbgs(), tri_);
1742 dbgs() << '\n';
1743 });
1745 // Each bit specifies whether a spill is required in the MBB.
1746 BitVector SpillMBBs(mf_->getNumBlockIDs());
1747 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1748 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1749 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1750 DenseMap<unsigned,unsigned> MBBVRegsMap;
1751 std::vector<LiveInterval*> NewLIs;
1752 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1754 unsigned NumValNums = li.getNumValNums();
1755 SmallVector<MachineInstr*, 4> ReMatDefs;
1756 ReMatDefs.resize(NumValNums, NULL);
1757 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1758 ReMatOrigDefs.resize(NumValNums, NULL);
1759 SmallVector<int, 4> ReMatIds;
1760 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1761 BitVector ReMatDelete(NumValNums);
1762 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1764 // Spilling a split live interval. It cannot be split any further. It is
1765 // also guaranteed to be a single val# / range interval.
1766 if (vrm.getPreSplitReg(li.reg)) {
1767 vrm.setIsSplitFromReg(li.reg, 0);
1768 // Unset the split kill marker on the last use.
1769 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1770 if (KillIdx != SlotIndex()) {
1771 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1772 assert(KillMI && "Last use disappeared?");
1773 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1774 assert(KillOp != -1 && "Last use disappeared?");
1775 KillMI->getOperand(KillOp).setIsKill(false);
1776 }
1777 vrm.removeKillPoint(li.reg);
1778 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1779 Slot = vrm.getStackSlot(li.reg);
1780 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1781 MachineInstr *ReMatDefMI = DefIsReMat ?
1782 vrm.getReMaterializedMI(li.reg) : NULL;
1783 int LdSlot = 0;
1784 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1785 bool isLoad = isLoadSS ||
1786 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1787 bool IsFirstRange = true;
1788 for (LiveInterval::Ranges::const_iterator
1789 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1790 // If this is a split live interval with multiple ranges, it means there
1791 // are two-address instructions that re-defined the value. Only the
1792 // first def can be rematerialized!
1793 if (IsFirstRange) {
1794 // Note ReMatOrigDefMI has already been deleted.
1795 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1796 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1797 false, vrm, rc, ReMatIds, loopInfo,
1798 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1799 MBBVRegsMap, NewLIs);
1800 } else {
1801 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1802 Slot, 0, false, false, false,
1803 false, vrm, rc, ReMatIds, loopInfo,
1804 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1805 MBBVRegsMap, NewLIs);
1806 }
1807 IsFirstRange = false;
1808 }
1810 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1811 normalizeSpillWeights(NewLIs);
1812 return NewLIs;
1813 }
1815 bool TrySplit = !intervalIsInOneMBB(li);
1816 if (TrySplit)
1817 ++numSplits;
1818 bool NeedStackSlot = false;
1819 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1820 i != e; ++i) {
1821 const VNInfo *VNI = *i;
1822 unsigned VN = VNI->id;
1823 if (VNI->isUnused())
1824 continue; // Dead val#.
1825 // Is the def for the val# rematerializable?
1826 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1827 bool dummy;
1828 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1829 // Remember how to remat the def of this val#.
1830 ReMatOrigDefs[VN] = ReMatDefMI;
1831 // Original def may be modified so we have to make a copy here.
1832 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1833 CloneMIs.push_back(Clone);
1834 ReMatDefs[VN] = Clone;
1836 bool CanDelete = true;
1837 if (VNI->hasPHIKill()) {
1838 // A kill is a phi node, so not all of its uses can be rematerialized.
1839 // It must not be deleted.
1840 CanDelete = false;
1841 // Need a stack slot if there is any live range where uses cannot be
1842 // rematerialized.
1843 NeedStackSlot = true;
1844 }
1845 if (CanDelete)
1846 ReMatDelete.set(VN);
1847 } else {
1848 // Need a stack slot if there is any live range where uses cannot be
1849 // rematerialized.
1850 NeedStackSlot = true;
1851 }
1852 }
1854 // One stack slot per live interval.
1855 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1856 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1857 Slot = vrm.assignVirt2StackSlot(li.reg);
1859 // This case only occurs when the prealloc splitter has already assigned
1860 // a stack slot to this vreg.
1861 else
1862 Slot = vrm.getStackSlot(li.reg);
1863 }
1865 // Create new intervals and rewrite defs and uses.
1866 for (LiveInterval::Ranges::const_iterator
1867 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1868 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1869 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1870 bool DefIsReMat = ReMatDefMI != NULL;
1871 bool CanDelete = ReMatDelete[I->valno->id];
1872 int LdSlot = 0;
1873 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1874 bool isLoad = isLoadSS ||
1875 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1876 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1877 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1878 CanDelete, vrm, rc, ReMatIds, loopInfo,
1879 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1880 MBBVRegsMap, NewLIs);
1881 }
1883 // Insert spills / restores if we are splitting.
1884 if (!TrySplit) {
1885 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1886 normalizeSpillWeights(NewLIs);
1887 return NewLIs;
1888 }
1890 SmallPtrSet<LiveInterval*, 4> AddedKill;
1891 SmallVector<unsigned, 2> Ops;
1892 if (NeedStackSlot) {
1893 int Id = SpillMBBs.find_first();
1894 while (Id != -1) {
1895 std::vector<SRInfo> &spills = SpillIdxes[Id];
1896 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1897 SlotIndex index = spills[i].index;
1898 unsigned VReg = spills[i].vreg;
1899 LiveInterval &nI = getOrCreateInterval(VReg);
1900 bool isReMat = vrm.isReMaterialized(VReg);
1901 MachineInstr *MI = getInstructionFromIndex(index);
1902 bool CanFold = false;
1903 bool FoundUse = false;
1904 Ops.clear();
1905 if (spills[i].canFold) {
1906 CanFold = true;
1907 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1908 MachineOperand &MO = MI->getOperand(j);
1909 if (!MO.isReg() || MO.getReg() != VReg)
1910 continue;
1912 Ops.push_back(j);
1913 if (MO.isDef())
1914 continue;
1915 if (isReMat ||
1916 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1917 RestoreMBBs, RestoreIdxes))) {
1918 // MI has two-address uses of the same register. If the use
1919 // isn't the first and only use in the BB, then we can't fold
1920 // it. FIXME: Move this to rewriteInstructionsForSpills.
1921 CanFold = false;
1922 break;
1924 FoundUse = true;
1925 }
1926 }
1927 // Fold the store into the def if possible.
1928 bool Folded = false;
1929 if (CanFold && !Ops.empty()) {
1930 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
1931 Folded = true;
1932 if (FoundUse) {
1933 // The uses were folded as well; do not issue a load.
1934 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1935 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1936 }
1937 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1938 }
1939 }
1941 // Otherwise tell the spiller to issue a spill.
1942 if (!Folded) {
1943 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1944 bool isKill = LR->end == index.getStoreIndex();
1945 if (!MI->registerDefIsDead(nI.reg))
1946 // No need to spill a dead def.
1947 vrm.addSpillPoint(VReg, isKill, MI);
1948 if (isKill)
1949 AddedKill.insert(&nI);
1950 }
1951 }
1952 Id = SpillMBBs.find_next(Id);
1953 }
1954 }
1956 int Id = RestoreMBBs.find_first();
1957 while (Id != -1) {
1958 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1959 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1960 SlotIndex index = restores[i].index;
1961 if (index == SlotIndex())
1962 continue;
1963 unsigned VReg = restores[i].vreg;
1964 LiveInterval &nI = getOrCreateInterval(VReg);
1965 bool isReMat = vrm.isReMaterialized(VReg);
1966 MachineInstr *MI = getInstructionFromIndex(index);
1967 bool CanFold = false;
1968 Ops.clear();
1969 if (restores[i].canFold) {
1970 CanFold = true;
1971 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1972 MachineOperand &MO = MI->getOperand(j);
1973 if (!MO.isReg() || MO.getReg() != VReg)
1974 continue;
1976 if (MO.isDef()) {
1977 // If this restore were to be folded, it would have been folded
1978 // already.
1979 CanFold = false;
1980 break;
1982 Ops.push_back(j);
1983 }
1984 }
1986 // Fold the load into the use if possible.
1987 bool Folded = false;
1988 if (CanFold && !Ops.empty()) {
1989 if (!isReMat)
1990 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
1991 else {
1992 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1993 int LdSlot = 0;
1994 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1995 // If the rematerializable def is a load, also try to fold it.
1996 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1997 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1998 Ops, isLoadSS, LdSlot, VReg);
1999 if (!Folded) {
2000 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2001 if (ImpUse) {
2002 // Re-matting an instruction with virtual register use. Add the
2003 // register as an implicit use on the use MI and mark the register
2004 // interval as unspillable.
2005 LiveInterval &ImpLi = getInterval(ImpUse);
2006 ImpLi.markNotSpillable();
2007 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2008 }
2009 }
2010 }
2011 }
2012 // If folding is not possible / failed, then tell the spiller to issue a
2013 // load / rematerialization for us.
2014 if (Folded)
2015 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2016 else
2017 vrm.addRestorePoint(VReg, MI);
2018 }
2019 Id = RestoreMBBs.find_next(Id);
2020 }
2022 // Finalize intervals: add kills, finalize spill weights, and filter out
2023 // dead intervals.
2024 std::vector<LiveInterval*> RetNewLIs;
2025 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2026 LiveInterval *LI = NewLIs[i];
2027 if (!LI->empty()) {
2028 if (!AddedKill.count(LI)) {
2029 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2030 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2031 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2032 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2033 assert(UseIdx != -1);
2034 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2035 LastUse->getOperand(UseIdx).setIsKill();
2036 vrm.addKillPoint(LI->reg, LastUseIdx);
2037 }
2038 }
2039 RetNewLIs.push_back(LI);
2040 }
2041 }
2043 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2044 normalizeSpillWeights(RetNewLIs);
2045 return RetNewLIs;
2046 }
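// Illustrative sketch (not part of this pass): the spill / restore insertion
// loops above iterate SpillMBBs and RestoreMBBs with BitVector's
// find_first() / find_next(), touching only blocks that actually need work.
// The same loop shape over a plain std::vector<bool>, with hypothetical
// names:
#include <vector>

static int sketchFindNext(const std::vector<bool> &Bits, int Prev) {
  for (unsigned i = Prev + 1; i < Bits.size(); ++i)
    if (Bits[i])
      return (int)i;
  return -1; // same "no more bits" sentinel that BitVector uses
}

static unsigned sketchCountMarkedBlocks(const std::vector<bool> &SpillMBBs) {
  unsigned N = 0;
  for (int Id = sketchFindNext(SpillMBBs, -1); Id != -1;
       Id = sketchFindNext(SpillMBBs, Id))
    ++N; // visit only the set bits, as the while loops above do
  return N;
}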
2048 /// hasAllocatableSuperReg - Return true if the specified physical register has
2049 /// any super register that's allocatable.
2050 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2051 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2052 if (allocatableRegs_[*AS] && hasInterval(*AS))
2053 return true;
2054 return false;
2055 }
2057 /// getRepresentativeReg - Find the largest super register of the specified
2058 /// physical register.
2059 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2060 // Find the largest super-register that is allocatable.
2061 unsigned BestReg = Reg;
2062 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2063 unsigned SuperReg = *AS;
2064 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2065 BestReg = SuperReg;
2066 break;
2067 }
2068 }
2069 return BestReg;
2070 }
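// Illustrative sketch (not part of this pass): the two helpers above walk
// TargetRegisterInfo's super-register lists. A toy model with hypothetical
// containers in place of the target tables:
#include <map>
#include <set>
#include <vector>

typedef std::map<unsigned, std::vector<unsigned> > ToySuperRegTable;

static bool toyHasAllocatableSuperReg(unsigned Reg, const ToySuperRegTable &Supers,
                                      const std::set<unsigned> &Allocatable,
                                      const std::set<unsigned> &HasLI) {
  ToySuperRegTable::const_iterator It = Supers.find(Reg);
  if (It == Supers.end())
    return false;
  for (unsigned i = 0, e = It->second.size(); i != e; ++i)
    if (Allocatable.count(It->second[i]) && HasLI.count(It->second[i]))
      return true;
  return false;
}

static unsigned toyRepresentativeReg(unsigned Reg, const ToySuperRegTable &Supers,
                                     const std::set<unsigned> &Allocatable,
                                     const std::set<unsigned> &HasLI) {
  // Stop at the first super register that has an interval but no
  // allocatable super register above it: the top of the chain.
  ToySuperRegTable::const_iterator It = Supers.find(Reg);
  if (It != Supers.end())
    for (unsigned i = 0, e = It->second.size(); i != e; ++i) {
      unsigned S = It->second[i];
      if (!toyHasAllocatableSuperReg(S, Supers, Allocatable, HasLI) &&
          HasLI.count(S))
        return S;
    }
  return Reg;
}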
2072 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2073 /// specified interval that conflicts with the specified physical register.
2074 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2075 unsigned PhysReg) const {
2076 unsigned NumConflicts = 0;
2077 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2078 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2079 E = mri_->reg_end(); I != E; ++I) {
2080 MachineOperand &O = I.getOperand();
2081 MachineInstr *MI = O.getParent();
2082 if (MI->isDebugValue())
2083 continue;
2084 SlotIndex Index = getInstructionIndex(MI);
2085 if (pli.liveAt(Index))
2086 ++NumConflicts;
2087 }
2088 return NumConflicts;
2089 }
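// Illustrative sketch (not part of this pass): conflict counting reduces to
// asking, for each use/def index of the vreg, whether the physreg interval
// is live there. A toy model with half-open integer segments:
#include <utility>
#include <vector>

typedef std::vector<std::pair<int, int> > ToyInterval; // [start, end) segments

static bool toyLiveAt(const ToyInterval &LI, int Idx) {
  for (unsigned i = 0, e = LI.size(); i != e; ++i)
    if (LI[i].first <= Idx && Idx < LI[i].second)
      return true;
  return false;
}

static unsigned toyNumConflicts(const std::vector<int> &UseDefIndexes,
                                const ToyInterval &PhysLI) {
  unsigned N = 0;
  for (unsigned i = 0, e = UseDefIndexes.size(); i != e; ++i)
    if (toyLiveAt(PhysLI, UseDefIndexes[i]))
      ++N;
  return N;
}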
2091 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2092 /// around all defs and uses of the specified interval. Return true if it
2093 /// was able to cut its interval.
2094 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2095 unsigned PhysReg, VirtRegMap &vrm) {
2096 unsigned SpillReg = getRepresentativeReg(PhysReg);
2098 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2099 << " represented by " << tri_->getName(SpillReg) << '\n');
2101 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2102 // There may be registers which alias PhysReg but which are not a
2103 // sub-register of the chosen representative super register. Assert,
2104 // since we can't handle that case yet.
2105 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2106 tri_->isSuperRegister(*AS, SpillReg));
2108 bool Cut = false;
2109 SmallVector<unsigned, 4> PRegs;
2110 if (hasInterval(SpillReg))
2111 PRegs.push_back(SpillReg);
2112 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2113 if (hasInterval(*SR))
2114 PRegs.push_back(*SR);
2116 DEBUG({
2117 dbgs() << "Trying to spill:";
2118 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2119 dbgs() << ' ' << tri_->getName(PRegs[i]);
2120 dbgs() << '\n';
2121 });
2123 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2124 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2125 E = mri_->reg_end(); I != E; ++I) {
2126 MachineOperand &O = I.getOperand();
2127 MachineInstr *MI = O.getParent();
2128 if (MI->isDebugValue() || SeenMIs.count(MI))
2129 continue;
2130 SeenMIs.insert(MI);
2131 SlotIndex Index = getInstructionIndex(MI);
2132 bool LiveReg = false;
2133 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2134 unsigned PReg = PRegs[i];
2135 LiveInterval &pli = getInterval(PReg);
2136 if (!pli.liveAt(Index))
2137 continue;
2138 LiveReg = true;
2139 SlotIndex StartIdx = Index.getLoadIndex();
2140 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2141 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2142 std::string msg;
2143 raw_string_ostream Msg(msg);
2144 Msg << "Ran out of registers during register allocation!";
2145 if (MI->isInlineAsm()) {
2146 Msg << "\nPlease check your inline asm statement for invalid "
2147 << "constraints:\n";
2148 MI->print(Msg, tm_);
2149 }
2150 report_fatal_error(Msg.str());
2151 }
2152 pli.removeRange(StartIdx, EndIdx);
2153 LiveReg = true;
2154 }
2155 if (!LiveReg)
2156 continue;
2157 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2158 vrm.addEmergencySpill(SpillReg, MI);
2159 Cut = true;
2160 }
2161 return Cut;
2162 }
2164 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2165 MachineInstr* startInst) {
2166 LiveInterval& Interval = getOrCreateInterval(reg);
2167 VNInfo* VN = Interval.getNextValue(
2168 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2169 startInst, getVNInfoAllocator());
2170 VN->setHasPHIKill(true);
2171 LiveRange LR(
2172 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2173 getMBBEndIdx(startInst->getParent()), VN);
2174 Interval.addRange(LR);
2176 return LR;
2177 }
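// Illustrative sketch (not part of this pass): addLiveRangeToEndOfBlock
// builds a range from startInst's def slot to the end index of its block,
// with a fresh value number marked as having a PHI kill. With toy integer
// indexes in place of SlotIndex:
struct ToyLiveRange {
  int Start, End; // [def of startInst, end of parent MBB)
};

static ToyLiveRange toyRangeToBlockEnd(int DefIndex, int BlockEndIndex) {
  ToyLiveRange LR;
  LR.Start = DefIndex;
  LR.End = BlockEndIndex;
  return LR;
}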