//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of mod/ref instructions unfolded");
namespace {
  enum RewriterName { local, trivial };
}

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use (default=local)"),
            cl::Prefix,
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd),
            cl::init(local));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}

/// substitutePhysReg - Replace virtual register in MachineOperand with a
/// physical register. Do the right thing with the sub-register index.
/// Note that operands may be added, so the MO reference is no longer valid.
static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
                              const TargetRegisterInfo &TRI) {
  if (MO.getSubReg()) {
    MO.substPhysReg(Reg, TRI);

    // Any kill flags apply to the full virtual register, so they also apply to
    // the full physical register.
    // We assume that partial defs have already been decorated with a super-reg
    // <imp-def> operand by LiveIntervals.
    MachineInstr &MI = *MO.getParent();
    if (MO.isUse() && !MO.isUndef() &&
        (MO.isKill() || MI.isRegTiedToDefOperand(&MO - &MI.getOperand(0))))
      MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
  } else {
    MO.setReg(Reg);
  }
}
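// Illustrative note (editorial, not from the original source): given a
// sub-register operand such as %reg1024:sub_16bit that VirtRegMap assigned
// to EAX, substPhysReg() narrows the operand to the matching physical
// sub-register:
//
//   %reg1024:sub_16bit = ...   -->   %AX = ...
//
// The register names and sub-register index here are hypothetical x86
// examples; the actual names depend on the target.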
namespace {

/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(dbgs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(dbgs() << "**** Machine Instrs"
          << " (NOTE! Does not include spills and reloads!) ****\n");
    DEBUG(MF.dump());

    MachineRegisterInfo *mri = &MF.getRegInfo();
    const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      const LiveInterval *li = liItr->second;
      unsigned reg = li->reg;

      if (TargetRegisterInfo::isPhysicalRegister(reg)) {
        if (!li->empty())
          mri->setPhysRegUsed(reg);
      }
      else {
        if (!VRM.hasPhys(reg))
          continue;
        unsigned pReg = VRM.getPhys(reg);
        mri->setPhysRegUsed(pReg);
        // Copy the register use-list before traversing it, since
        // substitutePhysReg may add operands and invalidate the iterators.
        SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
        for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
             E = mri->reg_end(); I != E; ++I)
          reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
        for (unsigned N = 0; N != reglist.size(); ++N)
          substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
                            pReg, *tri);
        changed |= !reglist.empty();
      }
    }

    DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    return changed;
  }

};

}

// ************************************************************************ //
namespace {

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remats are available in
/// each register.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when appropriate.
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(dbgs() << " in physreg " << TRI->getName(Reg) << "\n");
  }
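  // Illustrative sketch (editorial, not from the original source): the map
  // packs the physreg and the CanClobber flag into a single unsigned.
  // Assuming stack slot #3 is available in a physreg whose number is 42 and
  // may be clobbered, the entry is
  //
  //   SpillSlotsOrReMatsAvailable[3] == (42 << 1) | 1
  //
  // getSpillSlotOrReMatPhysReg(3) shifts the flag away and returns 42, while
  // canClobberPhysRegForSS(3) tests the low bit. The numbers are made up for
  // the example.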
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      I++;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation
  /// scheduler happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};

}
// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock
// issues.
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on 16-bit targets like PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        goto stop;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      goto stop;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        goto stop;
    NewInsertLoc = Prev;
  }
stop:;

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
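// A minimal sketch of the intended effect (hypothetical instructions, not
// from the original source). Before:        After back-scheduling:
//
//   mul r2, r3, r4                            ldr r1, [sp, #8]
//   add r5, r6, r7                            mul r2, r3, r4
//   ldr r1, [sp, #8]   ; reload               add r5, r6, r7
//   ldr r0, [r1]       ; use                  ldr r0, [r1]
//
// The upward scan stops at any instruction that references the stack slot,
// PhysReg, or one of PhysReg's aliases.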
namespace {

// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};

/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  ///   t1 := op t2, t3
  ///   t2 <- assigned r0 for use by the reload but ended up reusing r1
  ///   t3 <- assigned r1 for use by the reload but ended up reusing r0
  ///   t1 <- desires r1
  ///     sees r1 is taken by t2, tries t2's reload register r0
  ///     sees r0 is taken by t3, tries t3's reload register r1
  ///     sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};

}
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kill marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    // KillOps[Reg] might be a def of a super-register.
    unsigned KReg = KillOps[Reg]->getReg();
    KillOps[KReg] = NULL;
    RegKills.reset(KReg);
    for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
      if (RegKills[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}

/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        if (RegKills[*SR]) {
          KillOps[*SR] = NULL;
          RegKills.reset(*SR);
        }
      }
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also check if
/// the def MI has other definition operands that are not dead; the result is
/// returned by reference in HasLiveDef.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef,
                             const TargetRegisterInfo *TRI) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() == 0 ||
          (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  // These do not affect kill info at all.
  if (MI.isDebugValue())
    return;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      // KillOps[Reg] might be a def of a super-register.
      unsigned KReg = KillOps[Reg]->getReg();
      KillOps[KReg] = NULL;
      RegKills.reset(KReg);

      // Must be a def of a super-register. Its other sub-registers are no
      // longer killed as well.
      for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    } else {
      // Check for subreg kills as well.
      // d4 =
      // store d4, fi#0
      // ...
      //    = s8<kill>
      // ...
      //    = d4  <avoiding reload>
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        unsigned SReg = *SR;
        if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI) {
          KillOps[SReg]->setIsKill(false);
          unsigned KReg = KillOps[SReg]->getReg();
          KillOps[KReg] = NULL;
          RegKills.reset(KReg);

          for (const unsigned *SSR = TRI->getSubRegisters(KReg); *SSR; ++SSR) {
            KillOps[*SSR] = NULL;
            RegKills.reset(*SSR);
          }
        }
      }
    }

    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
    for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
#endif
  TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    substitutePhysReg(MO, Phys, *TRI);
  }
  ++NumReMats;
}
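// Illustrative note (editorial, not from the original source): instead of
// reloading a spilled value from its stack slot, re-materialization
// re-executes the cheap defining instruction at the reload point. Assuming a
// hypothetical constant definition "%reg1024 = MOV32ri 7" that was spilled
// to SS#4, a reload of SS#4 into EAX becomes simply "EAX = MOV32ri 7". Any
// virtual register operands of the cloned instruction are rewritten to their
// assigned physregs by the loop above.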
/// findSuperReg - Find the super-register in the given register class whose
/// SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
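// Hypothetical x86 example (editorial, not from the original source): with
// RC = GR32 and SubIdx naming the low 16-bit sub-register, passing SubReg =
// AX walks GR32 and returns EAX, since getSubReg(EAX, SubIdx) == AX. If no
// register in RC has SubReg at that index, the function returns 0.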
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n");
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n");
    else
      DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                        BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = llvm::next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      if (TRI->regsOverlap(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // MI may be using only a sub-register of PhysRegUsed.
        unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
        unsigned SubIdx = 0;
        assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
               "A reuse cannot be a virtual register");
        if (PRRU != RealPhysRegUsed) {
          // What was the sub-register index?
          SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
          assert(SubIdx &&
                 "Operand physreg is not a sub-register of PhysRegUsed");
        }

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                              MF, MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
        int SSorRMId = DoReMat
          ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                           DoReMat, SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                        TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC, TRI);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(dbgs() << '\t' << *prior(InsertLoc));

        DEBUG(dbgs() << "Reuse undone!\n");
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}
// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by
/// looking at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  unsigned Count = 0;
  while (Count < 2) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    MII = PrevMI;

    if (PrevMI->isDebugValue())
      continue; // Skip over dbg_value instructions.
    ++Count;

    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          RC->contains(Kill))
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }
  }

  return 0;
}

static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
                         const TargetRegisterInfo &TRI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      substitutePhysReg(MO, PhysReg, TRI);
  }
}
namespace {

struct RefSorter {
  bool operator()(const std::pair<MachineInstr*, int> &A,
                  const std::pair<MachineInstr*, int> &B) {
    return A.second < B.second;
  }
};

// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  VirtRegMap *VRM;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
  DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;

  MachineBasicBlock *MBB;       // Basic block currently being processed.

public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs);

private:

  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps);

  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI);

  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps);

  void TransferDeadness(unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool InsertEmergencySpills(MachineInstr *MI);

  bool InsertRestores(MachineInstr *MI,
                      AvailableSpills &Spills,
                      BitVector &RegKills,
                      std::vector<MachineOperand*> &KillOps);

  bool InsertSpills(MachineInstr *MI);

  void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
                   std::vector<MachineInstr*> &MaybeDeadStores,
                   BitVector &RegKills,
                   ReuseInfo &ReusedOperands,
                   std::vector<MachineOperand*> &KillOps);

  void RewriteMBB(LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps);
};

}
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
                                         LiveIntervals* LIs) {
  MRI = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  VRM = &vrm;
  AllocatableRegs = TRI->getAllocatableSet(MF);
  DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
        << MF.getFunction()->getName() << "':\n");
  DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
        " reloads!) ****\n");
  DEBUG(MF.dump());

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SingleEntrySuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(LIs, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single-predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: There may be more than one successor that has MBB as its
        // only predecessor; we currently follow just the first one.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(LIs, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
  DEBUG(MF.dump());

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM->getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT) {
    for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
      SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
      if (!VRM->isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
          MachineInstr *DVMI = DbgValues[j];
          MachineBasicBlock *DVMBB = DVMI->getParent();
          DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
          VRM->RemoveMachineInstrFromMaps(DVMI);
          DVMBB->erase(DVMI);
        }
        ++NumDSS;
      }
      DbgValues.clear();
    }
  }
  Slot2DbgValues.clear();

  return true;
}
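// Note on the traversal above (editorial, not from the original source):
// availability info in 'Spills' is only meaningful along a chain of blocks
// where each block is the sole predecessor of the next, e.g.
//
//   BB0 -> BB1 -> BB2      (BB1 and BB2 each have exactly one predecessor)
//
// so values left in physregs at the end of BB0 are still valid in BB1 and
// BB2. A successor with multiple predecessors is not continued into, and the
// info is discarded by Spills.clear() once the chain ends.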
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
///     xorq  %r12<kill>, %r13
///     addq  %rax, -184(%rbp)
///     addq  %r13, -184(%rbp)
/// ==>
///     xorq  %r12<kill>, %r13
///     movq  -184(%rbp), %r12
///     addq  %rax, %r12
///     addq  %r13, %r12
///     movq  %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
                  MachineBasicBlock::iterator &MII,
                  std::vector<MachineInstr*> &MaybeDeadStores,
                  AvailableSpills &Spills,
                  BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator NextMII = llvm::next(MII);
  // Skip over dbg_value instructions.
  while (NextMII != MBB->end() && NextMII->isDebugValue())
    NextMII = llvm::next(NextMII);
  if (NextMII == MBB->end())
    return false;

  if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
    return false;

  // Now let's see if the last couple of instructions happen to have freed up
  // a register.
  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
  if (!PhysReg)
    return false;

  MachineFunction &MF = *MBB->getParent();
  TRI = MF.getTarget().getRegisterInfo();
  MachineInstr &MI = *MII;
  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // If the next instruction also folds the same SS modref and can be
  // unfolded, then it's worthwhile to issue a load from SS into the free
  // register and then unfold these instructions.
  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // Back-schedule reloads and remats.
  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);

  // Load from SS to the spare physical register.
  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
  // This invalidates Phys.
  Spills.ClobberPhysReg(PhysReg);
  // Remember it's available.
  Spills.addAvailable(SS, PhysReg);
  MaybeDeadStores[SS] = NULL;

  // Unfold current MI.
  SmallVector<MachineInstr*, 4> NewMIs;
  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
    llvm_unreachable("Unable to unfold the load / store folding instruction!");
  assert(NewMIs.size() == 1);
  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
  VRM->transferRestorePts(&MI, NewMIs[0]);
  MII = MBB->insert(MII, NewMIs[0]);
  InvalidateKills(MI, TRI, RegKills, KillOps);
  VRM->RemoveMachineInstrFromMaps(&MI);
  MBB->erase(&MI);
  ++NumModRefUnfold;

  // Unfold next instructions that fold the same SS.
  do {
    MachineInstr &NextMI = *NextMII;
    NextMII = llvm::next(NextMII);
    NewMIs.clear();
    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
    VRM->transferRestorePts(&NextMI, NewMIs[0]);
    MBB->insert(NextMII, NewMIs[0]);
    InvalidateKills(NextMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(&NextMI);
    MBB->erase(&NextMI);
    ++NumModRefUnfold;
    // Skip over dbg_value instructions.
    while (NextMII != MBB->end() && NextMII->isDebugValue())
      NextMII = llvm::next(NextMII);
    if (NextMII == MBB->end())
      break;
  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));

  // Store the value back into SS.
  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
  MachineInstr *StoreMI = prior(NextMII);
  VRM->addSpillSlotUse(SS, StoreMI);
  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

  return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                 std::vector<MachineInstr*> &MaybeDeadStores,
                 AvailableSpills &Spills,
                 BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  MachineFunction &MF = *MBB->getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which would invalidate the iterator, so
    // increment the iterator first.
    ++I;
    if (VRM->isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM->getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc) {
    if (!UnfoldVR)
      return false;

    // Look for other unfolding opportunities.
    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
                             RegKills, KillOps);
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM->isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM->getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM->isReMaterialized(VirtReg))
      continue;
    int SS = VRM->getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM->hasPhys(VirtReg)) {
      PhysReg = VRM->getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      MBB->insert(MII, NewMI);
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
      NewMI->eraseFromParent();
      if (FoldedMI) {
        VRM->addSpillSlotUse(SS, FoldedMI);
        if (!VRM->hasPhys(UnfoldVR))
          VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = FoldedMI;
        InvalidateKills(MI, TRI, RegKills, KillOps);
        VRM->RemoveMachineInstrFromMaps(&MI);
        MBB->erase(&MI);
        return true;
      }
    }
  }

  return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where SrcReg
/// is r1 and it is tied to r0. Return true if after commuting this instruction
/// it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
                                      const TargetInstrDesc &TID,
                                      unsigned SrcReg,
                                      const TargetInstrInfo *TII,
                                      unsigned &DstIdx) {
  // Require exactly one def and three operands for the r0 = op r1, r2 shape.
  if (TID.getNumDefs() != 1 || TID.getNumOperands() != 3)
    return false;
  if (!DefMI->getOperand(1).isReg() ||
      DefMI->getOperand(1).getReg() != SrcReg)
    return false;
  unsigned DefIdx;
  if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
    return false;
  unsigned SrcIdx1, SrcIdx2;
  if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
    return false;
  if (SrcIdx1 == 1 && SrcIdx2 == 2) {
    DstIdx = 2;
    return true;
  }
  return false;
}
/// CommuteToFoldReload -
/// Look for
///   r1 = load fi#1
///   r1 = op r1, r2<kill>
///   store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can transform these to
///   r2 = op r2, fi#1
///   store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                    unsigned VirtReg, unsigned SrcReg, int SS,
                    AvailableSpills &Spills,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps,
                    const TargetRegisterInfo *TRI) {
  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB->begin() &&
      TID.isCommutable() &&
      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    MBB->insert(MII, CommutedMI);
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    CommutedMI->eraseFromParent();
    if (!FoldedMI)
      return false;

    VRM->addSpillSlotUse(SS, FoldedMI);
    VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
    TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM->addSpillSlotUse(SS, StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = FoldedMI;  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(ReloadMI);
    MBB->erase(ReloadMI);
    InvalidateKills(*DefMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(DefMI);
    MBB->erase(DefMI);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(&MI);
    MBB->erase(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}
1554 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
1555 /// the last store to the same slot is now dead. If so, remove the last store.
1556 void LocalRewriter::
1557 SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
1558 int Idx, unsigned PhysReg, int StackSlot,
1559 const TargetRegisterClass *RC,
1560 bool isAvailable, MachineInstr *&LastStore,
1561 AvailableSpills &Spills,
1562 SmallSet<MachineInstr*, 4> &ReMatDefs,
1563 BitVector &RegKills,
1564 std::vector<MachineOperand*> &KillOps) {
1566 MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1567 TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
1568 TRI);
1569 MachineInstr *StoreMI = prior(oldNextMII);
1570 VRM->addSpillSlotUse(StackSlot, StoreMI);
1571 DEBUG(dbgs() << "Store:\t" << *StoreMI);
1573 // If there is a dead store to this stack slot, nuke it now.
1574 if (LastStore) {
1575 DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
1576 ++NumDSE;
1577 SmallVector<unsigned, 2> KillRegs;
1578 InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
1579 MachineBasicBlock::iterator PrevMII = LastStore;
1580 bool CheckDef = PrevMII != MBB->begin();
1581 if (CheckDef)
1582 --PrevMII;
1583 VRM->RemoveMachineInstrFromMaps(LastStore);
1584 MBB->erase(LastStore);
1585 if (CheckDef) {
1586 // Look at defs of killed registers on the store. Mark the defs
1587 // as dead since the store has been deleted and they aren't
1588 // being reused.
1589 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
1590 bool HasOtherDef = false;
1591 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
1592 MachineInstr *DeadDef = PrevMII;
1593 if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
1594 // FIXME: This assumes a remat def does not have side effects.
1595 VRM->RemoveMachineInstrFromMaps(DeadDef);
1596 MBB->erase(DeadDef);
1597 ++NumDRM;
1604 // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
1605 // the last of multiple instructions is the actual store.
1606 LastStore = prior(oldNextMII);
1608 // If the stack slot value was previously available in some other
1609 // register, change it now. Otherwise, make the register available
1610 // in PhysReg.
1611 Spills.ModifyStackSlotOrReMat(StackSlot);
1612 Spills.ClobberPhysReg(PhysReg);
1613 Spills.addAvailable(StackSlot, PhysReg, isAvailable);
1614 ++NumStores;
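// Illustrative sketch (hypothetical) of the dead store elision above:
//   store r1 -> fi#2                <- LastStore; fi#2 never read since
//   ...
//   store r1 -> fi#2                <- StoreMI just inserted
// The earlier store is erased (NumDSE), and a remat def that fed only
// that store may be erased with it (NumDRM).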
1617 /// isSafeToDelete - Return true if this instruction doesn't produce any side
1618 /// effects and all of its defs are dead.
1619 static bool isSafeToDelete(MachineInstr &MI) {
1620 const TargetInstrDesc &TID = MI.getDesc();
1621 if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
1622 TID.isBarrier() || TID.isReturn() ||
1623 TID.hasUnmodeledSideEffects())
1624 return false;
1625 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1626 MachineOperand &MO = MI.getOperand(i);
1627 if (!MO.isReg() || !MO.getReg())
1628 continue;
1629 if (MO.isDef() && !MO.isDead())
1630 return false;
1631 if (MO.isUse() && MO.isKill())
1632 // FIXME: We can't remove kill markers or else the scavenger will assert.
1633 // An alternative is to add an ADD pseudo instruction to replace kill
1634 // markers.
1635 return false;
1637 return true;
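// e.g. (hypothetical): "r1<def,dead> = ADD r2, r3" is safe to delete,
// while anything that mayLoad/mayStore, is a call/terminator, or still
// carries a kill marker must be kept.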
1640 /// TransferDeadness - An identity copy definition is dead and is being
1641 /// removed. Find the last def or use and mark it as dead / kill.
1642 void LocalRewriter::
1643 TransferDeadness(unsigned Reg, BitVector &RegKills,
1644 std::vector<MachineOperand*> &KillOps) {
1645 SmallPtrSet<MachineInstr*, 4> Seens;
1646 SmallVector<std::pair<MachineInstr*, int>,8> Refs;
1647 for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
1648 RE = MRI->reg_end(); RI != RE; ++RI) {
1649 MachineInstr *UDMI = &*RI;
1650 if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
1651 continue;
1652 DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
1653 if (DI == DistanceMap.end())
1654 continue;
1655 if (Seens.insert(UDMI))
1656 Refs.push_back(std::make_pair(UDMI, DI->second));
1659 if (Refs.empty())
1660 return;
1661 std::sort(Refs.begin(), Refs.end(), RefSorter());
1663 while (!Refs.empty()) {
1664 MachineInstr *LastUDMI = Refs.back().first;
1665 Refs.pop_back();
1667 MachineOperand *LastUD = NULL;
1668 for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
1669 MachineOperand &MO = LastUDMI->getOperand(i);
1670 if (!MO.isReg() || MO.getReg() != Reg)
1671 continue;
1672 if (!LastUD || (LastUD->isUse() && MO.isDef()))
1673 LastUD = &MO;
1674 if (LastUDMI->isRegTiedToDefOperand(i))
1675 break;
1677 if (LastUD->isDef()) {
1678 // If the instruction has no side effects, delete it and propagate
1679 // backward further. Otherwise, mark it dead and we are done.
1680 if (!isSafeToDelete(*LastUDMI)) {
1681 LastUD->setIsDead();
1682 break;
1684 VRM->RemoveMachineInstrFromMaps(LastUDMI);
1685 MBB->erase(LastUDMI);
1686 } else {
1687 LastUD->setIsKill();
1688 RegKills.set(Reg);
1689 KillOps[Reg] = LastUD;
1690 break;
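// Illustrative sketch (hypothetical) of TransferDeadness: when the
// identity copy below is deleted, the last prior reference of r1
// inherits its deadness:
//   r1 = ADD r2, r3                 <- def: marked dead, or deleted
//                                      outright if isSafeToDelete()
//   r1 = COPY r1                    <- identity copy being removed
// If the last reference is a use rather than a def, it is marked as the
// new kill instead.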
1695 /// InsertEmergencySpills - Insert emergency spills before MI if requested by
1696 /// VRM. Return true if spills were inserted.
1697 bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
1698 if (!VRM->hasEmergencySpills(MI))
1699 return false;
1700 MachineBasicBlock::iterator MII = MI;
1701 SmallSet<int, 4> UsedSS;
1702 std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
1703 for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
1704 unsigned PhysReg = EmSpills[i];
1705 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
1706 assert(RC && "Unable to determine register class!");
1707 int SS = VRM->getEmergencySpillSlot(RC);
1708 if (UsedSS.count(SS))
1709 llvm_unreachable("Need to spill more than one physical register!");
1710 UsedSS.insert(SS);
1711 TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
1712 MachineInstr *StoreMI = prior(MII);
1713 VRM->addSpillSlotUse(SS, StoreMI);
1715 // Back-schedule reloads and remats.
1716 MachineBasicBlock::iterator InsertLoc =
1717 ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
1718 TII, *MBB->getParent());
1720 TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);
1722 MachineInstr *LoadMI = prior(InsertLoc);
1723 VRM->addSpillSlotUse(SS, LoadMI);
1724 ++NumPSpills;
1725 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1727 return true;
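// Sketch (hypothetical) of an emergency spill of r0 around MI, using the
// per-class emergency slot fi#E:
//   store r0 -> fi#E                <- StoreMI; frees r0 for scavenging
//   MI                              <- needs a scratch register
//   r0 = load fi#E                  <- LoadMI, possibly back-scheduled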
1730 /// InsertRestores - Restore registers before MI if requested by VRM. Return
1731 /// true if any instructions were inserted.
1732 bool LocalRewriter::InsertRestores(MachineInstr *MI,
1733 AvailableSpills &Spills,
1734 BitVector &RegKills,
1735 std::vector<MachineOperand*> &KillOps) {
1736 if (!VRM->isRestorePt(MI))
1737 return false;
1738 MachineBasicBlock::iterator MII = MI;
1739 std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
1740 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
1741 unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
1742 if (!VRM->getPreSplitReg(VirtReg))
1743 continue; // Split interval spilled again.
1744 unsigned Phys = VRM->getPhys(VirtReg);
1745 MRI->setPhysRegUsed(Phys);
1747 // Check if the value being restored is available. If so, it must be
1748 // from a predecessor BB that falls through into this BB. We do not
1749 // expect:
1750 // BB1:
1751 // r1 = load fi#1
1752 // ...
1753 // = r1<kill>
1754 // ... # r1 not clobbered
1755 // ...
1756 // = load fi#1
1757 bool DoReMat = VRM->isReMaterialized(VirtReg);
1758 int SSorRMId = DoReMat
1759 ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
1760 unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1761 if (InReg == Phys) {
1762 // If the value is already available in the expected register, save
1763 // a reload / remat.
1764 if (DoReMat)
1765 DEBUG(dbgs() << "Reusing RM#"
1766 << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1767 else
1768 DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1769 DEBUG(dbgs() << " from physreg "
1770 << TRI->getName(InReg) << " for vreg"
1771 << VirtReg <<" instead of reloading into physreg "
1772 << TRI->getName(Phys) << '\n');
1773 ++NumOmitted;
1774 continue;
1775 } else if (InReg && InReg != Phys) {
1776 if (DoReMat)
1777 DEBUG(dbgs() << "Reusing RM#"
1778 << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1779 else
1780 DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1781 DEBUG(dbgs() << " from physreg "
1782 << TRI->getName(InReg) << " for vreg"
1783 << VirtReg <<" by copying it into physreg "
1784 << TRI->getName(Phys) << '\n');
1786 // If the reloaded / remat value is available in another register,
1787 // copy it to the desired register.
1789 // Back-schedule reloads and remats.
1790 MachineBasicBlock::iterator InsertLoc =
1791 ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1792 *MBB->getParent());
1793 MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
1794 TII->get(TargetOpcode::COPY), Phys)
1795 .addReg(InReg, RegState::Kill);
1797 // This invalidates Phys.
1798 Spills.ClobberPhysReg(Phys);
1799 // Remember it's available.
1800 Spills.addAvailable(SSorRMId, Phys);
1802 CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1803 UpdateKills(*CopyMI, TRI, RegKills, KillOps);
1805 DEBUG(dbgs() << '\t' << *CopyMI);
1806 ++NumCopified;
1807 continue;
1810 // Back-schedule reloads and remats.
1811 MachineBasicBlock::iterator InsertLoc =
1812 ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1813 *MBB->getParent());
1815 if (VRM->isReMaterialized(VirtReg)) {
1816 ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
1817 } else {
1818 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1819 TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
1820 MachineInstr *LoadMI = prior(InsertLoc);
1821 VRM->addSpillSlotUse(SSorRMId, LoadMI);
1822 ++NumLoads;
1823 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1826 // This invalidates Phys.
1827 Spills.ClobberPhysReg(Phys);
1828 // Remember it's available.
1829 Spills.addAvailable(SSorRMId, Phys);
1831 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
1832 DEBUG(dbgs() << '\t' << *prior(InsertLoc));
1834 return true;
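// The three restore cases above, sketched for a hypothetical split vreg
// assigned to r1 with its value in fi#3:
//   value already in r1:   nothing emitted           (NumOmitted)
//   value in some r2:      r1 = COPY r2<kill>        (NumCopified)
//   not available:         r1 = load fi#3, or remat  (NumLoads)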
1837 /// InsertSpills - Insert spills after MI if requested by VRM. Return
1838 /// true if spills were inserted.
1839 bool LocalRewriter::InsertSpills(MachineInstr *MI) {
1840 if (!VRM->isSpillPt(MI))
1841 return false;
1842 MachineBasicBlock::iterator MII = MI;
1843 std::vector<std::pair<unsigned,bool> > &SpillRegs =
1844 VRM->getSpillPtSpills(MI);
1845 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1846 unsigned VirtReg = SpillRegs[i].first;
1847 bool isKill = SpillRegs[i].second;
1848 if (!VRM->getPreSplitReg(VirtReg))
1849 continue; // Split interval spilled again.
1850 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
1851 unsigned Phys = VRM->getPhys(VirtReg);
1852 int StackSlot = VRM->getStackSlot(VirtReg);
1853 MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1854 TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
1855 RC, TRI);
1856 MachineInstr *StoreMI = prior(oldNextMII);
1857 VRM->addSpillSlotUse(StackSlot, StoreMI);
1858 DEBUG(dbgs() << "Store:\t" << *StoreMI);
1859 VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1861 return true;
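// e.g. (hypothetical): for a spill point recorded after MI with
// (vreg1024 -> r1, isKill), a single "store r1 -> fi#1" is appended
// right after MI and registered via addSpillSlotUse and virtFolded.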
1865 /// ProcessUses - Process all of MI's spilled operands and all available
1866 /// operands.
1867 void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
1868 std::vector<MachineInstr*> &MaybeDeadStores,
1869 BitVector &RegKills,
1870 ReuseInfo &ReusedOperands,
1871 std::vector<MachineOperand*> &KillOps) {
1872 // Clear kill info.
1873 SmallSet<unsigned, 2> KilledMIRegs;
1874 SmallVector<unsigned, 4> VirtUseOps;
1875 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1876 MachineOperand &MO = MI.getOperand(i);
1877 if (!MO.isReg() || MO.getReg() == 0)
1878 continue; // Ignore non-register operands.
1880 unsigned VirtReg = MO.getReg();
1881 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1882 // Ignore physregs for spilling, but remember that it is used by this
1883 // function.
1884 MRI->setPhysRegUsed(VirtReg);
1885 continue;
1888 // We want to process implicit virtual register uses first.
1889 if (MO.isImplicit())
1890 // If the virtual register is implicitly defined, emit an implicit_def
1891 // before it so the scavenger knows it's "defined".
1892 // FIXME: This is a horrible hack done by the register allocator to
1893 // remat a definition with a virtual register operand.
1894 VirtUseOps.insert(VirtUseOps.begin(), i);
1895 else
1896 VirtUseOps.push_back(i);
1898 // A partial def causes problems because the same operand both reads and
1899 // writes the register. This rewriter is designed to rewrite uses and defs
1900 // separately, so a partial def would already have been rewritten to a
1901 // physreg by the time we get to processing defs.
1902 // Add an implicit use operand to model the partial def.
1903 if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
1904 MI.findRegisterUseOperandIdx(VirtReg) == -1) {
1905 VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
1906 MI.addOperand(MachineOperand::CreateReg(VirtReg,
1907 false, // isDef
1908 true)); // isImplicit
1909 DEBUG(dbgs() << "Partial redef: " << MI);
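// Hypothetical example of the implicit use added above: a partial redef
//   %reg1024:sub_16<def> = MOV16ri 5
// also reads the untouched lanes of %reg1024, so it is processed as if
// it had been written
//   %reg1024:sub_16<def> = MOV16ri 5, %reg1024<imp-use>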
1913 // Process all of the spilled uses and all non-spilled reg references.
1914 SmallVector<int, 2> PotentialDeadStoreSlots;
1915 KilledMIRegs.clear();
1916 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1917 unsigned i = VirtUseOps[j];
1918 unsigned VirtReg = MI.getOperand(i).getReg();
1919 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1920 "Not a virtual register?");
1922 unsigned SubIdx = MI.getOperand(i).getSubReg();
1923 if (VRM->isAssignedReg(VirtReg)) {
1924 // This virtual register was assigned a physreg!
1925 unsigned Phys = VRM->getPhys(VirtReg);
1926 MRI->setPhysRegUsed(Phys);
1927 if (MI.getOperand(i).isDef())
1928 ReusedOperands.markClobbered(Phys);
1929 substitutePhysReg(MI.getOperand(i), Phys, *TRI);
1930 if (VRM->isImplicitlyDefined(VirtReg))
1931 // FIXME: Is this needed?
1932 BuildMI(*MBB, &MI, MI.getDebugLoc(),
1933 TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
1934 continue;
1937 // This virtual register is now known to be a spilled value.
1938 if (!MI.getOperand(i).isUse())
1939 continue; // Handle defs in the loop below (handle use&def here though)
1941 bool AvoidReload = MI.getOperand(i).isUndef();
1942 // Check if it is defined by an implicit def. It should not be spilled.
1943 // Note, this is for correctness reasons. e.g.
1944 // 8 %reg1024<def> = IMPLICIT_DEF
1945 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1946 // The live range [12, 14) is not part of the r1024 live interval since
1947 // it's defined by an implicit def. It will not conflict with the live
1948 // interval of r1025. Now suppose both registers are spilled; you can
1949 // easily see a situation where both registers are reloaded before
1950 // the INSERT_SUBREG and the two target registers overlap.
1951 bool DoReMat = VRM->isReMaterialized(VirtReg);
1952 int SSorRMId = DoReMat
1953 ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
1954 int ReuseSlot = SSorRMId;
1956 // Check to see if this stack slot is available.
1957 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1959 // If this is a sub-register use, make sure the reuse register is in the
1960 // right register class. For example, for x86 not all of the 32-bit
1961 // registers have accessible sub-registers.
1962 // Similarly so for EXTRACT_SUBREG. Consider this:
1963 // EDI = op
1964 // MOV32_mr fi#1, EDI
1965 // ...
1966 // = EXTRACT_SUBREG fi#1
1967 // fi#1 is available in EDI, but it cannot be reused because it's not in
1968 // the right register file.
1969 if (PhysReg && !AvoidReload && SubIdx) {
1970 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1971 if (!RC->contains(PhysReg))
1972 PhysReg = 0;
1975 if (PhysReg && !AvoidReload) {
1976 // This spilled operand might be part of a two-address operand. If this
1977 // is the case, then changing it will necessarily require changing the
1978 // def part of the instruction as well. However, in some cases, we
1979 // aren't allowed to modify the reused register. If none of these cases
1980 // apply, reuse it.
1981 bool CanReuse = true;
1982 bool isTied = MI.isRegTiedToDefOperand(i);
1983 if (isTied) {
1984 // Okay, we have a two address operand. We can reuse this physreg as
1985 // long as we are allowed to clobber the value and there isn't an
1986 // earlier def that has already clobbered the physreg.
1987 CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
1988 Spills.canClobberPhysReg(PhysReg);
1990 // If this is an asm, and a PhysReg alias is used elsewhere as an
1991 // earlyclobber operand, we can't also use it as an input.
1992 if (MI.isInlineAsm()) {
1993 for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
1994 MachineOperand &MOk = MI.getOperand(k);
1995 if (MOk.isReg() && MOk.isEarlyClobber() &&
1996 TRI->regsOverlap(MOk.getReg(), PhysReg)) {
1997 CanReuse = false;
1998 DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
1999 << " for vreg" << VirtReg << ": " << MOk << '\n');
2000 break;
2005 if (CanReuse) {
2006 // If this stack slot value is already available, reuse it!
2007 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2008 DEBUG(dbgs() << "Reusing RM#"
2009 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2010 else
2011 DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2012 DEBUG(dbgs() << " from physreg "
2013 << TRI->getName(PhysReg) << " for vreg"
2014 << VirtReg <<" instead of reloading into physreg "
2015 << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
2016 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2017 MI.getOperand(i).setReg(RReg);
2018 MI.getOperand(i).setSubReg(0);
2020 // The only technical detail we have is that we don't know that
2021 // PhysReg won't be clobbered by a reloaded stack slot that occurs
2022 // later in the instruction. In particular, consider 'op V1, V2'.
2023 // If V1 is available in physreg R0, we would choose to reuse it
2024 // here, instead of reloading it into the register the allocator
2025 // indicated (say R1). However, V2 might have to be reloaded
2026 // later, and it might indicate that it needs to live in R0. When
2027 // this occurs, we need to have information available that
2028 // indicates it is safe to use R1 for the reload instead of R0.
2030 // To further complicate matters, we might conflict with an alias,
2031 // or R0 and R1 might not be compatible with each other. In this
2032 // case, we actually insert a reload for V1 in R1, ensuring that
2033 // we can get at R0 or its alias.
2034 ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
2035 VRM->getPhys(VirtReg), VirtReg);
2036 if (isTied)
2037 // Only mark it clobbered if this is a use&def operand.
2038 ReusedOperands.markClobbered(PhysReg);
2039 ++NumReused;
2041 if (MI.getOperand(i).isKill() &&
2042 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
2044 // The store of this spilled value is potentially dead, but we
2045 // won't know for certain until we've confirmed that the re-use
2046 // above is valid, which means waiting until the other operands
2047 // are processed. For now we just track the spill slot, we'll
2048 // remove it after the other operands are processed if valid.
2050 PotentialDeadStoreSlots.push_back(ReuseSlot);
2053 // Mark it isKill if there are no other uses of the same virtual
2054 // register and it's not a two-address operand. IsKill will be
2055 // unset if the reg is reused.
2056 if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
2057 MI.getOperand(i).setIsKill();
2058 KilledMIRegs.insert(VirtReg);
2061 continue;
2062 } // CanReuse
2064 // Otherwise we have a situation where we have a two-address instruction
2065 // whose mod/ref operand needs to be reloaded. This reload is already
2066 // available in some register "PhysReg", but if we used PhysReg as the
2067 // operand to our 2-addr instruction, the instruction would modify
2068 // PhysReg. This isn't cool if something later uses PhysReg and expects
2069 // to get its initial value.
2071 // To avoid this problem, and to avoid doing a load right after a store,
2072 // we emit a copy from PhysReg into the designated register for this
2073 // operand.
2075 // This case also applies to an earlyclobber'd PhysReg.
2076 unsigned DesignatedReg = VRM->getPhys(VirtReg);
2077 assert(DesignatedReg && "Must map virtreg to physreg!");
2079 // Note that, if we reused a register for a previous operand, the
2080 // register we want to reload into might not actually be
2081 // available. If this occurs, use the register indicated by the
2082 // reuser.
2083 if (ReusedOperands.hasReuses())
2084 DesignatedReg = ReusedOperands.
2085 GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
2086 MaybeDeadStores, RegKills, KillOps, *VRM);
2088 // If the mapped designated register is actually the physreg we have
2089 // incoming, we don't need to insert a dead copy.
2090 if (DesignatedReg == PhysReg) {
2091 // If this stack slot value is already available, reuse it!
2092 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2093 DEBUG(dbgs() << "Reusing RM#"
2094 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2095 else
2096 DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2097 DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
2098 << " for vreg" << VirtReg
2099 << " instead of reloading into same physreg.\n");
2100 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2101 MI.getOperand(i).setReg(RReg);
2102 MI.getOperand(i).setSubReg(0);
2103 ReusedOperands.markClobbered(RReg);
2104 ++NumReused;
2105 continue;
2108 MRI->setPhysRegUsed(DesignatedReg);
2109 ReusedOperands.markClobbered(DesignatedReg);
2111 // Back-schedule reloads and remats.
2112 MachineBasicBlock::iterator InsertLoc =
2113 ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
2114 SSorRMId, TII, *MBB->getParent());
2115 MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
2116 TII->get(TargetOpcode::COPY),
2117 DesignatedReg).addReg(PhysReg);
2118 CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2119 UpdateKills(*CopyMI, TRI, RegKills, KillOps);
2121 // This invalidates DesignatedReg.
2122 Spills.ClobberPhysReg(DesignatedReg);
2124 Spills.addAvailable(ReuseSlot, DesignatedReg);
2125 unsigned RReg =
2126 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
2127 MI.getOperand(i).setReg(RReg);
2128 MI.getOperand(i).setSubReg(0);
2129 DEBUG(dbgs() << '\t' << *prior(InsertLoc));
2130 ++NumReused;
2131 continue;
2132 } // if (PhysReg)
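// Sketch (hypothetical) of the copy-instead-of-reuse case above: fi#1 is
// available in r0, but MI is "r1 = SUB r1<tied>, ..." with the tied
// operand mapped to fi#1 and r0 must not be clobbered, so instead of
// reloading we emit:
//   r1 = COPY r0                    <- CopyMI, flagged ReloadReuse
// which makes fi#1 available in r1 for the instruction to consume.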
2134 // Otherwise, reload it and remember that we have it.
2135 PhysReg = VRM->getPhys(VirtReg);
2136 assert(PhysReg && "Must map virtreg to physreg!");
2138 // Note that, if we reused a register for a previous operand, the
2139 // register we want to reload into might not actually be
2140 // available. If this occurs, use the register indicated by the
2141 // reuser.
2142 if (ReusedOperands.hasReuses())
2143 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2144 Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
2146 MRI->setPhysRegUsed(PhysReg);
2147 ReusedOperands.markClobbered(PhysReg);
2148 if (AvoidReload)
2149 ++NumAvoided;
2150 else {
2151 // Back-schedule reloads and remats.
2152 MachineBasicBlock::iterator InsertLoc =
2153 ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
2154 SSorRMId, TII, *MBB->getParent());
2156 if (DoReMat) {
2157 ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
2158 } else {
2159 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
2160 TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
2161 MachineInstr *LoadMI = prior(InsertLoc);
2162 VRM->addSpillSlotUse(SSorRMId, LoadMI);
2163 ++NumLoads;
2164 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
2166 // This invalidates PhysReg.
2167 Spills.ClobberPhysReg(PhysReg);
2169 // Any stores to this stack slot are not dead anymore.
2170 if (!DoReMat)
2171 MaybeDeadStores[SSorRMId] = NULL;
2172 Spills.addAvailable(SSorRMId, PhysReg);
2173 // Assumes this is the last use. IsKill will be unset if reg is reused
2174 // unless it's a two-address operand.
2175 if (!MI.isRegTiedToDefOperand(i) &&
2176 KilledMIRegs.count(VirtReg) == 0) {
2177 MI.getOperand(i).setIsKill();
2178 KilledMIRegs.insert(VirtReg);
2181 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
2182 DEBUG(dbgs() << '\t' << *prior(InsertLoc));
2184 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2185 MI.getOperand(i).setReg(RReg);
2186 MI.getOperand(i).setSubReg(0);
2189 // Ok - now we can remove stores that have been confirmed dead.
2190 for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
2191 // This was the last use and the spilled value is still available
2192 // for reuse. That means the spill was unnecessary!
2193 int PDSSlot = PotentialDeadStoreSlots[j];
2194 MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
2195 if (DeadStore) {
2196 DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2197 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2198 VRM->RemoveMachineInstrFromMaps(DeadStore);
2199 MBB->erase(DeadStore);
2200 MaybeDeadStores[PDSSlot] = NULL;
2201 ++NumDSE;
2207 /// RewriteMBB - Keep track of which spills are available even after the
2208 /// register allocator is done with them. If possible, avoid reloading vregs.
2209 void
2210 LocalRewriter::RewriteMBB(LiveIntervals *LIs,
2211 AvailableSpills &Spills, BitVector &RegKills,
2212 std::vector<MachineOperand*> &KillOps) {
2214 DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
2215 << MBB->getName() << "':\n");
2217 MachineFunction &MF = *MBB->getParent();
2219 // MaybeDeadStores - When we need to write a value back into a stack slot,
2220 // keep track of the inserted store. If the stack slot value is never read
2221 // (because the value was used from some available register, for example), and
2222 // subsequently stored to, the original store is dead. This map keeps track
2223 // of inserted stores that are not used. If we see a subsequent store to the
2224 // same stack slot, the original store is deleted.
2225 std::vector<MachineInstr*> MaybeDeadStores;
2226 MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
2228 // ReMatDefs - These are rematerializable def MIs which are not deleted.
2229 SmallSet<MachineInstr*, 4> ReMatDefs;
2231 // Keep track of the registers we have already spilled in case there are
2232 // multiple defs of the same register in MI.
2233 SmallSet<unsigned, 8> SpilledMIRegs;
2235 RegKills.reset();
2236 KillOps.clear();
2237 KillOps.resize(TRI->getNumRegs(), NULL);
2239 DistanceMap.clear();
2240 for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
2241 MII != E; ) {
2242 MachineBasicBlock::iterator NextMII = llvm::next(MII);
2244 if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
2245 NextMII = llvm::next(MII);
2247 if (InsertEmergencySpills(MII))
2248 NextMII = llvm::next(MII);
2250 InsertRestores(MII, Spills, RegKills, KillOps);
2252 if (InsertSpills(MII))
2253 NextMII = llvm::next(MII);
2255 bool Erased = false;
2256 bool BackTracked = false;
2257 MachineInstr &MI = *MII;
2259 // Remember DBG_VALUE instructions which reference stack slots.
2260 if (MI.isDebugValue() && MI.getOperand(0).isFI())
2261 Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);
2263 /// ReusedOperands - Keep track of operand reuse in case we need to undo
2264 /// reuse.
2265 ReuseInfo ReusedOperands(MI, TRI);
2267 ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);
2269 DEBUG(dbgs() << '\t' << MI);
2272 // If we have folded references to memory operands, make sure we clear all
2273 // physical registers that may contain the value of the spilled virtual
2274 // register
2276 // Copy the folded virts to a small vector, we may change MI2VirtMap.
2277 SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
2278 // C++0x FTW!
2279 for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
2280 VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
2281 VRM->getFoldedVirts(&MI);
2282 FVRange.first != FVRange.second; ++FVRange.first)
2283 FoldedVirts.push_back(FVRange.first->second);
2285 SmallSet<int, 2> FoldedSS;
2286 for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
2287 unsigned VirtReg = FoldedVirts[FVI].first;
2288 VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
2289 DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);
2291 int SS = VRM->getStackSlot(VirtReg);
2292 if (SS == VirtRegMap::NO_STACK_SLOT)
2293 continue;
2294 FoldedSS.insert(SS);
2295 DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
2297 // If this folded instruction is just a use, check to see if it's a
2298 // straight load from the virt reg slot.
2299 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
2300 int FrameIdx;
2301 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
2302 if (DestReg && FrameIdx == SS) {
2303 // If this spill slot is available, turn it into a copy (or nothing)
2304 // instead of leaving it as a load!
2305 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
2306 DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
2307 if (DestReg != InReg) {
2308 MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
2309 MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
2310 TII->get(TargetOpcode::COPY))
2311 .addReg(DestReg, RegState::Define, DefMO->getSubReg())
2312 .addReg(InReg, RegState::Kill);
2313 // Revisit the copy so we make sure to notice the effects of the
2314 // operation on the destreg (either needing to RA it if it's
2315 // virtual or needing to clobber any values if it's physical).
2316 NextMII = CopyMI;
2317 NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2318 BackTracked = true;
2319 } else {
2320 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2321 // Unset last kill since it's being reused.
2322 InvalidateKill(InReg, TRI, RegKills, KillOps);
2323 Spills.disallowClobberPhysReg(InReg);
2326 InvalidateKills(MI, TRI, RegKills, KillOps);
2327 VRM->RemoveMachineInstrFromMaps(&MI);
2328 MBB->erase(&MI);
2329 Erased = true;
2330 goto ProcessNextInst;
2332 } else {
2333 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2334 SmallVector<MachineInstr*, 4> NewMIs;
2335 if (PhysReg &&
2336 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
2337 MBB->insert(MII, NewMIs[0]);
2338 InvalidateKills(MI, TRI, RegKills, KillOps);
2339 VRM->RemoveMachineInstrFromMaps(&MI);
2340 MBB->erase(&MI);
2341 Erased = true;
2342 --NextMII; // backtrack to the unfolded instruction.
2343 BackTracked = true;
2344 goto ProcessNextInst;
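// Unfold sketch (hypothetical x86-like): if MI is "r2 = ADD [fi#1], 4"
// and fi#1 is known to be available in r0, unfoldMemoryOperand yields
//   r2 = ADD r0, 4                  <- NewMIs[0]; no reload emitted
// and NextMII backtracks so the unfolded instruction is revisited.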
2349 // If this reference is not a use, any previous store is now dead.
2350 // Otherwise, the store to this stack slot is not dead anymore.
2351 MachineInstr* DeadStore = MaybeDeadStores[SS];
2352 if (DeadStore) {
2353 bool isDead = !(MR & VirtRegMap::isRef);
2354 MachineInstr *NewStore = NULL;
2355 if (MR & VirtRegMap::isModRef) {
2356 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2357 SmallVector<MachineInstr*, 4> NewMIs;
2358 // We can reuse this physreg as long as we are allowed to clobber
2359 // the value and there isn't an earlier def that has already clobbered
2360 // the physreg.
2361 if (PhysReg &&
2362 !ReusedOperands.isClobbered(PhysReg) &&
2363 Spills.canClobberPhysReg(PhysReg) &&
2364 !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
2365 MachineOperand *KillOpnd =
2366 DeadStore->findRegisterUseOperand(PhysReg, true);
2367 // Note, if the store is storing a sub-register, it's possible the
2368 // super-register is needed below.
2369 if (KillOpnd && !KillOpnd->getSubReg() &&
2370 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
2371 MBB->insert(MII, NewMIs[0]);
2372 NewStore = NewMIs[1];
2373 MBB->insert(MII, NewStore);
2374 VRM->addSpillSlotUse(SS, NewStore);
2375 InvalidateKills(MI, TRI, RegKills, KillOps);
2376 VRM->RemoveMachineInstrFromMaps(&MI);
2377 MBB->erase(&MI);
2378 Erased = true;
2379 --NextMII;
2380 --NextMII; // backtrack to the unfolded instruction.
2381 BackTracked = true;
2382 isDead = true;
2383 ++NumSUnfold;
2388 if (isDead) { // Previous store is dead.
2389 // If we get here, the store is dead, nuke it now.
2390 DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2391 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2392 VRM->RemoveMachineInstrFromMaps(DeadStore);
2393 MBB->erase(DeadStore);
2394 if (!NewStore)
2395 ++NumDSE;
2398 MaybeDeadStores[SS] = NULL;
2399 if (NewStore) {
2400 // Treat this store as a spill merged into a copy. That makes the
2401 // stack slot value available.
2402 VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
2403 goto ProcessNextInst;
2407 // If the spill slot value is available, and this is a new definition of
2408 // the value, the value is not available anymore.
2409 if (MR & VirtRegMap::isMod) {
2410 // Notice that the value in this stack slot has been modified.
2411 Spills.ModifyStackSlotOrReMat(SS);
2413 // If this is *just* a mod of the value, check to see if this is just a
2414 // store to the spill slot (i.e. the spill got merged into the copy). If
2415 // so, realize that the vreg is available now, and add the store to the
2416 // MaybeDeadStore info.
2417 int StackSlot;
2418 if (!(MR & VirtRegMap::isRef)) {
2419 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
2420 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
2421 "Src hasn't been allocated yet?");
2423 if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
2424 Spills, RegKills, KillOps, TRI)) {
2425 NextMII = llvm::next(MII);
2426 BackTracked = true;
2427 goto ProcessNextInst;
2430 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
2431 // this as a potentially dead store in case there is a subsequent
2432 // store into the stack slot without a read from it.
2433 MaybeDeadStores[StackSlot] = &MI;
2435 // If the stack slot value was previously available in some other
2436 // register, change it now. Otherwise, make the register
2437 // available in PhysReg.
2438 Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
2444 // Process all of the spilled defs.
2445 SpilledMIRegs.clear();
2446 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2447 MachineOperand &MO = MI.getOperand(i);
2448 if (!(MO.isReg() && MO.getReg() && MO.isDef()))
2449 continue;
2451 unsigned VirtReg = MO.getReg();
2452 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
2453 // Check to see if this is a noop copy. If so, eliminate the
2454 // instruction before considering the dest reg to be changed.
2455 // Also check if it's copying from an "undef"; if so, we can't
2456 // eliminate this or else the undef marker is lost and it will
2457 // confuse the scavenger. This is extremely rare.
2458 if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
2459 MI.getNumOperands() == 2) {
2460 ++NumDCE;
2461 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2462 SmallVector<unsigned, 2> KillRegs;
2463 InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
2464 if (MO.isDead() && !KillRegs.empty()) {
2465 // Source register or an implicit super/sub-register use is killed.
2466 assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
2467 // Last def is now dead.
2468 TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
2470 VRM->RemoveMachineInstrFromMaps(&MI);
2471 MBB->erase(&MI);
2472 Erased = true;
2473 Spills.disallowClobberPhysReg(VirtReg);
2474 goto ProcessNextInst;
2477 // If it's not a no-op copy, it clobbers the value in the destreg.
2478 Spills.ClobberPhysReg(VirtReg);
2479 ReusedOperands.markClobbered(VirtReg);
2481 // Check to see if this instruction is a load from a stack slot into
2482 // a register. If so, this provides the stack slot value in the reg.
2483 int FrameIdx;
2484 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
2485 assert(DestReg == VirtReg && "Unknown load situation!");
2487 // If it is a folded reference, then it's not safe to clobber.
2488 bool Folded = FoldedSS.count(FrameIdx);
2489 // Otherwise, if it wasn't available, remember that it is now!
2490 Spills.addAvailable(FrameIdx, DestReg, !Folded);
2491 goto ProcessNextInst;
2494 continue;
2497 unsigned SubIdx = MO.getSubReg();
2498 bool DoReMat = VRM->isReMaterialized(VirtReg);
2499 if (DoReMat)
2500 ReMatDefs.insert(&MI);
2502 // The only vregs left are stack slot definitions.
2503 int StackSlot = VRM->getStackSlot(VirtReg);
2504 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
2506 // If this def is part of a two-address operand, make sure to execute
2507 // the store from the correct physical register.
2508 unsigned PhysReg;
2509 unsigned TiedOp;
2510 if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
2511 PhysReg = MI.getOperand(TiedOp).getReg();
2512 if (SubIdx) {
2513 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
2514 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
2515 "Can't find corresponding super-register!");
2516 PhysReg = SuperReg;
2518 } else {
2519 PhysReg = VRM->getPhys(VirtReg);
2520 if (ReusedOperands.isClobbered(PhysReg)) {
2521 // Another def has taken the assigned physreg. It must have been a
2522 // use&def which got it due to reuse. Undo the reuse!
2523 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2524 Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
2528 assert(PhysReg && "VR not assigned a physical register?");
2529 MRI->setPhysRegUsed(PhysReg);
2530 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2531 ReusedOperands.markClobbered(RReg);
2532 MI.getOperand(i).setReg(RReg);
2533 MI.getOperand(i).setSubReg(0);
2535 if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
2536 MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
2537 SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
2538 LastStore, Spills, ReMatDefs, RegKills, KillOps);
2539 NextMII = llvm::next(MII);
2541 // Check to see if this is a noop copy. If so, eliminate the
2542 // instruction before considering the dest reg to be changed.
2543 if (MI.isIdentityCopy()) {
2544 ++NumDCE;
2545 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2546 InvalidateKills(MI, TRI, RegKills, KillOps);
2547 VRM->RemoveMachineInstrFromMaps(&MI);
2548 MBB->erase(&MI);
2549 Erased = true;
2550 UpdateKills(*LastStore, TRI, RegKills, KillOps);
2551 goto ProcessNextInst;
2555 ProcessNextInst:
2556 // Delete dead instructions without side effects.
2557 if (!Erased && !BackTracked && isSafeToDelete(MI)) {
2558 InvalidateKills(MI, TRI, RegKills, KillOps);
2559 VRM->RemoveMachineInstrFromMaps(&MI);
2560 MBB->erase(&MI);
2561 Erased = true;
2563 if (!Erased)
2564 DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
2565 if (!Erased && !BackTracked) {
2566 for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
2567 UpdateKills(*II, TRI, RegKills, KillOps);
2569 MII = NextMII;
2574 llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
2575 switch (RewriterOpt) {
2576 default: llvm_unreachable("Unreachable!");
2577 case local:
2578 return new LocalRewriter();
2579 case trivial:
2580 return new TrivialRewriter();