//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;
STATISTIC(NumDSE      , "Number of dead stores elided");
STATISTIC(NumDSS      , "Number of dead spill slots removed");
STATISTIC(NumCommutes , "Number of instructions commuted");
STATISTIC(NumDRM      , "Number of re-materializable defs elided");
STATISTIC(NumStores   , "Number of stores added");
STATISTIC(NumPSpills  , "Number of physical register spills");
STATISTIC(NumOmitted  , "Number of reloads omitted");
STATISTIC(NumAvoided  , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified , "Number of available reloads turned into copies");
STATISTIC(NumReMats   , "Number of re-materializations");
STATISTIC(NumLoads    , "Number of loads added");
STATISTIC(NumReused   , "Number of values reused");
STATISTIC(NumDCE      , "Number of copies elided");
STATISTIC(NumSUnfold  , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of mod/ref folded instructions unfolded");
namespace {
  enum RewriterName { local, trivial };
}

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use (default=local)"),
            cl::Prefix,
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd),
            cl::init(local));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}
/// substitutePhysReg - Replace virtual register in MachineOperand with a
/// physical register. Do the right thing with the sub-register index.
/// Note that operands may be added, so the MO reference is no longer valid.
static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
                              const TargetRegisterInfo &TRI) {
  MO.substPhysReg(Reg, TRI);

  // Any kill flags apply to the full virtual register, so they also apply to
  // the full physical register.
  // We assume that partial defs have already been decorated with a super-reg
  // <imp-def> operand by LiveIntervals.
  MachineInstr &MI = *MO.getParent();
  if (MO.isUse() && !MO.isUndef() &&
      (MO.isKill() || MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
    MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
}
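
// For illustration only (hypothetical operand, x86 names): if MO is the use
// "%reg1024<kill>" and %reg1024 is assigned the 16-bit sub-register of EAX,
// substPhysReg rewrites the operand to AX, and the kill flag is then
// re-applied to the full physical register EAX by addRegisterKilled.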
namespace {

/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(dbgs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(dbgs() << "**** Machine Instrs"
          << "(NOTE! Does not include spills and reloads!) ****\n");
    DEBUG(MF.dump());
    MachineRegisterInfo *mri = &MF.getRegInfo();
    const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      const LiveInterval *li = liItr->second;
      unsigned reg = li->reg;

      if (TargetRegisterInfo::isPhysicalRegister(reg)) {
        if (!li->empty())
          mri->setPhysRegUsed(reg);
      }
      else {
        if (!VRM.hasPhys(reg))
          continue;
        unsigned pReg = VRM.getPhys(reg);
        mri->setPhysRegUsed(pReg);
        // Copy the register use-list before traversing it.
        SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
        for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
               E = mri->reg_end(); I != E; ++I)
          reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
        for (unsigned N = 0; N != reglist.size(); ++N)
          substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
                            pReg, *tri);
        changed |= !reglist.empty();
      }
    }

    DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    return changed;
  }

};

}
// ************************************************************************ //

namespace {

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each physreg.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when the value may be clobbered.
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(dbgs() << " in physreg " << TRI->getName(Reg)
                 << (CanClobber ? " canclobber" : "") << "\n");
  }
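
  // Worked example (values purely illustrative): after
  //   addAvailable(4, R, /*CanClobber=*/true)
  // SpillSlotsOrReMatsAvailable[4] == (R << 1) | 1, so
  // getSpillSlotOrReMatPhysReg(4) returns R and canClobberPhysRegForSS(4)
  // returns true. Passing CanClobber = false would leave the low bit clear.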
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }
  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      ++I;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }
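
  // For example (illustrative): if PhysReg currently holds SS#1 (clobberable)
  // and SS#2 (not clobberable), canClobberPhysReg(PhysReg) returns false,
  // since clobbering the register would lose the still-needed value of SS#2.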
  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// ClobberSharingStackSlots - When a register mapped to a stack slot changes,
  /// other stack slots sharing the same register are no longer valid.
  void ClobberSharingStackSlots(int StackSlot);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set; this keeps the register scavenger and the post-allocation
  /// scheduler happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};

}
// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock
// issues.
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on 16-bit targets like PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        goto stop;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      goto stop;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        goto stop;
    NewInsertLoc = Prev;
  }
stop:;

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
namespace {
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};
/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  ///   t1 := op t2, t3
  ///   t2 <- assigned r0 for use by the reload but ended up reuse r1
  ///   t3 <- assigned r1 for use by the reload but ended up reuse r0
  ///   t1 <- desires r1
  ///         sees r1 is taken by t2, tries t2's reload register r0
  ///         sees r0 is taken by t3, tries t3's reload register r1
  ///         sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};

}
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock *> &Succs){
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
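
// For example (illustrative CFG): if MBB branches to B1 and B2, where B1's
// only predecessor is MBB but B2 is also reachable from another block, only
// B1 is appended to Succs.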
/// ResurrectConfirmedKill - Helper for ResurrectKill. This register is killed
/// but not re-defined and it's being reused. Remove the kill flag for the
/// register and unset the kill's marker and last kill operand.
static void ResurrectConfirmedKill(unsigned Reg, const TargetRegisterInfo* TRI,
                                   BitVector &RegKills,
                                   std::vector<MachineOperand*> &KillOps) {
  DEBUG(dbgs() << "Resurrect " << TRI->getName(Reg) << "\n");

  MachineOperand *KillOp = KillOps[Reg];
  KillOp->setIsKill(false);
  // KillOps[Reg] might be a def of a super-register.
  unsigned KReg = KillOp->getReg();
  if (!RegKills[KReg])
    return;

  assert(KillOps[KReg]->getParent() == KillOp->getParent() &&
         "invalid superreg kill flags");
  KillOps[KReg] = NULL;
  RegKills.reset(KReg);

  // If it's a def of a super-register, its other sub-registers are no
  // longer killed as well.
  for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
    DEBUG(dbgs() << "  Resurrect subreg " << TRI->getName(*SR) << "\n");

    assert(KillOps[*SR]->getParent() == KillOp->getParent() &&
           "invalid subreg kill flags");
    KillOps[*SR] = NULL;
    RegKills.reset(*SR);
  }
}
/// ResurrectKill - Invalidate kill info associated with a previous MI. An
/// optimization may have decided that it's safe to reuse a previously killed
/// register. If we fail to erase the invalid kill flags, then the register
/// scavenger may later clobber the register used by this MI. Note that this
/// must be done even if this MI is being deleted! Consider:
///
///   USE $r1 (vreg1) <kill>
///   ...
///   $r1(vreg3) = COPY $r1 (vreg2)
///
/// RegAlloc has smartly assigned all three vregs to the same physreg. Initially
/// vreg1's only use is a kill. The rewriter doesn't know it should be live
/// until it rewrites vreg2. At that point it sees that the copy is dead and
/// deletes it. However, deleting the copy implicitly forwards liveness of $r1
/// (it's copy coalescing). We must resurrect $r1 by removing the kill flag at
/// vreg1 before deleting the copy.
static void ResurrectKill(MachineInstr &MI, unsigned Reg,
                          const TargetRegisterInfo* TRI, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
    ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
    return;
  }
  // No previous kill for this reg. Check for subreg kills as well, e.g.:
  //       = d4 <avoiding reload>
  for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
    unsigned SReg = *SR;
    if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI)
      ResurrectConfirmedKill(SReg, TRI, RegKills, KillOps);
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      // This operand was the kill, now no longer.
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        if (RegKills[*SR]) {
          assert(KillOps[*SR] == &MO && "bad subreg kill flags");
          KillOps[*SR] = NULL;
          RegKills.reset(*SR);
        }
      }
    } else {
      // This operand may have reused a previously killed reg. Keep it live in
      // case it continues to be used after erasing this instruction.
      ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef,
                             const TargetRegisterInfo *TRI) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() == 0 ||
          (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  // These do not affect kill info at all.
  if (MI.isDebugValue())
    return;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // This operand may have reused a previously killed reg. Keep it live.
    ResurrectKill(MI, Reg, TRI, RegKills, KillOps);

    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
    for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
#endif
  TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    substitutePhysReg(MO, Phys, *TRI);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
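
// For example (x86 names used purely for illustration): given RC = GR32,
// SubReg = AL and SubIdx naming the low 8-bit sub-register, this returns EAX.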
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n");
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The registers may still be
/// available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
    else
      DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set; this keeps the register scavenger and the post-allocation
/// scheduler happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                        BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      if (RegKills[Reg])
        ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = llvm::next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
void AvailableSpills::ClobberSharingStackSlots(int StackSlot) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(StackSlot);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;

  // Erase entries in PhysRegsAvailable for other stack slots.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  while (I != PhysRegsAvailable.end() && I->first == Reg) {
    std::multimap<unsigned, int>::iterator NextI = llvm::next(I);
    if (I->second != StackSlot) {
      DEBUG(dbgs() << "Clobbered sharing SS#" << I->second << " in "
                   << PrintReg(Reg, TRI) << '\n');
      SpillSlotsOrReMatsAvailable.erase(I->second);
      PhysRegsAvailable.erase(I);
    }
    I = NextI;
  }
}
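
// For example (illustrative): if register R is recorded as holding both SS#3
// and SS#5 and the value for SS#3 is redefined in R, then
// ClobberSharingStackSlots(3) erases the SS#5 entries, since R no longer
// holds SS#5's value.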
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      if (TRI->regsOverlap(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // MI may be using only a sub-register of PhysRegUsed.
        unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
        unsigned SubIdx = 0;
        assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
               "A reuse cannot be a virtual register");
        if (PRRU != RealPhysRegUsed) {
          // What was the sub-register index?
          SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
          assert(SubIdx &&
                 "Operand physreg is not a sub-register of PhysRegUsed");
        }

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                              MF, MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
        int SSorRMId = DoReMat
          ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                           DoReMat, SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                        TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC, TRI);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) :NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(dbgs() << '\t' << *prior(InsertLoc));

        DEBUG(dbgs() << "Reuse undone!\n");
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}
// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by looking
/// at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  unsigned Count = 0;
  while (Count < 2) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    MII = PrevMI;

    if (PrevMI->isDebugValue())
      continue; // Skip over dbg_value instructions.
    ++Count;

    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          RC->contains(Kill))
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }
  }

  return 0;
}
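
// For example (illustrative): if one of the two preceding instructions has a
// use "%ebx<kill>", and EBX is allocatable, in RC, and neither defined nor
// used again by the instructions scanned after it, FindFreeRegister may
// return EBX as the scratch register.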
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
                         const TargetRegisterInfo &TRI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      substitutePhysReg(MO, PhysReg, TRI);
  }
}
namespace {
  struct RefSorter {
    bool operator()(const std::pair<MachineInstr*, int> &A,
                    const std::pair<MachineInstr*, int> &B) {
      return A.second < B.second;
    }
  };
}
// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

namespace {

class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  VirtRegMap *VRM;
  LiveIntervals *LIs;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
  DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;

  MachineBasicBlock *MBB;       // Basic block currently being processed.

public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs);

private:
  void EraseInstr(MachineInstr *MI) {
    VRM->RemoveMachineInstrFromMaps(MI);
    LIs->RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps);

  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI);

  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps);

  void TransferDeadness(unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool InsertEmergencySpills(MachineInstr *MI);

  bool InsertRestores(MachineInstr *MI,
                      AvailableSpills &Spills,
                      BitVector &RegKills,
                      std::vector<MachineOperand*> &KillOps);

  bool InsertSpills(MachineInstr *MI);

  void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
                   std::vector<MachineInstr*> &MaybeDeadStores,
                   BitVector &RegKills,
                   ReuseInfo &ReusedOperands,
                   std::vector<MachineOperand*> &KillOps);

  void RewriteMBB(LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps);
};

}
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
                                         LiveIntervals* lis) {
  MRI = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  VRM = &vrm;
  LIs = lis;
  AllocatableRegs = TRI->getAllocatableSet(MF);
  DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
        << MF.getFunction()->getName() << "':\n");
  DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
        " reloads!) ****\n");
  DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SingleEntrySuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(LIs, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: There may be more than one such successor, each of which has
        // MBB as its only predecessor.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(LIs, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
  DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM->getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT) {
    for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
      SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
      if (!VRM->isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
          MachineInstr *DVMI = DbgValues[j];
          DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
          EraseInstr(DVMI);
        }
        ++NumDSS;
      }
      DbgValues.clear();
    }
  }
  Slot2DbgValues.clear();

  return true;
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
///     xorq  %r12<kill>, %r13
///     addq  %rax, -184(%rbp)
///     addq  %r13, -184(%rbp)
/// ==>
///     xorq  %r12<kill>, %r13
///     movq  -184(%rbp), %r12
///     addq  %rax, %r12
///     addq  %r13, %r12
///     movq  %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
                  MachineBasicBlock::iterator &MII,
                  std::vector<MachineInstr*> &MaybeDeadStores,
                  AvailableSpills &Spills,
                  BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator NextMII = llvm::next(MII);
  // Skip over dbg_value instructions.
  while (NextMII != MBB->end() && NextMII->isDebugValue())
    NextMII = llvm::next(NextMII);
  if (NextMII == MBB->end())
    return false;

  if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
    return false;

  // Now let's see if the last couple of instructions happen to have freed up
  // a register.
  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
  if (!PhysReg)
    return false;

  MachineFunction &MF = *MBB->getParent();
  TRI = MF.getTarget().getRegisterInfo();
  MachineInstr &MI = *MII;
  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // If the next instruction also folds the same SS modref and can be unfolded,
  // then it's worthwhile to issue a load from SS into the free register and
  // then unfold these instructions.
  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // Back-schedule reloads and remats.
  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);

  // Load from SS to the spare physical register.
  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
  // This invalidates Phys.
  Spills.ClobberPhysReg(PhysReg);
  // Remember it's available.
  Spills.addAvailable(SS, PhysReg);
  MaybeDeadStores[SS] = NULL;

  // Unfold current MI.
  SmallVector<MachineInstr*, 4> NewMIs;
  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
    llvm_unreachable("Unable to unfold the load / store folding instruction!");
  assert(NewMIs.size() == 1);
  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
  VRM->transferRestorePts(&MI, NewMIs[0]);
  MII = MBB->insert(MII, NewMIs[0]);
  InvalidateKills(MI, TRI, RegKills, KillOps);
  EraseInstr(&MI);
  ++NumModRefUnfold;

  // Unfold next instructions that fold the same SS.
  do {
    MachineInstr &NextMI = *NextMII;
    NextMII = llvm::next(NextMII);
    NewMIs.clear();
    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
    VRM->transferRestorePts(&NextMI, NewMIs[0]);
    MBB->insert(NextMII, NewMIs[0]);
    InvalidateKills(NextMI, TRI, RegKills, KillOps);
    EraseInstr(&NextMI);
    ++NumModRefUnfold;

    // Skip over dbg_value instructions.
    while (NextMII != MBB->end() && NextMII->isDebugValue())
      NextMII = llvm::next(NextMII);
    if (NextMII == MBB->end())
      break;
  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));

  // Store the value back into SS.
  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
  MachineInstr *StoreMI = prior(NextMII);
  VRM->addSpillSlotUse(SS, StoreMI);
  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

  return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl %eax, -32(%ebp)
///     movl -36(%ebp), %eax
///     orl  %eax, -32(%ebp)
/// ==>
///     orl  -36(%ebp), %eax
///     mov  %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                 std::vector<MachineInstr*> &MaybeDeadStores,
                 AvailableSpills &Spills,
                 BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  MachineFunction &MF = *MBB->getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator, so
    // increment the iterator first.
    ++I;
    if (VRM->isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM->getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc) {
    if (!UnfoldVR)
      return false;

    // Look for other unfolding opportunities.
    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
                             RegKills, KillOps);
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM->isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM->getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM->isReMaterialized(VirtReg))
      continue;
    int SS = VRM->getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM->hasPhys(VirtReg)) {
      PhysReg = VRM->getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      MBB->insert(MII, NewMI);
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
      NewMI->eraseFromParent();
      if (FoldedMI) {
        VRM->addSpillSlotUse(SS, FoldedMI);
        if (!VRM->hasPhys(UnfoldVR))
          VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = FoldedMI;
        InvalidateKills(MI, TRI, RegKills, KillOps);
        EraseInstr(&MI);
        return true;
      }
    }
  }

  return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where SrcReg
/// is r1 and it is tied to r0. Return true if after commuting this
/// instruction it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
                                      const TargetInstrDesc &TID,
                                      unsigned SrcReg,
                                      const TargetInstrInfo *TII,
                                      unsigned &DstIdx) {
  if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
    return false;
  if (!DefMI->getOperand(1).isReg() ||
      DefMI->getOperand(1).getReg() != SrcReg)
    return false;
  unsigned DefIdx;
  if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
    return false;
  unsigned SrcIdx1, SrcIdx2;
  if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
    return false;
  if (SrcIdx1 == 1 && SrcIdx2 == 2) {
    DstIdx = 2;
    return true;
  }
  return false;
}
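
// For example (illustrative): given "r0 = ADD r1, r2" where r1 is tied to r0,
// CommuteChangesDestination returns true and sets DstIdx = 2, because after
// commuting the instruction becomes "r0 = ADD r2, r1".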
/// CommuteToFoldReload -
/// Look for
///   r1 = load fi#1
///   r1 = op r1, r2<kill>
///   store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
///   r2 = op r2, fi#1
///   store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                    unsigned VirtReg, unsigned SrcReg, int SS,
                    AvailableSpills &Spills,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps,
                    const TargetRegisterInfo *TRI) {
  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB->begin() &&
      TID.isCommutable() &&
      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;

    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;

    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;

    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    MBB->insert(MII, CommutedMI);
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    CommutedMI->eraseFromParent();
    if (!FoldedMI)
      return false;

    VRM->addSpillSlotUse(SS, FoldedMI);
    VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
    TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM->addSpillSlotUse(SS, StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = FoldedMI;  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
    EraseInstr(ReloadMI);
    InvalidateKills(*DefMI, TRI, RegKills, KillOps);
    EraseInstr(DefMI);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    EraseInstr(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalRewriter::
SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                    int Idx, unsigned PhysReg, int StackSlot,
                    const TargetRegisterClass *RC,
                    bool isAvailable, MachineInstr *&LastStore,
                    AvailableSpills &Spills,
                    SmallSet<MachineInstr*, 4> &ReMatDefs,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
  TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
                           TRI);
  MachineInstr *StoreMI = prior(oldNextMII);
  VRM->addSpillSlotUse(StackSlot, StoreMI);
  DEBUG(dbgs() << "Store:\t" << *StoreMI);

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB->begin();
    if (CheckDef)
      --PrevMII;
    EraseInstr(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side effects.
            EraseInstr(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
  // the last of multiple instructions is the actual store.
  LastStore = prior(oldNextMII);

  // If the stack slot value was previously available in some other
  // register, change it now. Otherwise, make the value available in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
  const TargetInstrDesc &TID = MI.getDesc();
  if (TID.mayLoad() || TID.mayStore() || TID.isTerminator() ||
      TID.isCall() || TID.isBarrier() || TID.isReturn() ||
      MI.isLabel() || MI.isDebugValue() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Technically speaking inline asm without side effects and no defs can
  // still be deleted. But there is so much bad inline asm code out there,
  // we should let them be.
  if (MI.isInlineAsm())
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef() && !MO.isDead())
      return false;
    if (MO.isUse() && MO.isKill())
      // FIXME: We can't remove kill markers or else the scavenger will assert.
      // An alternative is to add an ADD pseudo instruction to replace kill
      // markers.
      return false;
  }
  return true;
}
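
// Note that isSafeToDelete errs on the side of keeping instructions: loads,
// stores, calls, terminators, inline asm, and anything carrying a kill
// marker or unmodeled side effects are all treated as not deletable.
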
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalRewriter::
TransferDeadness(unsigned Reg, BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  SmallPtrSet<MachineInstr*, 4> Seens;
  SmallVector<std::pair<MachineInstr*, int>,8> Refs;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
         RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end())
      continue;
    if (Seens.insert(UDMI))
      Refs.push_back(std::make_pair(UDMI, DI->second));
  }

  if (Refs.empty())
    return;
  std::sort(Refs.begin(), Refs.end(), RefSorter());

  while (!Refs.empty()) {
    MachineInstr *LastUDMI = Refs.back().first;
    Refs.pop_back();

    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (LastUDMI->isRegTiedToDefOperand(i))
        return;
    }
    if (LastUD->isDef()) {
      // If the instruction has no side effect, delete it and propagate
      // backward further. Otherwise, mark it dead and we are done.
      if (!isSafeToDelete(*LastUDMI)) {
        LastUD->setIsDead();
        break;
      }
      EraseInstr(LastUDMI);
    } else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
      break;
    }
  }
}
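
// For example, if a dead 'r1 = COPY r1' is being removed and the closest
// preceding reference of r1 (in DistanceMap order) is a plain use, that use
// becomes a kill; if it is a def, the def is marked dead, or the defining
// instruction is deleted outright when isSafeToDelete allows it.
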
/// InsertEmergencySpills - Insert emergency spills before MI if requested by
/// VRM. Return true if spills were inserted.
bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
  if (!VRM->hasEmergencySpills(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  SmallSet<int, 4> UsedSS;
  std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
  for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
    unsigned PhysReg = EmSpills[i];
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
    assert(RC && "Unable to determine register class!");
    int SS = VRM->getEmergencySpillSlot(RC);
    if (UsedSS.count(SS))
      llvm_unreachable("Need to spill more than one physical register!");
    UsedSS.insert(SS);
    TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
    MachineInstr *StoreMI = prior(MII);
    VRM->addSpillSlotUse(SS, StoreMI);

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
                       TII, *MBB->getParent());

    TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);

    MachineInstr *LoadMI = prior(InsertLoc);
    VRM->addSpillSlotUse(SS, LoadMI);
    ++NumPSpills;
    DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
  }
  return true;
}
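
// An emergency spill brackets MI with a store/reload of a physreg so the
// register scavenger has a free register while MI executes, using one
// reserved stack slot per register class; the llvm_unreachable above fires
// if two registers would need the same slot at one program point.
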
/// InsertRestores - Restore registers before MI if requested by VRM. Return
/// true if any instructions were inserted.
bool LocalRewriter::InsertRestores(MachineInstr *MI,
                                   AvailableSpills &Spills,
                                   BitVector &RegKills,
                                   std::vector<MachineOperand*> &KillOps) {
  if (!VRM->isRestorePt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
  for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
    unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    unsigned Phys = VRM->getPhys(VirtReg);
    MRI->setPhysRegUsed(Phys);

    // Check if the value being restored is available. If so, it must be
    // from a predecessor BB that falls through into this BB. We do not
    // expect:
    // BB1:
    // r1 = load fi#1
    // ...
    //    = r1<kill>
    // ... # r1 not clobbered
    // ...
    //    = load fi#1
    bool DoReMat = VRM->isReMaterialized(VirtReg);
    int SSorRMId = DoReMat
      ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
    unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
    if (InReg == Phys) {
      // If the value is already available in the expected register, save
      // a reload / remat.
      if (DoReMat)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
                   << " instead of reloading into physreg "
                   << TRI->getName(Phys) << '\n');

      // Reusing a physreg may resurrect it. But we expect ProcessUses to
      // update the kill flags for the current instruction after processing it.
      ++NumOmitted;
      continue;
    } else if (InReg && InReg != Phys) {
      if (DoReMat)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
                   << " by copying it into physreg "
                   << TRI->getName(Phys) << '\n');

      // If the reloaded / remat value is available in another register,
      // copy it to the desired register.

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                         *MBB->getParent());
      MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
                                     TII->get(TargetOpcode::COPY), Phys)
                               .addReg(InReg, RegState::Kill);

      // This invalidates Phys.
      Spills.ClobberPhysReg(Phys);
      // Remember it's available.
      Spills.addAvailable(SSorRMId, Phys);

      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      DEBUG(dbgs() << '\t' << *CopyMI);
      ++NumCopified;
      continue;
    }

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                       *MBB->getParent());

    if (VRM->isReMaterialized(VirtReg)) {
      ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
    } else {
      const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
      TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
      MachineInstr *LoadMI = prior(InsertLoc);
      VRM->addSpillSlotUse(SSorRMId, LoadMI);
      ++NumLoads;
      DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
    }

    // This invalidates Phys.
    Spills.ClobberPhysReg(Phys);
    // Remember it's available.
    Spills.addAvailable(SSorRMId, Phys);

    UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
    DEBUG(dbgs() << '\t' << *prior(MII));
  }
  return true;
}
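
// Per restored register there are three cases above: the value is already in
// the assigned physreg (nothing is emitted, NumOmitted), it is live in some
// other physreg (a COPY is emitted, NumCopified), or it has to be
// rematerialized or reloaded from its stack slot.
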
/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
  if (!VRM->isSpillPt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<std::pair<unsigned,bool> > &SpillRegs =
    VRM->getSpillPtSpills(MI);
  for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
    unsigned VirtReg = SpillRegs[i].first;
    bool isKill = SpillRegs[i].second;
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
    unsigned Phys = VRM->getPhys(VirtReg);
    int StackSlot = VRM->getStackSlot(VirtReg);
    MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
    TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
                             RC, TRI);
    MachineInstr *StoreMI = prior(oldNextMII);
    VRM->addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(dbgs() << "Store:\t" << *StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
  }
  return true;
}
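
// Unlike SpillRegToStackSlot, spill-point stores make no attempt at dead
// store elimination; they are simply recorded with VirtRegMap::isMod so the
// VRM knows the slot's value was redefined here.
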
/// ProcessUses - Process all of MI's spilled operands and all available
/// operands.
void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
                                std::vector<MachineInstr*> &MaybeDeadStores,
                                BitVector &RegKills,
                                ReuseInfo &ReusedOperands,
                                std::vector<MachineOperand*> &KillOps) {
  SmallSet<unsigned, 2> KilledMIRegs;
  SmallVector<unsigned, 4> VirtUseOps;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;   // Ignore non-register operands.

    unsigned VirtReg = MO.getReg();

    if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
      // Ignore physregs for spilling, but remember that it is used by this
      // function.
      MRI->setPhysRegUsed(VirtReg);
      continue;
    }

    // We want to process implicit virtual register uses first.
    if (MO.isImplicit())
      // If the virtual register is implicitly defined, emit an implicit_def
      // before so the scavenger knows it's "defined".
      // FIXME: This is a horrible hack done by the register allocator to
      // remat a definition with a virtual register operand.
      VirtUseOps.insert(VirtUseOps.begin(), i);
    else
      VirtUseOps.push_back(i);

    // A partial def causes problems because the same operand both reads and
    // writes the register. This rewriter is designed to rewrite uses and defs
    // separately, so a partial def would already have been rewritten to a
    // physreg by the time we get to processing defs.
    // Add an implicit use operand to model the partial def.
    if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
        MI.findRegisterUseOperandIdx(VirtReg) == -1) {
      VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
      MI.addOperand(MachineOperand::CreateReg(VirtReg,
                                              false,  // isDef
                                              true)); // isImplicit
      DEBUG(dbgs() << "Partial redef: " << MI);
    }
  }

  // Process all of the spilled uses and all non-spilled reg references.
  SmallVector<int, 2> PotentialDeadStoreSlots;
  KilledMIRegs.clear();
  for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
    unsigned i = VirtUseOps[j];
    unsigned VirtReg = MI.getOperand(i).getReg();
    assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
           "Not a virtual register?");

    unsigned SubIdx = MI.getOperand(i).getSubReg();
    if (VRM->isAssignedReg(VirtReg)) {
      // This virtual register was assigned a physreg!
      unsigned Phys = VRM->getPhys(VirtReg);
      MRI->setPhysRegUsed(Phys);
      if (MI.getOperand(i).isDef())
        ReusedOperands.markClobbered(Phys);
      substitutePhysReg(MI.getOperand(i), Phys, *TRI);
      if (VRM->isImplicitlyDefined(VirtReg))
        // FIXME: Is this needed?
        BuildMI(*MBB, &MI, MI.getDebugLoc(),
                TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
      continue;
    }

    // This virtual register is now known to be a spilled value.
    if (!MI.getOperand(i).isUse())
      continue;  // Handle defs in the loop below (handle use&def here though)

    bool AvoidReload = MI.getOperand(i).isUndef();
    // Check if it is defined by an implicit def. It should not be spilled.
    // Note, this is for correctness reasons. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) is not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflict with the live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    bool DoReMat = VRM->isReMaterialized(VirtReg);
    int SSorRMId = DoReMat
      ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
    int ReuseSlot = SSorRMId;

    // Check to see if this stack slot is available.
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

    // If this is a sub-register use, make sure the reuse register is in the
    // right register class. For example, for x86 not all of the 32-bit
    // registers have accessible sub-registers.
    // Similarly so for EXTRACT_SUBREG. Consider this:
    // EDI = op
    // MOV32_mr fi#1, EDI
    // ...
    //       = EXTRACT_SUBREG fi#1
    // fi#1 is available in EDI, but it cannot be reused because it's not in
    // the right register file.
    if (PhysReg && !AvoidReload && SubIdx) {
      const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
      if (!RC->contains(PhysReg))
        PhysReg = 0;
    }

    if (PhysReg && !AvoidReload) {
      // This spilled operand might be part of a two-address operand. If this
      // is the case, then changing it will necessarily require changing the
      // def part of the instruction as well. However, in some cases, we
      // aren't allowed to modify the reused register. If none of these cases
      // apply, reuse it.
      bool CanReuse = true;
      bool isTied = MI.isRegTiedToDefOperand(i);
      if (isTied) {
        // Okay, we have a two address operand. We can reuse this physreg as
        // long as we are allowed to clobber the value and there isn't an
        // earlier def that has already clobbered the physreg.
        CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
          Spills.canClobberPhysReg(PhysReg);
      }
      // If this is an asm, and a PhysReg alias is used elsewhere as an
      // earlyclobber operand, we can't also use it as an input.
      if (MI.isInlineAsm()) {
        for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
          MachineOperand &MOk = MI.getOperand(k);
          if (MOk.isReg() && MOk.isEarlyClobber() &&
              TRI->regsOverlap(MOk.getReg(), PhysReg)) {
            CanReuse = false;
            DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
                         << " for " << PrintReg(VirtReg) << ": " << MOk
                         << '\n');
            break;
          }
        }
      }

      if (CanReuse) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg "
                     << TRI->getName(PhysReg) << " for " << PrintReg(VirtReg)
                     << " instead of reloading into "
                     << PrintReg(VRM->getPhys(VirtReg), TRI) << '\n');
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        // Reusing a physreg may resurrect it. But we expect ProcessUses to
        // update the kill flags for the current instr after processing it.

        // The only technical detail we have is that we don't know that
        // PhysReg won't be clobbered by a reloaded stack slot that occurs
        // later in the instruction. In particular, consider 'op V1, V2'.
        // If V1 is available in physreg R0, we would choose to reuse it
        // here, instead of reloading it into the register the allocator
        // indicated (say R1). However, V2 might have to be reloaded
        // later, and it might indicate that it needs to live in R0. When
        // this occurs, we need to have information available that
        // indicates it is safe to use R1 for the reload instead of R0.
        //
        // To further complicate matters, we might conflict with an alias,
        // or R0 and R1 might not be compatible with each other. In this
        // case, we actually insert a reload for V1 in R1, ensuring that
        // we can get at R0 or its alias.
        ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                VRM->getPhys(VirtReg), VirtReg);
        if (isTied)
          // Only mark it clobbered if this is a use&def operand.
          ReusedOperands.markClobbered(PhysReg);
        ++NumReused;

        if (MI.getOperand(i).isKill() &&
            ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

          // The store of this spilled value is potentially dead, but we
          // won't know for certain until we've confirmed that the re-use
          // above is valid, which means waiting until the other operands
          // are processed. For now we just track the spill slot, we'll
          // remove it after the other operands are processed if valid.

          PotentialDeadStoreSlots.push_back(ReuseSlot);
        }

        // Mark it isKill if there are no other uses of the same virtual
        // register and it's not a two-address operand. IsKill will be
        // unset if reg is reused.
        if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }
        continue;
      }  // CanReuse

      // Otherwise we have a situation where we have a two-address instruction
      // whose mod/ref operand needs to be reloaded. This reload is already
      // available in some register "PhysReg", but if we used PhysReg as the
      // operand to our 2-addr instruction, the instruction would modify
      // PhysReg. This isn't cool if something later uses PhysReg and expects
      // to get its initial value.
      //
      // To avoid this problem, and to avoid doing a load right after a store,
      // we emit a copy from PhysReg into the designated register for this
      // operand.
      //
      // This case also applies to an earlyclobber'd PhysReg.
      unsigned DesignatedReg = VRM->getPhys(VirtReg);
      assert(DesignatedReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        DesignatedReg = ReusedOperands.
          GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
                          MaybeDeadStores, RegKills, KillOps, *VRM);

      // If the mapped designated register is actually the physreg we have
      // incoming, we don't need to insert a dead copy.
      if (DesignatedReg == PhysReg) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
                     << " for " << PrintReg(VirtReg)
                     << " instead of reloading into same physreg.\n");
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        ReusedOperands.markClobbered(RReg);
        ++NumReused;
        continue;
      }

      MRI->setPhysRegUsed(DesignatedReg);
      ReusedOperands.markClobbered(DesignatedReg);

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());
      MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
                                     TII->get(TargetOpcode::COPY),
                                     DesignatedReg).addReg(PhysReg);
      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      // This invalidates DesignatedReg.
      Spills.ClobberPhysReg(DesignatedReg);

      Spills.addAvailable(ReuseSlot, DesignatedReg);
      unsigned RReg =
        SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
      ++NumReused;
      continue;
    } // if (PhysReg)

    // Otherwise, reload it and remember that we have it.
    PhysReg = VRM->getPhys(VirtReg);
    assert(PhysReg && "Must map virtreg to physreg!");

    // Note that, if we reused a register for a previous operand, the
    // register we want to reload into might not actually be
    // available. If this occurs, use the register indicated by the
    // reuser.
    if (ReusedOperands.hasReuses())
      PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                  Spills, MaybeDeadStores, RegKills, KillOps, *VRM);

    MRI->setPhysRegUsed(PhysReg);
    ReusedOperands.markClobbered(PhysReg);
    if (AvoidReload)
      ++NumAvoided;
    else {
      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());

      if (DoReMat) {
        ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
      } else {
        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC, TRI);
        MachineInstr *LoadMI = prior(InsertLoc);
        VRM->addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
        DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
      }

      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (!MI.isRegTiedToDefOperand(i) &&
          KilledMIRegs.count(VirtReg) == 0) {
        MI.getOperand(i).setIsKill();
        KilledMIRegs.insert(VirtReg);
      }

      UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
    }
    unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
    MI.getOperand(i).setReg(RReg);
    MI.getOperand(i).setSubReg(0);
  }

  // Ok - now we can remove stores that have been confirmed dead.
  for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
    // This was the last use and the spilled value is still available
    // for reuse. That means the spill was unnecessary!
    int PDSSlot = PotentialDeadStoreSlots[j];
    MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
    if (DeadStore) {
      DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
      InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
      EraseInstr(DeadStore);
      MaybeDeadStores[PDSSlot] = NULL;
      ++NumDSE;
    }
  }
}
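
// ProcessUses is where reload avoidance happens: a use whose stack slot (or
// remat id) is already live in some physreg is rewritten to use that register
// instead of being reloaded, and the PotentialDeadStoreSlots bookkeeping lets
// the spill store that produced the value be deleted once its last use has
// been rewritten this way.
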
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
LocalRewriter::RewriteMBB(LiveIntervals *LIs,
                          AvailableSpills &Spills, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {

  DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
               << MBB->getName() << "':\n");

  MachineFunction &MF = *MBB->getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead. This map keeps
  // track of inserted stores that are not used. If we see a subsequent store
  // to the same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of the registers we have already spilled in case there are
  // multiple defs of the same register in MI.
  SmallSet<unsigned, 8> SpilledMIRegs;

  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = llvm::next(MII);

    if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
      NextMII = llvm::next(MII);

    if (InsertEmergencySpills(MII))
      NextMII = llvm::next(MII);

    InsertRestores(MII, Spills, RegKills, KillOps);

    if (InsertSpills(MII))
      NextMII = llvm::next(MII);

    bool Erased = false;
    bool BackTracked = false;
    MachineInstr &MI = *MII;

    // Remember DbgValue's which reference stack slots.
    if (MI.isDebugValue() && MI.getOperand(0).isFI())
      Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);

    ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);

    DEBUG(dbgs() << '\t' << MI);

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.

    // Copy the folded virts to a small vector, we may change MI2VirtMap.
    SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
    for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
                   VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
           VRM->getFoldedVirts(&MI);
         FVRange.first != FVRange.second; ++FVRange.first)
      FoldedVirts.push_back(FVRange.first->second);

    SmallSet<int, 2> FoldedSS;
    for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
      unsigned VirtReg = FoldedVirts[FVI].first;
      VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
      DEBUG(dbgs() << "Folded " << PrintReg(VirtReg) << " MR: " << MR);

      int SS = VRM->getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DEBUG(dbgs() << " - StackSlot: " << SS << "\n");

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
            if (DestReg != InReg) {
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
                                             TII->get(TargetOpcode::COPY))
                .addReg(DestReg, RegState::Define, DefMO->getSubReg())
                .addReg(InReg, RegState::Kill);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = CopyMI;
              NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
              BackTracked = true;
            } else {
              DEBUG(dbgs() << "Removing now-noop copy: " << MI);
              // InvalidateKills resurrects any prior kill of the copy's source
              // allowing the source reg to be reused in place of the copy.
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, TRI, RegKills, KillOps);
            EraseInstr(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
            MBB->insert(MII, NewMIs[0]);
            InvalidateKills(MI, TRI, RegKills, KillOps);
            EraseInstr(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already
          // clobbered the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
              MBB->insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB->insert(MII, NewStore);
              VRM->addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              EraseInstr(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }

        if (isDead) { // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          EraseInstr(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }
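
      // (The unfolding above trades the folded load away: the instruction is
      // rewritten to read PhysReg directly and an explicit store back to the
      // slot is inserted, which is only done when the reloaded value is known
      // to be available and clobberable in PhysReg.)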

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy).
        // If so, realize that the vreg is available now, and add the store to
        // the MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI)) {
              NextMII = llvm::next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }

    // Process all of the spilled defs.
    SpilledMIRegs.clear();
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        // Also check if it's copying from an "undef", if so, we can't
        // eliminate this or else the undef marker is lost and it will
        // confuse the scavenger. This is extremely rare.
        if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
            MI.getNumOperands() == 2) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
            // Last def is now dead.
            TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
          }
          EraseInstr(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM->getStackSlot(VirtReg);
      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }

      // If StackSlot is available in a register that also holds other stack
      // slots, clobber those stack slots now.
      Spills.ClobberSharingStackSlots(StackSlot);

      assert(PhysReg && "VR not assigned a physical register?");
      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);

      if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        if (MI.isIdentityCopy()) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          EraseInstr(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
        }
      }
    }
  ProcessNextInst:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
      EraseInstr(&MI);
      Erased = true;
    }
    if (!Erased)
      DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }
}

llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}
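
// A minimal usage sketch (assuming a register allocator has already filled
// in the VirtRegMap for the function):
//
//   VirtRegRewriter *Rewriter = llvm::createVirtRegRewriter();
//   Rewriter->runOnMachineFunction(MF, VRM, LIs);
//   delete Rewriter;
//
// The -rewriter=local|trivial command line option declared at the top of
// this file selects which implementation is returned.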