//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//
19 #define DEBUG_TYPE "spiller"
20 #include "VirtRegMap.h"
21 #include "llvm/Function.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetMachine.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/Visibility.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/ADT/STLExtras.h"
37 static Statistic
<> NumSpills("spiller", "Number of register spills");
38 static Statistic
<> NumStores("spiller", "Number of stores added");
39 static Statistic
<> NumLoads ("spiller", "Number of loads added");
40 static Statistic
<> NumReused("spiller", "Number of values reused");
41 static Statistic
<> NumDSE ("spiller", "Number of dead stores elided");
42 static Statistic
<> NumDCE ("spiller", "Number of copies elided");
44 enum SpillerName
{ simple
, local
};
46 static cl::opt
<SpillerName
>
48 cl::desc("Spiller to use: (default: local)"),
50 cl::values(clEnumVal(simple
, " simple spiller"),
51 clEnumVal(local
, " local spiller"),
56 //===----------------------------------------------------------------------===//
57 // VirtRegMap implementation
58 //===----------------------------------------------------------------------===//
60 void VirtRegMap::grow() {
61 Virt2PhysMap
.grow(MF
.getSSARegMap()->getLastVirtReg());
62 Virt2StackSlotMap
.grow(MF
.getSSARegMap()->getLastVirtReg());
65 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg
) {
66 assert(MRegisterInfo::isVirtualRegister(virtReg
));
67 assert(Virt2StackSlotMap
[virtReg
] == NO_STACK_SLOT
&&
68 "attempt to assign stack slot to already spilled register");
69 const TargetRegisterClass
* RC
= MF
.getSSARegMap()->getRegClass(virtReg
);
70 int frameIndex
= MF
.getFrameInfo()->CreateStackObject(RC
->getSize(),
72 Virt2StackSlotMap
[virtReg
] = frameIndex
;
77 void VirtRegMap::assignVirt2StackSlot(unsigned virtReg
, int frameIndex
) {
78 assert(MRegisterInfo::isVirtualRegister(virtReg
));
79 assert(Virt2StackSlotMap
[virtReg
] == NO_STACK_SLOT
&&
80 "attempt to assign stack slot to already spilled register");
81 Virt2StackSlotMap
[virtReg
] = frameIndex
;
84 void VirtRegMap::virtFolded(unsigned VirtReg
, MachineInstr
*OldMI
,
85 unsigned OpNo
, MachineInstr
*NewMI
) {
86 // Move previous memory references folded to new instruction.
87 MI2VirtMapTy::iterator IP
= MI2VirtMap
.lower_bound(NewMI
);
88 for (MI2VirtMapTy::iterator I
= MI2VirtMap
.lower_bound(OldMI
),
89 E
= MI2VirtMap
.end(); I
!= E
&& I
->first
== OldMI
; ) {
90 MI2VirtMap
.insert(IP
, std::make_pair(NewMI
, I
->second
));
91 MI2VirtMap
.erase(I
++);
95 if (!OldMI
->getOperand(OpNo
).isDef()) {
96 assert(OldMI
->getOperand(OpNo
).isUse() && "Operand is not use or def?");
99 MRInfo
= OldMI
->getOperand(OpNo
).isUse() ? isModRef
: isMod
;
102 // add new memory reference
103 MI2VirtMap
.insert(IP
, std::make_pair(NewMI
, std::make_pair(VirtReg
, MRInfo
)));
106 void VirtRegMap::print(std::ostream
&OS
) const {
107 const MRegisterInfo
* MRI
= MF
.getTarget().getRegisterInfo();
109 OS
<< "********** REGISTER MAP **********\n";
110 for (unsigned i
= MRegisterInfo::FirstVirtualRegister
,
111 e
= MF
.getSSARegMap()->getLastVirtReg(); i
<= e
; ++i
) {
112 if (Virt2PhysMap
[i
] != (unsigned)VirtRegMap::NO_PHYS_REG
)
113 OS
<< "[reg" << i
<< " -> " << MRI
->getName(Virt2PhysMap
[i
]) << "]\n";
117 for (unsigned i
= MRegisterInfo::FirstVirtualRegister
,
118 e
= MF
.getSSARegMap()->getLastVirtReg(); i
<= e
; ++i
)
119 if (Virt2StackSlotMap
[i
] != VirtRegMap::NO_STACK_SLOT
)
120 OS
<< "[reg" << i
<< " -> fi#" << Virt2StackSlotMap
[i
] << "]\n";
124 void VirtRegMap::dump() const { print(std::cerr
); }
127 //===----------------------------------------------------------------------===//
128 // Simple Spiller Implementation
129 //===----------------------------------------------------------------------===//
131 Spiller::~Spiller() {}
134 struct VISIBILITY_HIDDEN SimpleSpiller
: public Spiller
{
135 bool runOnMachineFunction(MachineFunction
& mf
, VirtRegMap
&VRM
);
139 bool SimpleSpiller::runOnMachineFunction(MachineFunction
&MF
, VirtRegMap
&VRM
) {
140 DEBUG(std::cerr
<< "********** REWRITE MACHINE CODE **********\n");
141 DEBUG(std::cerr
<< "********** Function: "
142 << MF
.getFunction()->getName() << '\n');
143 const TargetMachine
&TM
= MF
.getTarget();
144 const MRegisterInfo
&MRI
= *TM
.getRegisterInfo();
145 bool *PhysRegsUsed
= MF
.getUsedPhysregs();
147 // LoadedRegs - Keep track of which vregs are loaded, so that we only load
148 // each vreg once (in the case where a spilled vreg is used by multiple
149 // operands). This is always smaller than the number of operands to the
150 // current machine instr, so it should be small.
151 std::vector
<unsigned> LoadedRegs
;
153 for (MachineFunction::iterator MBBI
= MF
.begin(), E
= MF
.end();
155 DEBUG(std::cerr
<< MBBI
->getBasicBlock()->getName() << ":\n");
156 MachineBasicBlock
&MBB
= *MBBI
;
157 for (MachineBasicBlock::iterator MII
= MBB
.begin(),
158 E
= MBB
.end(); MII
!= E
; ++MII
) {
159 MachineInstr
&MI
= *MII
;
160 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
161 MachineOperand
&MO
= MI
.getOperand(i
);
162 if (MO
.isRegister() && MO
.getReg())
163 if (MRegisterInfo::isVirtualRegister(MO
.getReg())) {
164 unsigned VirtReg
= MO
.getReg();
165 unsigned PhysReg
= VRM
.getPhys(VirtReg
);
166 if (VRM
.hasStackSlot(VirtReg
)) {
167 int StackSlot
= VRM
.getStackSlot(VirtReg
);
168 const TargetRegisterClass
* RC
=
169 MF
.getSSARegMap()->getRegClass(VirtReg
);
172 std::find(LoadedRegs
.begin(), LoadedRegs
.end(), VirtReg
)
173 == LoadedRegs
.end()) {
174 MRI
.loadRegFromStackSlot(MBB
, &MI
, PhysReg
, StackSlot
, RC
);
175 LoadedRegs
.push_back(VirtReg
);
177 DEBUG(std::cerr
<< '\t' << *prior(MII
));
181 MRI
.storeRegToStackSlot(MBB
, next(MII
), PhysReg
, StackSlot
, RC
);
185 PhysRegsUsed
[PhysReg
] = true;
186 MI
.getOperand(i
).setReg(PhysReg
);
188 PhysRegsUsed
[MO
.getReg()] = true;
192 DEBUG(std::cerr
<< '\t' << MI
);
199 //===----------------------------------------------------------------------===//
200 // Local Spiller Implementation
201 //===----------------------------------------------------------------------===//
204 /// LocalSpiller - This spiller does a simple pass over the machine basic
205 /// block to attempt to keep spills in registers as much as possible for
206 /// blocks that have low register pressure (the vreg may be spilled due to
207 /// register pressure in other blocks).
208 class VISIBILITY_HIDDEN LocalSpiller
: public Spiller
{
209 const MRegisterInfo
*MRI
;
210 const TargetInstrInfo
*TII
;
212 bool runOnMachineFunction(MachineFunction
&MF
, VirtRegMap
&VRM
) {
213 MRI
= MF
.getTarget().getRegisterInfo();
214 TII
= MF
.getTarget().getInstrInfo();
215 DEBUG(std::cerr
<< "\n**** Local spiller rewriting function '"
216 << MF
.getFunction()->getName() << "':\n");
218 for (MachineFunction::iterator MBB
= MF
.begin(), E
= MF
.end();
220 RewriteMBB(*MBB
, VRM
);
224 void RewriteMBB(MachineBasicBlock
&MBB
, VirtRegMap
&VRM
);
225 void ClobberPhysReg(unsigned PR
, std::map
<int, unsigned> &SpillSlots
,
226 std::multimap
<unsigned, int> &PhysRegs
);
227 void ClobberPhysRegOnly(unsigned PR
, std::map
<int, unsigned> &SpillSlots
,
228 std::multimap
<unsigned, int> &PhysRegs
);
229 void ModifyStackSlot(int Slot
, std::map
<int, unsigned> &SpillSlots
,
230 std::multimap
<unsigned, int> &PhysRegs
);
234 /// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
235 /// top down, keep track of which spills slots are available in each register.
237 /// Note that not all physregs are created equal here. In particular, some
238 /// physregs are reloads that we are allowed to clobber or ignore at any time.
239 /// Other physregs are values that the register allocated program is using that
240 /// we cannot CHANGE, but we can read if we like. We keep track of this on a
241 /// per-stack-slot basis as the low bit in the value of the SpillSlotsAvailable
242 /// entries. The predicate 'canClobberPhysReg()' checks this bit and
243 /// addAvailable sets it if.
245 class VISIBILITY_HIDDEN AvailableSpills
{
246 const MRegisterInfo
*MRI
;
247 const TargetInstrInfo
*TII
;
249 // SpillSlotsAvailable - This map keeps track of all of the spilled virtual
250 // register values that are still available, due to being loaded or stored to,
251 // but not invalidated yet.
252 std::map
<int, unsigned> SpillSlotsAvailable
;
254 // PhysRegsAvailable - This is the inverse of SpillSlotsAvailable, indicating
255 // which stack slot values are currently held by a physreg. This is used to
256 // invalidate entries in SpillSlotsAvailable when a physreg is modified.
257 std::multimap
<unsigned, int> PhysRegsAvailable
;
259 void ClobberPhysRegOnly(unsigned PhysReg
);
261 AvailableSpills(const MRegisterInfo
*mri
, const TargetInstrInfo
*tii
)
262 : MRI(mri
), TII(tii
) {
265 /// getSpillSlotPhysReg - If the specified stack slot is available in a
266 /// physical register, return that PhysReg, otherwise return 0.
267 unsigned getSpillSlotPhysReg(int Slot
) const {
268 std::map
<int, unsigned>::const_iterator I
= SpillSlotsAvailable
.find(Slot
);
269 if (I
!= SpillSlotsAvailable
.end())
270 return I
->second
>> 1; // Remove the CanClobber bit.
274 const MRegisterInfo
*getRegInfo() const { return MRI
; }
276 /// addAvailable - Mark that the specified stack slot is available in the
277 /// specified physreg. If CanClobber is true, the physreg can be modified at
278 /// any time without changing the semantics of the program.
279 void addAvailable(int Slot
, unsigned Reg
, bool CanClobber
= true) {
280 // If this stack slot is thought to be available in some other physreg,
281 // remove its record.
282 ModifyStackSlot(Slot
);
284 PhysRegsAvailable
.insert(std::make_pair(Reg
, Slot
));
285 SpillSlotsAvailable
[Slot
] = (Reg
<< 1) | (unsigned)CanClobber
;
287 DEBUG(std::cerr
<< "Remembering SS#" << Slot
<< " in physreg "
288 << MRI
->getName(Reg
) << "\n");
291 /// canClobberPhysReg - Return true if the spiller is allowed to change the
292 /// value of the specified stackslot register if it desires. The specified
293 /// stack slot must be available in a physreg for this query to make sense.
294 bool canClobberPhysReg(int Slot
) const {
295 assert(SpillSlotsAvailable
.count(Slot
) && "Slot not available!");
296 return SpillSlotsAvailable
.find(Slot
)->second
& 1;
299 /// ClobberPhysReg - This is called when the specified physreg changes
300 /// value. We use this to invalidate any info about stuff we thing lives in
301 /// it and any of its aliases.
302 void ClobberPhysReg(unsigned PhysReg
);
304 /// ModifyStackSlot - This method is called when the value in a stack slot
305 /// changes. This removes information about which register the previous value
306 /// for this slot lives in (as the previous value is dead now).
307 void ModifyStackSlot(int Slot
);
311 /// ClobberPhysRegOnly - This is called when the specified physreg changes
312 /// value. We use this to invalidate any info about stuff we thing lives in it.
313 void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg
) {
314 std::multimap
<unsigned, int>::iterator I
=
315 PhysRegsAvailable
.lower_bound(PhysReg
);
316 while (I
!= PhysRegsAvailable
.end() && I
->first
== PhysReg
) {
317 int Slot
= I
->second
;
318 PhysRegsAvailable
.erase(I
++);
319 assert((SpillSlotsAvailable
[Slot
] >> 1) == PhysReg
&&
320 "Bidirectional map mismatch!");
321 SpillSlotsAvailable
.erase(Slot
);
322 DEBUG(std::cerr
<< "PhysReg " << MRI
->getName(PhysReg
)
323 << " clobbered, invalidating SS#" << Slot
<< "\n");
327 /// ClobberPhysReg - This is called when the specified physreg changes
328 /// value. We use this to invalidate any info about stuff we thing lives in
329 /// it and any of its aliases.
330 void AvailableSpills::ClobberPhysReg(unsigned PhysReg
) {
331 for (const unsigned *AS
= MRI
->getAliasSet(PhysReg
); *AS
; ++AS
)
332 ClobberPhysRegOnly(*AS
);
333 ClobberPhysRegOnly(PhysReg
);
336 /// ModifyStackSlot - This method is called when the value in a stack slot
337 /// changes. This removes information about which register the previous value
338 /// for this slot lives in (as the previous value is dead now).
339 void AvailableSpills::ModifyStackSlot(int Slot
) {
340 std::map
<int, unsigned>::iterator It
= SpillSlotsAvailable
.find(Slot
);
341 if (It
== SpillSlotsAvailable
.end()) return;
342 unsigned Reg
= It
->second
>> 1;
343 SpillSlotsAvailable
.erase(It
);
345 // This register may hold the value of multiple stack slots, only remove this
346 // stack slot from the set of values the register contains.
347 std::multimap
<unsigned, int>::iterator I
= PhysRegsAvailable
.lower_bound(Reg
);
349 assert(I
!= PhysRegsAvailable
.end() && I
->first
== Reg
&&
350 "Map inverse broken!");
351 if (I
->second
== Slot
) break;
353 PhysRegsAvailable
.erase(I
);
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand.  See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlot - The spill slot of the value being reused.
  unsigned StackSlot;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlot(ss), PhysRegReused(prr), AssignedPhysReg(apr),
      VirtReg(vreg) {}
};
383 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
384 /// is reused instead of reloaded.
385 class VISIBILITY_HIDDEN ReuseInfo
{
387 std::vector
<ReusedOp
> Reuses
;
389 ReuseInfo(MachineInstr
&mi
) : MI(mi
) {}
391 bool hasReuses() const {
392 return !Reuses
.empty();
395 /// addReuse - If we choose to reuse a virtual register that is already
396 /// available instead of reloading it, remember that we did so.
397 void addReuse(unsigned OpNo
, unsigned StackSlot
,
398 unsigned PhysRegReused
, unsigned AssignedPhysReg
,
400 // If the reload is to the assigned register anyway, no undo will be
402 if (PhysRegReused
== AssignedPhysReg
) return;
404 // Otherwise, remember this.
405 Reuses
.push_back(ReusedOp(OpNo
, StackSlot
, PhysRegReused
,
406 AssignedPhysReg
, VirtReg
));
409 /// GetRegForReload - We are about to emit a reload into PhysReg. If there
410 /// is some other operand that is using the specified register, either pick
411 /// a new register to use, or evict the previous reload and use this reg.
412 unsigned GetRegForReload(unsigned PhysReg
, MachineInstr
*MI
,
413 AvailableSpills
&Spills
,
414 std::map
<int, MachineInstr
*> &MaybeDeadStores
) {
415 if (Reuses
.empty()) return PhysReg
; // This is most often empty.
417 for (unsigned ro
= 0, e
= Reuses
.size(); ro
!= e
; ++ro
) {
418 ReusedOp
&Op
= Reuses
[ro
];
419 // If we find some other reuse that was supposed to use this register
420 // exactly for its reload, we can change this reload to use ITS reload
422 if (Op
.PhysRegReused
== PhysReg
) {
423 // Yup, use the reload register that we didn't use before.
424 unsigned NewReg
= Op
.AssignedPhysReg
;
426 // Remove the record for the previous reuse. We know it can never be
428 Reuses
.erase(Reuses
.begin()+ro
);
429 return GetRegForReload(NewReg
, MI
, Spills
, MaybeDeadStores
);
431 // Otherwise, we might also have a problem if a previously reused
432 // value aliases the new register. If so, codegen the previous reload
434 unsigned PRRU
= Op
.PhysRegReused
;
435 const MRegisterInfo
*MRI
= Spills
.getRegInfo();
436 if (MRI
->areAliases(PRRU
, PhysReg
)) {
437 // Okay, we found out that an alias of a reused register
438 // was used. This isn't good because it means we have
439 // to undo a previous reuse.
440 MachineBasicBlock
*MBB
= MI
->getParent();
441 const TargetRegisterClass
*AliasRC
=
442 MBB
->getParent()->getSSARegMap()->getRegClass(Op
.VirtReg
);
444 // Copy Op out of the vector and remove it, we're going to insert an
445 // explicit load for it.
447 Reuses
.erase(Reuses
.begin()+ro
);
449 // Ok, we're going to try to reload the assigned physreg into the
450 // slot that we were supposed to in the first place. However, that
451 // register could hold a reuse. Check to see if it conflicts or
452 // would prefer us to use a different register.
453 unsigned NewPhysReg
= GetRegForReload(NewOp
.AssignedPhysReg
,
454 MI
, Spills
, MaybeDeadStores
);
456 MRI
->loadRegFromStackSlot(*MBB
, MI
, NewPhysReg
,
457 NewOp
.StackSlot
, AliasRC
);
458 Spills
.ClobberPhysReg(NewPhysReg
);
459 Spills
.ClobberPhysReg(NewOp
.PhysRegReused
);
461 // Any stores to this stack slot are not dead anymore.
462 MaybeDeadStores
.erase(NewOp
.StackSlot
);
464 MI
->getOperand(NewOp
.Operand
).setReg(NewPhysReg
);
466 Spills
.addAvailable(NewOp
.StackSlot
, NewPhysReg
);
468 DEBUG(MachineBasicBlock::iterator MII
= MI
;
469 std::cerr
<< '\t' << *prior(MII
));
471 DEBUG(std::cerr
<< "Reuse undone!\n");
474 // Finally, PhysReg is now available, go ahead and use it.
485 /// rewriteMBB - Keep track of which spills are available even after the
486 /// register allocator is done with them. If possible, avoid reloading vregs.
487 void LocalSpiller::RewriteMBB(MachineBasicBlock
&MBB
, VirtRegMap
&VRM
) {
489 DEBUG(std::cerr
<< MBB
.getBasicBlock()->getName() << ":\n");
491 // Spills - Keep track of which spilled values are available in physregs so
492 // that we can choose to reuse the physregs instead of emitting reloads.
493 AvailableSpills
Spills(MRI
, TII
);
495 // DefAndUseVReg - When we see a def&use operand that is spilled, keep track
496 // of it. ".first" is the machine operand index (should always be 0 for now),
497 // and ".second" is the virtual register that is spilled.
498 std::vector
<std::pair
<unsigned, unsigned> > DefAndUseVReg
;
500 // MaybeDeadStores - When we need to write a value back into a stack slot,
501 // keep track of the inserted store. If the stack slot value is never read
502 // (because the value was used from some available register, for example), and
503 // subsequently stored to, the original store is dead. This map keeps track
504 // of inserted stores that are not used. If we see a subsequent store to the
505 // same stack slot, the original store is deleted.
506 std::map
<int, MachineInstr
*> MaybeDeadStores
;
508 bool *PhysRegsUsed
= MBB
.getParent()->getUsedPhysregs();
510 for (MachineBasicBlock::iterator MII
= MBB
.begin(), E
= MBB
.end();
512 MachineInstr
&MI
= *MII
;
513 MachineBasicBlock::iterator NextMII
= MII
; ++NextMII
;
515 /// ReusedOperands - Keep track of operand reuse in case we need to undo
517 ReuseInfo
ReusedOperands(MI
);
519 DefAndUseVReg
.clear();
521 // Process all of the spilled uses and all non spilled reg references.
522 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
523 MachineOperand
&MO
= MI
.getOperand(i
);
524 if (!MO
.isRegister() || MO
.getReg() == 0)
525 continue; // Ignore non-register operands.
527 if (MRegisterInfo::isPhysicalRegister(MO
.getReg())) {
528 // Ignore physregs for spilling, but remember that it is used by this
530 PhysRegsUsed
[MO
.getReg()] = true;
534 assert(MRegisterInfo::isVirtualRegister(MO
.getReg()) &&
535 "Not a virtual or a physical register?");
537 unsigned VirtReg
= MO
.getReg();
538 if (!VRM
.hasStackSlot(VirtReg
)) {
539 // This virtual register was assigned a physreg!
540 unsigned Phys
= VRM
.getPhys(VirtReg
);
541 PhysRegsUsed
[Phys
] = true;
542 MI
.getOperand(i
).setReg(Phys
);
546 // This virtual register is now known to be a spilled value.
548 continue; // Handle defs in the loop below (handle use&def here though)
550 // If this is both a def and a use, we need to emit a store to the
551 // stack slot after the instruction. Keep track of D&U operands
552 // because we are about to change it to a physreg here.
554 // Remember that this was a def-and-use operand, and that the
555 // stack slot is live after this instruction executes.
556 DefAndUseVReg
.push_back(std::make_pair(i
, VirtReg
));
559 int StackSlot
= VRM
.getStackSlot(VirtReg
);
562 // Check to see if this stack slot is available.
563 if ((PhysReg
= Spills
.getSpillSlotPhysReg(StackSlot
))) {
565 // Don't reuse it for a def&use operand if we aren't allowed to change
567 if (!MO
.isDef() || Spills
.canClobberPhysReg(StackSlot
)) {
568 // If this stack slot value is already available, reuse it!
569 DEBUG(std::cerr
<< "Reusing SS#" << StackSlot
<< " from physreg "
570 << MRI
->getName(PhysReg
) << " for vreg"
571 << VirtReg
<<" instead of reloading into physreg "
572 << MRI
->getName(VRM
.getPhys(VirtReg
)) << "\n");
573 MI
.getOperand(i
).setReg(PhysReg
);
575 // The only technical detail we have is that we don't know that
576 // PhysReg won't be clobbered by a reloaded stack slot that occurs
577 // later in the instruction. In particular, consider 'op V1, V2'.
578 // If V1 is available in physreg R0, we would choose to reuse it
579 // here, instead of reloading it into the register the allocator
580 // indicated (say R1). However, V2 might have to be reloaded
581 // later, and it might indicate that it needs to live in R0. When
582 // this occurs, we need to have information available that
583 // indicates it is safe to use R1 for the reload instead of R0.
585 // To further complicate matters, we might conflict with an alias,
586 // or R0 and R1 might not be compatible with each other. In this
587 // case, we actually insert a reload for V1 in R1, ensuring that
588 // we can get at R0 or its alias.
589 ReusedOperands
.addReuse(i
, StackSlot
, PhysReg
,
590 VRM
.getPhys(VirtReg
), VirtReg
);
595 // Otherwise we have a situation where we have a two-address instruction
596 // whose mod/ref operand needs to be reloaded. This reload is already
597 // available in some register "PhysReg", but if we used PhysReg as the
598 // operand to our 2-addr instruction, the instruction would modify
599 // PhysReg. This isn't cool if something later uses PhysReg and expects
600 // to get its initial value.
602 // To avoid this problem, and to avoid doing a load right after a store,
603 // we emit a copy from PhysReg into the designated register for this
605 unsigned DesignatedReg
= VRM
.getPhys(VirtReg
);
606 assert(DesignatedReg
&& "Must map virtreg to physreg!");
608 // Note that, if we reused a register for a previous operand, the
609 // register we want to reload into might not actually be
610 // available. If this occurs, use the register indicated by the
612 if (ReusedOperands
.hasReuses())
613 DesignatedReg
= ReusedOperands
.GetRegForReload(DesignatedReg
, &MI
,
614 Spills
, MaybeDeadStores
);
616 // If the mapped designated register is actually the physreg we have
617 // incoming, we don't need to inserted a dead copy.
618 if (DesignatedReg
== PhysReg
) {
619 // If this stack slot value is already available, reuse it!
620 DEBUG(std::cerr
<< "Reusing SS#" << StackSlot
<< " from physreg "
621 << MRI
->getName(PhysReg
) << " for vreg"
623 << " instead of reloading into same physreg.\n");
624 MI
.getOperand(i
).setReg(PhysReg
);
629 const TargetRegisterClass
* RC
=
630 MBB
.getParent()->getSSARegMap()->getRegClass(VirtReg
);
632 PhysRegsUsed
[DesignatedReg
] = true;
633 MRI
->copyRegToReg(MBB
, &MI
, DesignatedReg
, PhysReg
, RC
);
635 // This invalidates DesignatedReg.
636 Spills
.ClobberPhysReg(DesignatedReg
);
638 Spills
.addAvailable(StackSlot
, DesignatedReg
);
639 MI
.getOperand(i
).setReg(DesignatedReg
);
640 DEBUG(std::cerr
<< '\t' << *prior(MII
));
645 // Otherwise, reload it and remember that we have it.
646 PhysReg
= VRM
.getPhys(VirtReg
);
647 assert(PhysReg
&& "Must map virtreg to physreg!");
648 const TargetRegisterClass
* RC
=
649 MBB
.getParent()->getSSARegMap()->getRegClass(VirtReg
);
651 // Note that, if we reused a register for a previous operand, the
652 // register we want to reload into might not actually be
653 // available. If this occurs, use the register indicated by the
655 if (ReusedOperands
.hasReuses())
656 PhysReg
= ReusedOperands
.GetRegForReload(PhysReg
, &MI
,
657 Spills
, MaybeDeadStores
);
659 PhysRegsUsed
[PhysReg
] = true;
660 MRI
->loadRegFromStackSlot(MBB
, &MI
, PhysReg
, StackSlot
, RC
);
661 // This invalidates PhysReg.
662 Spills
.ClobberPhysReg(PhysReg
);
664 // Any stores to this stack slot are not dead anymore.
665 MaybeDeadStores
.erase(StackSlot
);
666 Spills
.addAvailable(StackSlot
, PhysReg
);
668 MI
.getOperand(i
).setReg(PhysReg
);
669 DEBUG(std::cerr
<< '\t' << *prior(MII
));
672 // Loop over all of the implicit defs, clearing them from our available
674 const unsigned *ImpDef
= TII
->getImplicitDefs(MI
.getOpcode());
676 for ( ; *ImpDef
; ++ImpDef
) {
677 PhysRegsUsed
[*ImpDef
] = true;
678 Spills
.ClobberPhysReg(*ImpDef
);
682 DEBUG(std::cerr
<< '\t' << MI
);
684 // If we have folded references to memory operands, make sure we clear all
685 // physical registers that may contain the value of the spilled virtual
687 VirtRegMap::MI2VirtMapTy::const_iterator I
, End
;
688 for (tie(I
, End
) = VRM
.getFoldedVirts(&MI
); I
!= End
; ++I
) {
689 DEBUG(std::cerr
<< "Folded vreg: " << I
->second
.first
<< " MR: "
690 << I
->second
.second
);
691 unsigned VirtReg
= I
->second
.first
;
692 VirtRegMap::ModRef MR
= I
->second
.second
;
693 if (!VRM
.hasStackSlot(VirtReg
)) {
694 DEBUG(std::cerr
<< ": No stack slot!\n");
697 int SS
= VRM
.getStackSlot(VirtReg
);
698 DEBUG(std::cerr
<< " - StackSlot: " << SS
<< "\n");
700 // If this folded instruction is just a use, check to see if it's a
701 // straight load from the virt reg slot.
702 if ((MR
& VirtRegMap::isRef
) && !(MR
& VirtRegMap::isMod
)) {
704 if (unsigned DestReg
= TII
->isLoadFromStackSlot(&MI
, FrameIdx
)) {
705 // If this spill slot is available, turn it into a copy (or nothing)
706 // instead of leaving it as a load!
708 if (FrameIdx
== SS
&& (InReg
= Spills
.getSpillSlotPhysReg(SS
))) {
709 DEBUG(std::cerr
<< "Promoted Load To Copy: " << MI
);
710 MachineFunction
&MF
= *MBB
.getParent();
711 if (DestReg
!= InReg
) {
712 MRI
->copyRegToReg(MBB
, &MI
, DestReg
, InReg
,
713 MF
.getSSARegMap()->getRegClass(VirtReg
));
714 // Revisit the copy so we make sure to notice the effects of the
715 // operation on the destreg (either needing to RA it if it's
716 // virtual or needing to clobber any values if it's physical).
718 --NextMII
; // backtrack to the copy.
720 VRM
.RemoveFromFoldedVirtMap(&MI
);
722 goto ProcessNextInst
;
727 // If this reference is not a use, any previous store is now dead.
728 // Otherwise, the store to this stack slot is not dead anymore.
729 std::map
<int, MachineInstr
*>::iterator MDSI
= MaybeDeadStores
.find(SS
);
730 if (MDSI
!= MaybeDeadStores
.end()) {
731 if (MR
& VirtRegMap::isRef
) // Previous store is not dead.
732 MaybeDeadStores
.erase(MDSI
);
734 // If we get here, the store is dead, nuke it now.
735 assert(VirtRegMap::isMod
&& "Can't be modref!");
736 DEBUG(std::cerr
<< "Removed dead store:\t" << *MDSI
->second
);
737 MBB
.erase(MDSI
->second
);
738 VRM
.RemoveFromFoldedVirtMap(MDSI
->second
);
739 MaybeDeadStores
.erase(MDSI
);
744 // If the spill slot value is available, and this is a new definition of
745 // the value, the value is not available anymore.
746 if (MR
& VirtRegMap::isMod
) {
747 // Notice that the value in this stack slot has been modified.
748 Spills
.ModifyStackSlot(SS
);
750 // If this is *just* a mod of the value, check to see if this is just a
751 // store to the spill slot (i.e. the spill got merged into the copy). If
752 // so, realize that the vreg is available now, and add the store to the
753 // MaybeDeadStore info.
755 if (!(MR
& VirtRegMap::isRef
)) {
756 if (unsigned SrcReg
= TII
->isStoreToStackSlot(&MI
, StackSlot
)) {
757 assert(MRegisterInfo::isPhysicalRegister(SrcReg
) &&
758 "Src hasn't been allocated yet?");
759 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
760 // this as a potentially dead store in case there is a subsequent
761 // store into the stack slot without a read from it.
762 MaybeDeadStores
[StackSlot
] = &MI
;
764 // If the stack slot value was previously available in some other
765 // register, change it now. Otherwise, make the register available,
767 Spills
.addAvailable(StackSlot
, SrcReg
, false /*don't clobber*/);
773 // Process all of the spilled defs.
774 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
775 MachineOperand
&MO
= MI
.getOperand(i
);
776 if (MO
.isRegister() && MO
.getReg() && MO
.isDef()) {
777 unsigned VirtReg
= MO
.getReg();
779 if (!MRegisterInfo::isVirtualRegister(VirtReg
)) {
780 // Check to see if this is a def-and-use vreg operand that we do need
781 // to insert a store for.
782 bool OpTakenCareOf
= false;
783 if (MO
.isUse() && !DefAndUseVReg
.empty()) {
784 for (unsigned dau
= 0, e
= DefAndUseVReg
.size(); dau
!= e
; ++dau
)
785 if (DefAndUseVReg
[dau
].first
== i
) {
786 VirtReg
= DefAndUseVReg
[dau
].second
;
787 OpTakenCareOf
= true;
792 if (!OpTakenCareOf
) {
793 // Check to see if this is a noop copy. If so, eliminate the
794 // instruction before considering the dest reg to be changed.
796 if (TII
->isMoveInstr(MI
, Src
, Dst
) && Src
== Dst
) {
798 DEBUG(std::cerr
<< "Removing now-noop copy: " << MI
);
800 VRM
.RemoveFromFoldedVirtMap(&MI
);
801 goto ProcessNextInst
;
803 Spills
.ClobberPhysReg(VirtReg
);
808 // The only vregs left are stack slot definitions.
809 int StackSlot
= VRM
.getStackSlot(VirtReg
);
810 const TargetRegisterClass
*RC
=
811 MBB
.getParent()->getSSARegMap()->getRegClass(VirtReg
);
814 // If this is a def&use operand, and we used a different physreg for
815 // it than the one assigned, make sure to execute the store from the
816 // correct physical register.
817 if (MO
.getReg() == VirtReg
)
818 PhysReg
= VRM
.getPhys(VirtReg
);
820 PhysReg
= MO
.getReg();
822 PhysRegsUsed
[PhysReg
] = true;
823 MRI
->storeRegToStackSlot(MBB
, next(MII
), PhysReg
, StackSlot
, RC
);
824 DEBUG(std::cerr
<< "Store:\t" << *next(MII
));
825 MI
.getOperand(i
).setReg(PhysReg
);
827 // Check to see if this is a noop copy. If so, eliminate the
828 // instruction before considering the dest reg to be changed.
831 if (TII
->isMoveInstr(MI
, Src
, Dst
) && Src
== Dst
) {
833 DEBUG(std::cerr
<< "Removing now-noop copy: " << MI
);
835 VRM
.RemoveFromFoldedVirtMap(&MI
);
836 goto ProcessNextInst
;
840 // If there is a dead store to this stack slot, nuke it now.
841 MachineInstr
*&LastStore
= MaybeDeadStores
[StackSlot
];
843 DEBUG(std::cerr
<< "Removed dead store:\t" << *LastStore
);
845 MBB
.erase(LastStore
);
846 VRM
.RemoveFromFoldedVirtMap(LastStore
);
848 LastStore
= next(MII
);
850 // If the stack slot value was previously available in some other
851 // register, change it now. Otherwise, make the register available,
853 Spills
.ModifyStackSlot(StackSlot
);
854 Spills
.ClobberPhysReg(PhysReg
);
855 Spills
.addAvailable(StackSlot
, PhysReg
);
866 llvm::Spiller
* llvm::createSpiller() {
867 switch (SpillerOpt
) {
868 default: assert(0 && "Unreachable!");
870 return new LocalSpiller();
872 return new SimpleSpiller();