1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
12 //===----------------------------------------------------------------------===//
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/CodeGen/LiveInterval.h"
26 #include "llvm/CodeGen/LiveIntervalCalc.h"
27 #include "llvm/CodeGen/LiveIntervals.h"
28 #include "llvm/CodeGen/LiveRangeEdit.h"
29 #include "llvm/CodeGen/LiveStacks.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineFunctionPass.h"
35 #include "llvm/CodeGen/MachineInstr.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineInstrBundle.h"
38 #include "llvm/CodeGen/MachineLoopInfo.h"
39 #include "llvm/CodeGen/MachineOperand.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/SlotIndexes.h"
42 #include "llvm/CodeGen/Spiller.h"
43 #include "llvm/CodeGen/StackMaps.h"
44 #include "llvm/CodeGen/TargetInstrInfo.h"
45 #include "llvm/CodeGen/TargetOpcodes.h"
46 #include "llvm/CodeGen/TargetRegisterInfo.h"
47 #include "llvm/CodeGen/TargetSubtargetInfo.h"
48 #include "llvm/CodeGen/VirtRegMap.h"
49 #include "llvm/Config/llvm-config.h"
50 #include "llvm/Support/BlockFrequency.h"
51 #include "llvm/Support/BranchProbability.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include <cassert>
58 #include <iterator>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
63 using namespace llvm;
65 #define DEBUG_TYPE "regalloc"
67 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68 STATISTIC(NumSnippets, "Number of spilled snippets");
69 STATISTIC(NumSpills, "Number of spills inserted");
70 STATISTIC(NumSpillsRemoved, "Number of spills removed");
71 STATISTIC(NumReloads, "Number of reloads inserted");
72 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73 STATISTIC(NumFolded, "Number of folded stack accesses");
74 STATISTIC(NumFoldedLoads, "Number of folded loads");
75 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
77 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78 cl::desc("Disable inline spill hoisting"));
79 static cl::opt<bool>
80 RestrictStatepointRemat("restrict-statepoint-remat",
81 cl::init(false), cl::Hidden,
82 cl::desc("Restrict remat for statepoint operands"));
84 namespace {
86 class HoistSpillHelper : private LiveRangeEdit::Delegate {
87 MachineFunction &MF;
88 LiveIntervals &LIS;
89 LiveStacks &LSS;
90 AliasAnalysis *AA;
91 MachineDominatorTree &MDT;
92 MachineLoopInfo &Loops;
93 VirtRegMap &VRM;
94 MachineRegisterInfo &MRI;
95 const TargetInstrInfo &TII;
96 const TargetRegisterInfo &TRI;
97 const MachineBlockFrequencyInfo &MBFI;
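// Analysis from SplitKit used to find the last point in a basic block at
// which a spill of a given live interval can legally be inserted.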
99 InsertPointAnalysis IPA;
101 // Map from StackSlot to the LiveInterval of the original register.
102 // Note the LiveInterval of the original register may have been deleted
103 // after it is spilled. We keep a copy here to track the range where
104 // spills can be moved.
105 DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
107 // Map from pair of (StackSlot and Original VNI) to a set of spills which
108 // have the same stackslot and have equal values defined by Original VNI.
109 // These spills are mergeable and are hoist candidates.
110 using MergeableSpillsMap =
111 MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
112 MergeableSpillsMap MergeableSpills;
114 /// This is the map from original register to a set containing all its
115 /// siblings. To hoist a spill to another BB, we need to find a live
116 /// sibling there and use it as the source of the new spill.
117 DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
119 bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
120 MachineBasicBlock &BB, Register &LiveReg);
122 void rmRedundantSpills(
123 SmallPtrSet<MachineInstr *, 16> &Spills,
124 SmallVectorImpl<MachineInstr *> &SpillsToRm,
125 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
127 void getVisitOrders(
128 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
129 SmallVectorImpl<MachineDomTreeNode *> &Orders,
130 SmallVectorImpl<MachineInstr *> &SpillsToRm,
131 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
132 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
134 void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
135 SmallPtrSet<MachineInstr *, 16> &Spills,
136 SmallVectorImpl<MachineInstr *> &SpillsToRm,
137 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
139 public:
140 HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
141 VirtRegMap &vrm)
142 : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
143 LSS(pass.getAnalysis<LiveStacks>()),
144 AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
145 MDT(pass.getAnalysis<MachineDominatorTree>()),
146 Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
147 MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
148 TRI(*mf.getSubtarget().getRegisterInfo()),
149 MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
150 IPA(LIS, mf.getNumBlockIDs()) {}
152 void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
153 unsigned Original);
154 bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
155 void hoistAllSpills();
156 void LRE_DidCloneVirtReg(Register, Register) override;
159 class InlineSpiller : public Spiller {
160 MachineFunction &MF;
161 LiveIntervals &LIS;
162 LiveStacks &LSS;
163 AliasAnalysis *AA;
164 MachineDominatorTree &MDT;
165 MachineLoopInfo &Loops;
166 VirtRegMap &VRM;
167 MachineRegisterInfo &MRI;
168 const TargetInstrInfo &TII;
169 const TargetRegisterInfo &TRI;
170 const MachineBlockFrequencyInfo &MBFI;
172 // Variables that are valid during spill(), but used by multiple methods.
173 LiveRangeEdit *Edit;
174 LiveInterval *StackInt;
175 int StackSlot;
176 Register Original;
178 // All registers to spill to StackSlot, including the main register.
179 SmallVector<Register, 8> RegsToSpill;
181 // All COPY instructions to/from snippets.
182 // They are ignored since both operands refer to the same stack slot.
183 SmallPtrSet<MachineInstr*, 8> SnippetCopies;
185 // Values that failed to remat at some point.
186 SmallPtrSet<VNInfo*, 8> UsedValues;
188 // Dead defs generated during spilling.
189 SmallVector<MachineInstr*, 8> DeadDefs;
191 // Object records spills information and does the hoisting.
192 HoistSpillHelper HSpiller;
194 // Live range weight calculator.
195 VirtRegAuxInfo &VRAI;
197 ~InlineSpiller() override = default;
199 public:
200 InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
201 VirtRegAuxInfo &VRAI)
202 : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
203 LSS(Pass.getAnalysis<LiveStacks>()),
204 AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
205 MDT(Pass.getAnalysis<MachineDominatorTree>()),
206 Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
207 MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
208 TRI(*MF.getSubtarget().getRegisterInfo()),
209 MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
210 HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
212 void spill(LiveRangeEdit &) override;
213 void postOptimization() override;
215 private:
216 bool isSnippet(const LiveInterval &SnipLI);
217 void collectRegsToSpill();
219 bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
221 bool isSibling(Register Reg);
222 bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
223 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
225 void markValueUsed(LiveInterval*, VNInfo*);
226 bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
227 bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
228 void reMaterializeAll();
230 bool coalesceStackAccess(MachineInstr *MI, Register Reg);
231 bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
232 MachineInstr *LoadMI = nullptr);
233 void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
234 void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
236 void spillAroundUses(Register Reg);
237 void spillAll();
240 } // end anonymous namespace
242 Spiller::~Spiller() = default;
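// Out-of-line virtual method to pin the Spiller vtable to this translation unit.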
244 void Spiller::anchor() {}
246 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
247 MachineFunction &MF, VirtRegMap &VRM,
248 VirtRegAuxInfo &VRAI) {
249 return new InlineSpiller(Pass, MF, VRM, VRAI);
252 //===----------------------------------------------------------------------===//
253 // Snippets
254 //===----------------------------------------------------------------------===//
256 // When spilling a virtual register, we also spill any snippets it is connected
257 // to. The snippets are small live ranges that only have a single real use,
258 // leftovers from live range splitting. Spilling them enables memory operand
259 // folding or tightens the live range around the single use.
261 // This minimizes register pressure and maximizes the store-to-load distance for
262 // spill slots which can be important in tight loops.
264 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
265 /// otherwise return an invalid Register.
266 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
267 if (!MI.isFullCopy())
268 return Register();
269 if (MI.getOperand(0).getReg() == Reg)
270 return MI.getOperand(1).getReg();
271 if (MI.getOperand(1).getReg() == Reg)
272 return MI.getOperand(0).getReg();
273 return Register();
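/// Make sure LiveIntervals has an interval computed for every virtual register
/// defined by MI; LiveIntervals::getInterval computes the interval on demand
/// if it does not exist yet.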
276 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
277 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
278 const MachineOperand &MO = MI.getOperand(I);
279 if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
280 LIS.getInterval(MO.getReg());
284 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
285 /// It is assumed that SnipLI is a virtual register with the same original as
286 /// Edit->getReg().
287 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
288 Register Reg = Edit->getReg();
290 // A snippet is a tiny live range with only a single instruction using it
291 // besides copies to/from Reg or spills/fills. We accept:
293 // %snip = COPY %Reg / FILL fi#
294 // %snip = USE %snip
295 // %Reg = COPY %snip / SPILL %snip, fi#
297 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
298 return false;
300 MachineInstr *UseMI = nullptr;
302 // Check that all uses satisfy our criteria.
303 for (MachineRegisterInfo::reg_instr_nodbg_iterator
304 RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
305 E = MRI.reg_instr_nodbg_end();
306 RI != E;) {
307 MachineInstr &MI = *RI++;
309 // Allow copies to/from Reg.
310 if (isFullCopyOf(MI, Reg))
311 continue;
313 // Allow stack slot loads.
314 int FI;
315 if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
316 continue;
318 // Allow stack slot stores.
319 if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
320 continue;
322 // Allow a single additional instruction.
323 if (UseMI && &MI != UseMI)
324 return false;
325 UseMI = &MI;
327 return true;
330 /// collectRegsToSpill - Collect live range snippets that only have a single
331 /// real use.
332 void InlineSpiller::collectRegsToSpill() {
333 Register Reg = Edit->getReg();
335 // Main register always spills.
336 RegsToSpill.assign(1, Reg);
337 SnippetCopies.clear();
339 // Snippets all have the same original, so there can't be any for an original
340 // register.
341 if (Original == Reg)
342 return;
344 for (MachineRegisterInfo::reg_instr_iterator
345 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
346 MachineInstr &MI = *RI++;
347 Register SnipReg = isFullCopyOf(MI, Reg);
348 if (!isSibling(SnipReg))
349 continue;
350 LiveInterval &SnipLI = LIS.getInterval(SnipReg);
351 if (!isSnippet(SnipLI))
352 continue;
353 SnippetCopies.insert(&MI);
354 if (isRegToSpill(SnipReg))
355 continue;
356 RegsToSpill.push_back(SnipReg);
357 LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
358 ++NumSnippets;
362 bool InlineSpiller::isSibling(Register Reg) {
363 return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
366 /// It is beneficial to spill to an earlier place in the same BB in a case
367 /// like the following:
368 /// There is an alternative def earlier in the same MBB.
369 /// Hoist the spill as far as possible in SpillMBB. This can ease
370 /// register pressure:
372 /// x = def
373 /// y = use x
374 /// s = copy x
376 /// Hoisting the spill of s to immediately after the def removes the
377 /// interference between x and y:
379 /// x = def
380 /// spill x
381 /// y = use killed x
383 /// This hoist only helps when the copy kills its source.
385 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
386 MachineInstr &CopyMI) {
387 SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
388 #ifndef NDEBUG
389 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
390 assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
391 #endif
393 Register SrcReg = CopyMI.getOperand(1).getReg();
394 LiveInterval &SrcLI = LIS.getInterval(SrcReg);
395 VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
396 LiveQueryResult SrcQ = SrcLI.Query(Idx);
397 MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
398 if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
399 return false;
401 // Conservatively extend the stack slot range to the range of the original
402 // value. We may be able to do better with stack slot coloring by being more
403 // careful here.
404 assert(StackInt && "No stack slot assigned yet.");
405 LiveInterval &OrigLI = LIS.getInterval(Original);
406 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
407 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
408 LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
409 << *StackInt << '\n');
411 // We are going to spill SrcVNI immediately after its def, so clear out
412 // any later spills of the same value.
413 eliminateRedundantSpills(SrcLI, SrcVNI);
415 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
416 MachineBasicBlock::iterator MII;
417 if (SrcVNI->isPHIDef())
418 MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
419 else {
420 MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
421 assert(DefMI && "Defining instruction disappeared");
422 MII = DefMI;
423 ++MII;
425 MachineInstrSpan MIS(MII, MBB);
426 // Insert spill without kill flag immediately after def.
427 TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
428 MRI.getRegClass(SrcReg), &TRI);
429 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
430 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
431 getVDefInterval(MI, LIS);
432 --MII; // Point to store instruction.
433 LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
435 // If only one store instruction is required for the spill, add it to the
436 // mergeable list. On X86 AMX, two instructions are required to store, so
437 // we disable the merge for that case.
438 if (MIS.begin() == MII)
439 HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
440 ++NumSpills;
441 return true;
444 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
445 /// redundant spills of this value in SLI.reg and sibling copies.
446 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
447 assert(VNI && "Missing value");
448 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
449 WorkList.push_back(std::make_pair(&SLI, VNI));
450 assert(StackInt && "No stack slot assigned yet.");
452 do {
453 LiveInterval *LI;
454 std::tie(LI, VNI) = WorkList.pop_back_val();
455 Register Reg = LI->reg();
456 LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
457 << VNI->def << " in " << *LI << '\n');
459 // Regs to spill are taken care of.
460 if (isRegToSpill(Reg))
461 continue;
463 // Add all of VNI's live range to StackInt.
464 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
465 LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
467 // Find all spills and copies of VNI.
468 for (MachineRegisterInfo::use_instr_nodbg_iterator
469 UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
470 UI != E; ) {
471 MachineInstr &MI = *UI++;
472 if (!MI.isCopy() && !MI.mayStore())
473 continue;
474 SlotIndex Idx = LIS.getInstructionIndex(MI);
475 if (LI->getVNInfoAt(Idx) != VNI)
476 continue;
478 // Follow sibling copies down the dominator tree.
479 if (Register DstReg = isFullCopyOf(MI, Reg)) {
480 if (isSibling(DstReg)) {
481 LiveInterval &DstLI = LIS.getInterval(DstReg);
482 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
483 assert(DstVNI && "Missing defined value");
484 assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
485 WorkList.push_back(std::make_pair(&DstLI, DstVNI));
487 continue;
490 // Erase spills.
491 int FI;
492 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
493 LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
494 // eliminateDeadDefs won't normally remove stores, so switch opcode.
495 MI.setDesc(TII.get(TargetOpcode::KILL));
496 DeadDefs.push_back(&MI);
497 ++NumSpillsRemoved;
498 if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
499 --NumSpills;
502 } while (!WorkList.empty());
505 //===----------------------------------------------------------------------===//
506 // Rematerialization
507 //===----------------------------------------------------------------------===//
509 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
510 /// instruction cannot be eliminated. See through snippet copies.
511 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
512 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
513 WorkList.push_back(std::make_pair(LI, VNI));
514 do {
515 std::tie(LI, VNI) = WorkList.pop_back_val();
516 if (!UsedValues.insert(VNI).second)
517 continue;
519 if (VNI->isPHIDef()) {
520 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
521 for (MachineBasicBlock *P : MBB->predecessors()) {
522 VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
523 if (PVNI)
524 WorkList.push_back(std::make_pair(LI, PVNI));
526 continue;
529 // Follow snippet copies.
530 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
531 if (!SnippetCopies.count(MI))
532 continue;
533 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
534 assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
535 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
536 assert(SnipVNI && "Snippet undefined before copy");
537 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
538 } while (!WorkList.empty());
541 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
542 MachineInstr &MI) {
543 if (!RestrictStatepointRemat)
544 return true;
545 // Here's a quick explanation of the problem we're trying to handle here:
546 // * There are some pseudo instructions with more vreg uses than there are
547 // physical registers on the machine.
548 // * This is normally handled by spilling the vreg, and folding the reload
549 // into the user instruction. (Thus decreasing the number of used vregs
550 // until the remainder can be assigned to physregs.)
551 // * However, since we may try to spill vregs in any order, we can end up
552 // trying to spill each operand to the instruction, and then rematting it
553 // instead. When that happens, the new live intervals (for the remats) are
554 // expected to be trivially assignable (i.e. RS_Done). However, since we
555 // may have more remats than physregs, we're guaranteed to fail to assign
556 // one.
557 // At the moment, we only handle this for STATEPOINTs since they're the only
558 // pseudo op where we've seen this. If we start seeing other instructions
559 // with the same problem, we need to revisit this.
560 if (MI.getOpcode() != TargetOpcode::STATEPOINT)
561 return true;
562 // For STATEPOINTs we allow re-materialization for fixed arguments only, hoping
563 // that the number of physical registers is enough to cover all fixed arguments.
564 // If that is not true we need to revisit it.
565 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
566 EndIdx = MI.getNumOperands();
567 Idx < EndIdx; ++Idx) {
568 MachineOperand &MO = MI.getOperand(Idx);
569 if (MO.isReg() && MO.getReg() == VReg)
570 return false;
572 return true;
575 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
576 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
577 // Analyze instruction
578 SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
579 VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
581 if (!RI.Reads)
582 return false;
584 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
585 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
587 if (!ParentVNI) {
588 LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
589 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
590 MachineOperand &MO = MI.getOperand(i);
591 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
592 MO.setIsUndef();
594 LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
595 return true;
598 if (SnippetCopies.count(&MI))
599 return false;
601 LiveInterval &OrigLI = LIS.getInterval(Original);
602 VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
603 LiveRangeEdit::Remat RM(ParentVNI);
604 RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
606 if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
607 markValueUsed(&VirtReg, ParentVNI);
608 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
609 return false;
612 // If the instruction also writes VirtReg.reg, it had better not require the
613 // same register for uses and defs.
614 if (RI.Tied) {
615 markValueUsed(&VirtReg, ParentVNI);
616 LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
617 return false;
620 // Before rematerializing into a register for a single instruction, try to
621 // fold a load into the instruction. That avoids allocating a new register.
622 if (RM.OrigMI->canFoldAsLoad() &&
623 foldMemoryOperand(Ops, RM.OrigMI)) {
624 Edit->markRematerialized(RM.ParentVNI);
625 ++NumFoldedLoads;
626 return true;
629 // If we can't guarantee that we'll be able to actually assign the new vreg,
630 // we can't remat.
631 if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
632 markValueUsed(&VirtReg, ParentVNI);
633 LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
634 return false;
637 // Allocate a new register for the remat.
638 Register NewVReg = Edit->createFrom(Original);
640 // Finally we can rematerialize OrigMI before MI.
641 SlotIndex DefIdx =
642 Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
644 // We take the DebugLoc from MI, since OrigMI may be attributed to a
645 // different source location.
646 auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
647 NewMI->setDebugLoc(MI.getDebugLoc());
649 (void)DefIdx;
650 LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
651 << *LIS.getInstructionFromIndex(DefIdx));
653 // Replace operands
654 for (const auto &OpPair : Ops) {
655 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
656 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
657 MO.setReg(NewVReg);
658 MO.setIsKill();
661 LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
663 ++NumRemats;
664 return true;
667 /// reMaterializeAll - Try to rematerialize as many uses as possible,
668 /// and trim the live ranges after.
669 void InlineSpiller::reMaterializeAll() {
670 if (!Edit->anyRematerializable(AA))
671 return;
673 UsedValues.clear();
675 // Try to remat before all uses of snippets.
676 bool anyRemat = false;
677 for (Register Reg : RegsToSpill) {
678 LiveInterval &LI = LIS.getInterval(Reg);
679 for (MachineRegisterInfo::reg_bundle_iterator
680 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
681 RegI != E; ) {
682 MachineInstr &MI = *RegI++;
684 // Debug values are not allowed to affect codegen.
685 if (MI.isDebugValue())
686 continue;
688 assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
689 "instruction that isn't a DBG_VALUE");
691 anyRemat |= reMaterializeFor(LI, MI);
694 if (!anyRemat)
695 return;
697 // Remove any values that were completely rematted.
698 for (Register Reg : RegsToSpill) {
699 LiveInterval &LI = LIS.getInterval(Reg);
700 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
701 I != E; ++I) {
702 VNInfo *VNI = *I;
703 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
704 continue;
705 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
706 MI->addRegisterDead(Reg, &TRI);
707 if (!MI->allDefsAreDead())
708 continue;
709 LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
710 DeadDefs.push_back(MI);
714 // Eliminate dead code after remat. Note that some snippet copies may be
715 // deleted here.
716 if (DeadDefs.empty())
717 return;
718 LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
719 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
721 // LiveRangeEdit::eliminateDeadDef is used to remove dead defining instructions
722 // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
723 // LiveIntervals::removeVRegDefAt is used. However, after all non-PHI VNIs are
724 // removed, PHI VNIs are still left in the LiveInterval.
725 // So to get rid of an unused register, we need to check whether it has any
726 // non-debug references rather than whether it has a non-empty interval.
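// Compact RegsToSpill in place: registers with no remaining non-debug
// references are erased from the edit, the rest are kept for spilling.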
727 unsigned ResultPos = 0;
728 for (Register Reg : RegsToSpill) {
729 if (MRI.reg_nodbg_empty(Reg)) {
730 Edit->eraseVirtReg(Reg);
731 continue;
734 assert(LIS.hasInterval(Reg) &&
735 (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
736 "Empty and not used live-range?!");
738 RegsToSpill[ResultPos++] = Reg;
740 RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
741 LLVM_DEBUG(dbgs() << RegsToSpill.size()
742 << " registers to spill after remat.\n");
745 //===----------------------------------------------------------------------===//
746 // Spilling
747 //===----------------------------------------------------------------------===//
749 /// If MI is a load or store of StackSlot, it can be removed.
750 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
751 int FI = 0;
752 Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
753 bool IsLoad = InstrReg;
754 if (!IsLoad)
755 InstrReg = TII.isStoreToStackSlot(*MI, FI);
757 // We have a stack access. Is it the right register and slot?
758 if (InstrReg != Reg || FI != StackSlot)
759 return false;
761 if (!IsLoad)
762 HSpiller.rmFromMergeableSpills(*MI, StackSlot);
764 LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
765 LIS.RemoveMachineInstrFromMaps(*MI);
766 MI->eraseFromParent();
768 if (IsLoad) {
769 ++NumReloadsRemoved;
770 --NumReloads;
771 } else {
772 ++NumSpillsRemoved;
773 --NumSpills;
776 return true;
779 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
780 LLVM_DUMP_METHOD
781 // Dump the range of instructions from B to E with their slot indexes.
782 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
783 MachineBasicBlock::iterator E,
784 LiveIntervals const &LIS,
785 const char *const header,
786 Register VReg = Register()) {
787 char NextLine = '\n';
788 char SlotIndent = '\t';
790 if (std::next(B) == E) {
791 NextLine = ' ';
792 SlotIndent = ' ';
795 dbgs() << '\t' << header << ": " << NextLine;
797 for (MachineBasicBlock::iterator I = B; I != E; ++I) {
798 SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
800 // If a register was passed in and this instruction has it as a
801 // destination that is marked as an early clobber, print the
802 // early-clobber slot index.
803 if (VReg) {
804 MachineOperand *MO = I->findRegisterDefOperand(VReg);
805 if (MO && MO->isEarlyClobber())
806 Idx = Idx.getRegSlot(true);
809 dbgs() << SlotIndent << Idx << '\t' << *I;
812 #endif
814 /// foldMemoryOperand - Try folding stack slot references in Ops into their
815 /// instructions.
817 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
818 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
819 /// @return True on success.
820 bool InlineSpiller::
821 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
822 MachineInstr *LoadMI) {
823 if (Ops.empty())
824 return false;
825 // Don't attempt folding in bundles.
826 MachineInstr *MI = Ops.front().first;
827 if (Ops.back().first != MI || MI->isBundled())
828 return false;
830 bool WasCopy = MI->isCopy();
831 Register ImpReg;
833 // TII::foldMemoryOperand will do what we need here for statepoint
834 // (fold load into use and remove corresponding def). We will replace
835 // uses of removed def with loads (spillAroundUses).
836 // For that to work we need to untie def and use to pass it through
837 // foldMemoryOperand and signal foldPatchpoint that it is allowed to
838 // fold them.
839 bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
841 // Spill subregs if the target allows it.
842 // We always want to spill subregs for stackmap/patchpoint pseudos.
843 bool SpillSubRegs = TII.isSubregFoldable() ||
844 MI->getOpcode() == TargetOpcode::STATEPOINT ||
845 MI->getOpcode() == TargetOpcode::PATCHPOINT ||
846 MI->getOpcode() == TargetOpcode::STACKMAP;
848 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
849 // operands.
850 SmallVector<unsigned, 8> FoldOps;
851 for (const auto &OpPair : Ops) {
852 unsigned Idx = OpPair.second;
853 assert(MI == OpPair.first && "Instruction conflict during operand folding");
854 MachineOperand &MO = MI->getOperand(Idx);
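// Remember an implicit operand of the spilled register; if folding succeeds,
// any matching implicit operand left on FoldMI is stripped below.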
855 if (MO.isImplicit()) {
856 ImpReg = MO.getReg();
857 continue;
860 if (!SpillSubRegs && MO.getSubReg())
861 return false;
862 // We cannot fold a load instruction into a def.
863 if (LoadMI && MO.isDef())
864 return false;
865 // Tied use operands should not be passed to foldMemoryOperand.
866 if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
867 FoldOps.push_back(Idx);
870 // If we only have implicit uses, we won't be able to fold that.
871 // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
872 if (FoldOps.empty())
873 return false;
875 MachineInstrSpan MIS(MI, MI->getParent());
877 SmallVector<std::pair<unsigned, unsigned> > TiedOps;
878 if (UntieRegs)
879 for (unsigned Idx : FoldOps) {
880 MachineOperand &MO = MI->getOperand(Idx);
881 if (!MO.isTied())
882 continue;
883 unsigned Tied = MI->findTiedOperandIdx(Idx);
884 if (MO.isUse())
885 TiedOps.emplace_back(Tied, Idx);
886 else {
887 assert(MO.isDef() && "Tied to not use and def?");
888 TiedOps.emplace_back(Idx, Tied);
890 MI->untieRegOperand(Idx);
893 MachineInstr *FoldMI =
894 LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
895 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
896 if (!FoldMI) {
897 // Re-tie operands.
898 for (auto Tied : TiedOps)
899 MI->tieOperands(Tied.first, Tied.second);
900 return false;
903 // Remove LIS for any dead defs in the original MI not in FoldMI.
904 for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
905 if (!MO->isReg())
906 continue;
907 Register Reg = MO->getReg();
908 if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
909 continue;
911 // Skip non-Defs, including undef uses and internal reads.
912 if (MO->isUse())
913 continue;
914 PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
915 if (RI.FullyDefined)
916 continue;
917 // FoldMI does not define this physreg. Remove the LI segment.
918 assert(MO->isDead() && "Cannot fold physreg def");
919 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
920 LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
923 int FI;
924 if (TII.isStoreToStackSlot(*MI, FI) &&
925 HSpiller.rmFromMergeableSpills(*MI, FI))
926 --NumSpills;
927 LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
928 // Update the call site info.
929 if (MI->isCandidateForCallSiteEntry())
930 MI->getMF()->moveCallSiteInfo(MI, FoldMI);
931 MI->eraseFromParent();
933 // Insert any new instructions other than FoldMI into the LIS maps.
934 assert(!MIS.empty() && "Unexpected empty span of instructions!");
935 for (MachineInstr &MI : MIS)
936 if (&MI != FoldMI)
937 LIS.InsertMachineInstrInMaps(MI);
939 // TII.foldMemoryOperand may have left some implicit operands on the
940 // instruction. Strip them.
941 if (ImpReg)
942 for (unsigned i = FoldMI->getNumOperands(); i; --i) {
943 MachineOperand &MO = FoldMI->getOperand(i - 1);
944 if (!MO.isReg() || !MO.isImplicit())
945 break;
946 if (MO.getReg() == ImpReg)
947 FoldMI->RemoveOperand(i - 1);
950 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
951 "folded"));
953 if (!WasCopy)
954 ++NumFolded;
955 else if (Ops.front().second == 0) {
956 ++NumSpills;
957 // If only one store instruction is required for the spill, add it to the
958 // mergeable list. On X86 AMX, two instructions are required to store, so
959 // we disable the merge for that case.
960 if (std::distance(MIS.begin(), MIS.end()) <= 1)
961 HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
962 } else
963 ++NumReloads;
964 return true;
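/// insertReload - Insert a reload of NewVReg from StackSlot before MI and
/// record the new instructions in the LiveIntervals slot index maps.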
967 void InlineSpiller::insertReload(Register NewVReg,
968 SlotIndex Idx,
969 MachineBasicBlock::iterator MI) {
970 MachineBasicBlock &MBB = *MI->getParent();
972 MachineInstrSpan MIS(MI, &MBB);
973 TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
974 MRI.getRegClass(NewVReg), &TRI);
976 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
978 LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
979 NewVReg));
980 ++NumReloads;
983 /// Check if \p Def fully defines a VReg with an undefined value.
984 /// If that's the case, that means the value of VReg is actually
985 /// not relevant.
986 static bool isRealSpill(const MachineInstr &Def) {
987 if (!Def.isImplicitDef())
988 return true;
989 assert(Def.getNumOperands() == 1 &&
990 "Implicit def with more than one definition");
991 // We can say that the VReg defined by Def is undef, only if it is
992 // fully defined by Def. Otherwise, some of the lanes may not be
993 // undef and the value of the VReg matters.
994 return Def.getOperand(0).getSubReg();
997 /// insertSpill - Insert a spill of NewVReg after MI.
998 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
999 MachineBasicBlock::iterator MI) {
1000 // Spills are not terminators, so inserting spills after terminators will
1001 // violate invariants in MachineVerifier.
1002 assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1003 MachineBasicBlock &MBB = *MI->getParent();
1005 MachineInstrSpan MIS(MI, &MBB);
1006 MachineBasicBlock::iterator SpillBefore = std::next(MI);
1007 bool IsRealSpill = isRealSpill(*MI);
1009 if (IsRealSpill)
1010 TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1011 MRI.getRegClass(NewVReg), &TRI);
1012 else
1013 // Don't spill undef value.
1014 // Anything works for undef, in particular keeping the memory
1015 // uninitialized is a viable option and it saves code size and
1016 // run time.
1017 BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1018 .addReg(NewVReg, getKillRegState(isKill));
1020 MachineBasicBlock::iterator Spill = std::next(MI);
1021 LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1022 for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1023 getVDefInterval(MI, LIS);
1025 LLVM_DEBUG(
1026 dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1027 ++NumSpills;
1028 // If only one store instruction is required for the spill, add it to the
1029 // mergeable list. On X86 AMX, two instructions are required to store, so
1030 // we disable the merge for that case.
1031 if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1032 HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1035 /// spillAroundUses - insert spill code around each use of Reg.
1036 void InlineSpiller::spillAroundUses(Register Reg) {
1037 LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1038 LiveInterval &OldLI = LIS.getInterval(Reg);
1040 // Iterate over instructions using Reg.
1041 for (MachineRegisterInfo::reg_bundle_iterator
1042 RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
1043 RegI != E; ) {
1044 MachineInstr *MI = &*(RegI++);
1046 // Debug values are not allowed to affect codegen.
1047 if (MI->isDebugValue()) {
1048 // Modify DBG_VALUE now that the value is in a spill slot.
1049 MachineBasicBlock *MBB = MI->getParent();
1050 LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
1051 buildDbgValueForSpill(*MBB, MI, *MI, StackSlot, Reg);
1052 MBB->erase(MI);
1053 continue;
1056 assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
1057 "instruction that isn't a DBG_VALUE");
1059 // Ignore copies to/from snippets. We'll delete them.
1060 if (SnippetCopies.count(MI))
1061 continue;
1063 // Stack slot accesses may coalesce away.
1064 if (coalesceStackAccess(MI, Reg))
1065 continue;
1067 // Analyze instruction.
1068 SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1069 VirtRegInfo RI = AnalyzeVirtRegInBundle(*MI, Reg, &Ops);
1071 // Find the slot index where this instruction reads and writes OldLI.
1072 // This is usually the def slot, except for tied early clobbers.
1073 SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
1074 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1075 if (SlotIndex::isSameInstr(Idx, VNI->def))
1076 Idx = VNI->def;
1078 // Check for a sibling copy.
1079 Register SibReg = isFullCopyOf(*MI, Reg);
1080 if (SibReg && isSibling(SibReg)) {
1081 // This may actually be a copy between snippets.
1082 if (isRegToSpill(SibReg)) {
1083 LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
1084 SnippetCopies.insert(MI);
1085 continue;
1087 if (RI.Writes) {
1088 if (hoistSpillInsideBB(OldLI, *MI)) {
1089 // This COPY is now dead, the value is already in the stack slot.
1090 MI->getOperand(0).setIsDead();
1091 DeadDefs.push_back(MI);
1092 continue;
1094 } else {
1095 // This is a reload for a sib-reg copy. Drop spills downstream.
1096 LiveInterval &SibLI = LIS.getInterval(SibReg);
1097 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1098 // The COPY will fold to a reload below.
1102 // Attempt to fold memory ops.
1103 if (foldMemoryOperand(Ops))
1104 continue;
1106 // Create a new virtual register for spill/fill.
1107 // FIXME: Infer regclass from instruction alone.
1108 Register NewVReg = Edit->createFrom(Reg);
1110 if (RI.Reads)
1111 insertReload(NewVReg, Idx, MI);
1113 // Rewrite instruction operands.
1114 bool hasLiveDef = false;
1115 for (const auto &OpPair : Ops) {
1116 MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1117 MO.setReg(NewVReg);
1118 if (MO.isUse()) {
1119 if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1120 MO.setIsKill();
1121 } else {
1122 if (!MO.isDead())
1123 hasLiveDef = true;
1126 LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1128 // FIXME: Use a second vreg if instruction has no tied ops.
1129 if (RI.Writes)
1130 if (hasLiveDef)
1131 insertSpill(NewVReg, true, MI);
1135 /// spillAll - Spill all registers remaining after rematerialization.
1136 void InlineSpiller::spillAll() {
1137 // Update LiveStacks now that we are committed to spilling.
1138 if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1139 StackSlot = VRM.assignVirt2StackSlot(Original);
1140 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1141 StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1142 } else
1143 StackInt = &LSS.getInterval(StackSlot);
1145 if (Original != Edit->getReg())
1146 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1148 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1149 for (Register Reg : RegsToSpill)
1150 StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1151 StackInt->getValNumInfo(0));
1152 LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1154 // Spill around uses of all RegsToSpill.
1155 for (Register Reg : RegsToSpill)
1156 spillAroundUses(Reg);
1158 // Hoisted spills may cause dead code.
1159 if (!DeadDefs.empty()) {
1160 LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1161 Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1164 // Finally delete the SnippetCopies.
1165 for (Register Reg : RegsToSpill) {
1166 for (MachineRegisterInfo::reg_instr_iterator
1167 RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1168 RI != E; ) {
1169 MachineInstr &MI = *(RI++);
1170 assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1171 // FIXME: Do this with a LiveRangeEdit callback.
1172 LIS.RemoveMachineInstrFromMaps(MI);
1173 MI.eraseFromParent();
1177 // Delete all spilled registers.
1178 for (Register Reg : RegsToSpill)
1179 Edit->eraseVirtReg(Reg);
1182 void InlineSpiller::spill(LiveRangeEdit &edit) {
1183 ++NumSpilledRanges;
1184 Edit = &edit;
1185 assert(!Register::isStackSlot(edit.getReg()) &&
1186 "Trying to spill a stack slot.");
1187 // Share a stack slot among all descendants of Original.
1188 Original = VRM.getOriginal(edit.getReg());
1189 StackSlot = VRM.getStackSlot(Original);
1190 StackInt = nullptr;
1192 LLVM_DEBUG(dbgs() << "Inline spilling "
1193 << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1194 << ':' << edit.getParent() << "\nFrom original "
1195 << printReg(Original) << '\n');
1196 assert(edit.getParent().isSpillable() &&
1197 "Attempting to spill already spilled value.");
1198 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1200 collectRegsToSpill();
1201 reMaterializeAll();
1203 // Remat may handle everything.
1204 if (!RegsToSpill.empty())
1205 spillAll();
1207 Edit->calculateRegClassAndHint(MF, VRAI);
1210 /// Optimizations after all the reg selections and spills are done.
1211 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1213 /// When a spill is inserted, add the spill to MergeableSpills map.
1214 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1215 unsigned Original) {
1216 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1217 LiveInterval &OrigLI = LIS.getInterval(Original);
1218 // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1219 // LiveInterval may be cleared after all its references are spilled.
1220 if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1221 auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1222 LI->assign(OrigLI, Allocator);
1223 StackSlotToOrigLI[StackSlot] = std::move(LI);
1225 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1226 VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1227 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1228 MergeableSpills[MIdx].insert(&Spill);
1231 /// When a spill is removed, remove the spill from MergeableSpills map.
1232 /// Return true if the spill is removed successfully.
1233 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1234 int StackSlot) {
1235 auto It = StackSlotToOrigLI.find(StackSlot);
1236 if (It == StackSlotToOrigLI.end())
1237 return false;
1238 SlotIndex Idx = LIS.getInstructionIndex(Spill);
1239 VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1240 std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1241 return MergeableSpills[MIdx].erase(&Spill);
1244 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1245 /// i.e., there should be a live sibling of OrigReg at the insert point.
1246 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1247 MachineBasicBlock &BB, Register &LiveReg) {
1248 SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1249 // The original def could be after the last insert point in the root block;
1250 // in that case we can't hoist the spill here.
1251 if (Idx < OrigVNI.def) {
1252 // TODO: We could do better here. If LI is not live in the landing pad,
1253 // we could hoist the spill after the LIP.
1254 LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1255 return false;
1257 Register OrigReg = OrigLI.reg();
1258 SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1259 assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1261 for (const Register &SibReg : Siblings) {
1262 LiveInterval &LI = LIS.getInterval(SibReg);
1263 VNInfo *VNI = LI.getVNInfoAt(Idx);
1264 if (VNI) {
1265 LiveReg = SibReg;
1266 return true;
1269 return false;
1272 /// Remove redundant spills in the same BB. Save those redundant spills in
1273 /// SpillsToRm, and save the spill to keep and its BB in the SpillBBToSpill map.
1274 void HoistSpillHelper::rmRedundantSpills(
1275 SmallPtrSet<MachineInstr *, 16> &Spills,
1276 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1277 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1278 // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1279 // another spill inside. If a BB contains more than one spill, only keep the
1280 // earlier spill with the smaller SlotIndex.
1281 for (const auto CurrentSpill : Spills) {
1282 MachineBasicBlock *Block = CurrentSpill->getParent();
1283 MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1284 MachineInstr *PrevSpill = SpillBBToSpill[Node];
1285 if (PrevSpill) {
1286 SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1287 SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1288 MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1289 MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1290 SpillsToRm.push_back(SpillToRm);
1291 SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1292 } else {
1293 SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1296 for (const auto SpillToRm : SpillsToRm)
1297 Spills.erase(SpillToRm);
1300 /// Starting from \p Root find a top-down traversal order of the dominator
1301 /// tree to visit all basic blocks containing the elements of \p Spills.
1302 /// Redundant spills will be found and put into \p SpillsToRm at the same
1303 /// time. \p SpillBBToSpill will be populated as part of the process and
1304 /// maps a basic block to the first store occurring in the basic block.
1305 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1306 void HoistSpillHelper::getVisitOrders(
1307 MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1308 SmallVectorImpl<MachineDomTreeNode *> &Orders,
1309 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1310 DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1311 DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1312 // The set contains all the possible BB nodes to which we may hoist
1313 // original spills.
1314 SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1315 // Save the BB nodes on the path from the first BB node containing
1316 // non-redundant spill to the Root node.
1317 SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1318 // All the spills to be hoisted must originate from a single def instruction
1319 // to the OrigReg. This means the def instruction should dominate all the spills
1320 // to be hoisted. We choose the BB where the def instruction is located as
1321 // the Root.
1322 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1323 // For every node on the dominator tree containing a spill, walk up the
1324 // dominator tree towards the Root node until it is reached. If another node
1325 // containing a spill is found in the middle of the path, the dominated spill
1326 // is redundant and the node containing it will be removed. All the nodes on
1327 // the path starting from the first node with a non-redundant spill to the Root
1328 // node will be added to the WorkSet, which will contain all the possible
1329 // locations where spills may be hoisted to after the loop below is done.
1330 for (const auto Spill : Spills) {
1331 MachineBasicBlock *Block = Spill->getParent();
1332 MachineDomTreeNode *Node = MDT[Block];
1333 MachineInstr *SpillToRm = nullptr;
1334 while (Node != RootIDomNode) {
1335 // If Node dominates Block, and it already contains a spill, the spill in
1336 // Block will be redundant.
1337 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1338 SpillToRm = SpillBBToSpill[MDT[Block]];
1339 break;
1340 /// If we see the Node already in WorkSet, the path from the Node to
1341 /// the Root node must already have been traversed by another spill.
1342 /// Then there is no need to repeat.
1343 } else if (WorkSet.count(Node)) {
1344 break;
1345 } else {
1346 NodesOnPath.insert(Node);
1348 Node = Node->getIDom();
1350 if (SpillToRm) {
1351 SpillsToRm.push_back(SpillToRm);
1352 } else {
1353 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1354 // set the initial status before hoisting starts. The value for BBs
1355 // containing original spills is set to 0, in order to discriminate
1356 // them from BBs containing hoisted spills, which will be inserted into
1357 // SpillsToKeep later during hoisting.
1358 SpillsToKeep[MDT[Block]] = 0;
1359 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1361 NodesOnPath.clear();
1364 // Sort the nodes in WorkSet in top-down order and save the nodes
1365 // in Orders. Orders will be used for hoisting in runHoistSpills.
1366 unsigned idx = 0;
1367 Orders.push_back(MDT.getBase().getNode(Root));
1368 do {
1369 MachineDomTreeNode *Node = Orders[idx++];
1370 for (MachineDomTreeNode *Child : Node->children()) {
1371 if (WorkSet.count(Child))
1372 Orders.push_back(Child);
1374 } while (idx != Orders.size());
1375 assert(Orders.size() == WorkSet.size() &&
1376 "Orders have different size with WorkSet");
1378 #ifndef NDEBUG
1379 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1380 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1381 for (; RIt != Orders.rend(); RIt++)
1382 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1383 LLVM_DEBUG(dbgs() << "\n");
1384 #endif
1387 /// Try to hoist spills according to BB hotness. The spills to be removed will
1388 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1389 /// \p SpillsToIns.
1390 void HoistSpillHelper::runHoistSpills(
1391 LiveInterval &OrigLI, VNInfo &OrigVNI,
1392 SmallPtrSet<MachineInstr *, 16> &Spills,
1393 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1394 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1395 // Visit order of dominator tree nodes.
1396 SmallVector<MachineDomTreeNode *, 32> Orders;
1397 // SpillsToKeep contains all the nodes where spills are to be inserted
1398 // during hoisting. If the spill to be inserted is an original spill
1399 // (not a hoisted one), the value of the map entry is 0. If the spill
1400 // is a hoisted spill, the value of the map entry is the VReg to be used
1401 // as the source of the spill.
1402 DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1403 // Map from BB to the first spill inside of it.
1404 DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1406 rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1408 MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1409 getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1410 SpillBBToSpill);
1412 // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1413 // nodes set and the cost of all the spills inside those nodes.
1414 // The nodes set are the locations where spills are to be inserted
1415 // in the subtree of current node.
1416 using NodesCostPair =
1417 std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1418 DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1420 // Iterate Orders set in reverse order, which will be a bottom-up order
1421 // in the dominator tree. Once we visit a dom tree node, we know its
1422 // children have already been visited and the spill locations in the
1423 // subtrees of all the children have been determined.
1424 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1425 for (; RIt != Orders.rend(); RIt++) {
1426 MachineBasicBlock *Block = (*RIt)->getBlock();
1428 // If Block contains an original spill, simply continue.
1429 if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1430 SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1431 // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1432 SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1433 continue;
1436 // Collect spills in subtree of current node (*RIt) to
1437 // SpillsInSubTreeMap[*RIt].first.
1438 for (MachineDomTreeNode *Child : (*RIt)->children()) {
1439 if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1440 continue;
1441 // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1442 // should be placed before getting the begin and end iterators of
1443 // SpillsInSubTreeMap[Child].first, or else the iterators may be
1444 // invalidated when SpillsInSubTreeMap[*RIt] is seen for the first time
1445 // and the map grows, moving the original buckets in the map.
1446 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1447 SpillsInSubTreeMap[*RIt].first;
1448 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1449 SubTreeCost += SpillsInSubTreeMap[Child].second;
1450 auto BI = SpillsInSubTreeMap[Child].first.begin();
1451 auto EI = SpillsInSubTreeMap[Child].first.end();
1452 SpillsInSubTree.insert(BI, EI);
1453 SpillsInSubTreeMap.erase(Child);
1456 SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1457 SpillsInSubTreeMap[*RIt].first;
1458 BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1459 // No spills in subtree, simply continue.
1460 if (SpillsInSubTree.empty())
1461 continue;
1463 // Check whether Block is a possible candidate to insert spill.
1464 Register LiveReg;
1465 if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1466 continue;
1468 // If there are multiple spills that could be merged, bias a little
1469 // to hoist the spill.
1470 BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1471 ? BranchProbability(9, 10)
1472 : BranchProbability(1, 1);
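// Hoist only if the accumulated frequency of the spills in the subtree
// outweighs the cost of a single spill placed in this block, scaled by
// MarginProb; otherwise leave the spills where they are.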
1473 if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1474 // Hoist: Move spills to current Block.
1475 for (const auto SpillBB : SpillsInSubTree) {
1476 // When SpillBB is a BB that contains an original spill, add the spill
1477 // to SpillsToRm.
1478 if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1479 !SpillsToKeep[SpillBB]) {
1480 MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1481 SpillsToRm.push_back(SpillToRm);
1483 // SpillBB will not contain a spill anymore, so remove it from SpillsToKeep.
1484 SpillsToKeep.erase(SpillBB);
1486 // Current Block is the BB containing the new hoisted spill. Add it to
1487 // SpillsToKeep. LiveReg is the source of the new spill.
1488 SpillsToKeep[*RIt] = LiveReg;
1489 LLVM_DEBUG({
1490 dbgs() << "spills in BB: ";
1491 for (const auto Rspill : SpillsInSubTree)
1492 dbgs() << Rspill->getBlock()->getNumber() << " ";
1493 dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1494 << "\n";
1496 SpillsInSubTree.clear();
1497 SpillsInSubTree.insert(*RIt);
1498 SubTreeCost = MBFI.getBlockFreq(Block);
1501 // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1502 // save them to SpillsToIns.
1503 for (const auto &Ent : SpillsToKeep) {
1504 if (Ent.second)
1505 SpillsToIns[Ent.first->getBlock()] = Ent.second;
1509 /// For spills with equal values, remove redundant spills and hoist those left
1510 /// to less hot spots.
1512 /// Spills with equal values will be collected into the same set in
1513 /// MergeableSpills when a spill is inserted. These equal spills originate
1514 /// from the same defining instruction and are dominated by that instruction.
1515 /// Before hoisting all the equal spills, redundant spills inside the same
1516 /// BB are first marked to be deleted. Then, starting from the spills left, walk
1517 /// up the dominator tree towards the Root node where the defining instruction
1518 /// is located, mark the dominated spills to be deleted along the way and
1519 /// collect the BB nodes on the path from non-dominated spills to the defining
1520 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1521 /// where we consider hoisting the spills. We iterate the WorkSet in
1522 /// bottom-up order, and for each node, we decide whether to hoist the spills
1523 /// inside its subtree to that node. In this way, we can get benefit locally
1524 /// even if hoisting all the equal spills to one cold place is impossible.
1525 void HoistSpillHelper::hoistAllSpills() {
1526 SmallVector<Register, 4> NewVRegs;
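// This helper is registered as the LiveRangeEdit delegate, so any virtual
// register cloned by the edit gets its physreg or stack slot assignment
// copied in LRE_DidCloneVirtReg below.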
1527 LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
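// Group sibling virtual registers (those split from the same original
// register) so isSpillCandBB can look for a live sibling to spill from.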
1529 for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1530 Register Reg = Register::index2VirtReg(i);
1531 Register Original = VRM.getPreSplitReg(Reg);
1532 if (!MRI.def_empty(Reg))
1533 Virt2SiblingsMap[Original].insert(Reg);
1536 // Each entry in MergeableSpills contains a spill set with equal values.
1537 for (auto &Ent : MergeableSpills) {
1538 int Slot = Ent.first.first;
1539 LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1540 VNInfo *OrigVNI = Ent.first.second;
1541 SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1542 if (Ent.second.empty())
1543 continue;
1545 LLVM_DEBUG({
1546 dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1547 << "Equal spills in BB: ";
1548 for (const auto spill : EqValSpills)
1549 dbgs() << spill->getParent()->getNumber() << " ";
1550 dbgs() << "\n";
1553 // SpillsToRm is the spill set to be removed from EqValSpills.
1554 SmallVector<MachineInstr *, 16> SpillsToRm;
1555 // SpillsToIns is the spill set to be newly inserted after hoisting.
1556 DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1558 runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1560 LLVM_DEBUG({
1561 dbgs() << "Finally inserted spills in BB: ";
1562 for (const auto &Ispill : SpillsToIns)
1563 dbgs() << Ispill.first->getNumber() << " ";
1564 dbgs() << "\nFinally removed spills in BB: ";
1565 for (const auto Rspill : SpillsToRm)
1566 dbgs() << Rspill->getParent()->getNumber() << " ";
1567 dbgs() << "\n";
1570 // Stack live range update.
1571 LiveInterval &StackIntvl = LSS.getInterval(Slot);
1572 if (!SpillsToIns.empty() || !SpillsToRm.empty())
1573 StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1574 StackIntvl.getValNumInfo(0));
1576 // Insert hoisted spills.
1577 for (auto const &Insert : SpillsToIns) {
1578 MachineBasicBlock *BB = Insert.first;
1579 Register LiveReg = Insert.second;
1580 MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1581 MachineInstrSpan MIS(MII, BB);
1582 TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1583 MRI.getRegClass(LiveReg), &TRI);
1584 LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1585 for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1586 getVDefInterval(MI, LIS);
1587 ++NumSpills;
1590 // Remove redundant spills or change them to dead instructions.
1591 NumSpills -= SpillsToRm.size();
1592 for (auto const RMEnt : SpillsToRm) {
1593 RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1594 for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1595 MachineOperand &MO = RMEnt->getOperand(i - 1);
1596 if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1597 RMEnt->RemoveOperand(i - 1);
1600 Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1604 /// When a virtual register is cloned, the \p New register should get the same
1605 /// physreg or stackslot assignment as the \p Old register.
1606 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1607 if (VRM.hasPhys(Old))
1608 VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1609 else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1610 VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1611 else
1612 llvm_unreachable("VReg should be assigned either physreg or stackslot");
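// Also copy the register shape, if any (used by targets with shaped registers
// such as X86 AMX tiles).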
1613 if (VRM.hasShape(Old))
1614 VRM.assignVirt2Shape(New, VRM.getShape(Old));