//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
14 #include "LiveRangeCalc.h"
15 #include "Spiller.h"
16 #include "SplitKit.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/CodeGen/LiveInterval.h"
28 #include "llvm/CodeGen/LiveIntervals.h"
29 #include "llvm/CodeGen/LiveRangeEdit.h"
30 #include "llvm/CodeGen/LiveStacks.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineFunctionPass.h"
36 #include "llvm/CodeGen/MachineInstr.h"
37 #include "llvm/CodeGen/MachineInstrBuilder.h"
38 #include "llvm/CodeGen/MachineInstrBundle.h"
39 #include "llvm/CodeGen/MachineLoopInfo.h"
40 #include "llvm/CodeGen/MachineOperand.h"
41 #include "llvm/CodeGen/MachineRegisterInfo.h"
42 #include "llvm/CodeGen/SlotIndexes.h"
43 #include "llvm/CodeGen/TargetInstrInfo.h"
44 #include "llvm/CodeGen/TargetOpcodes.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/CodeGen/TargetSubtargetInfo.h"
47 #include "llvm/CodeGen/VirtRegMap.h"
48 #include "llvm/Config/llvm-config.h"
49 #include "llvm/Support/BlockFrequency.h"
50 #include "llvm/Support/BranchProbability.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <cassert>
57 #include <iterator>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));
namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stackslot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, unsigned &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
};
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object that records spill information and performs the hoisting.
  HoistSpillHelper HSpiller;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(pass, mf, vrm) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(unsigned Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};

} // end anonymous namespace
Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
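//
// For illustration only (hypothetical MIR; register names are made up): after
// splitting, a leftover range such as
//
//   %snip = COPY %x
//   ...                    ; long gap with high register pressure
//   use %snip              ; the single real use
//
// is spilled along with %x, so the value sits in the stack slot across the
// gap and is reloaded (or folded into the user) right at the use.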
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
  if (!MI.isFullCopy())
    return 0;
  if (MI.getOperand(0).getReg() == Reg)
    return MI.getOperand(1).getReg();
  if (MI.getOperand(1).getReg() == Reg)
    return MI.getOperand(0).getReg();
  return 0;
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_instr_iterator
       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr &MI = *RI++;
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}
bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}
/// It is beneficial to spill to an earlier place in the same BB in the
/// following case:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  unsigned SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(*MII);
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr &MI = *UI++;
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//
/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  return (MI.getOpcode() != TargetOpcode::STATEPOINT);
}
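
// For illustration only (hypothetical MIR, not taken from a real test): a
// STATEPOINT can carry more virtual register operands than the target has
// physical registers, e.g.
//
//   STATEPOINT ..., %v0, %v1, ..., %v40
//
// If every %vN is rematerialized right before the statepoint instead of being
// folded as a stack operand, all 41 remat intervals must hold physregs at the
// same program point, which is impossible; hence the -restrict-statepoint-remat
// escape hatch checked above.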
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg, MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::reg_bundle_iterator
         RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
         RegI != E; ) {
      MachineInstr &MI = *RegI++;

      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of an unused register, we need to check whether it has a
  // non-dbg reference instead of whether it has a non-empty interval.
  unsigned ResultPos = 0;
  for (unsigned Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
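///
/// For illustration only (hypothetical MIR): once %x has been assigned stack
/// slot fi#0, an access such as
///
///   STORE %x, fi#0   ; %x was just reloaded from fi#0
///
/// writes back a value the slot already holds, so the access can simply be
/// deleted instead of being rewritten.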
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               unsigned VReg = 0) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
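///
/// For illustration only (hypothetical MIR): when spilling %x to fi#0, a
/// reload-then-use pair such as
///
///   %x = LOAD fi#0
///   %y = ADD %x, %z
///
/// can, on targets whose ADD accepts a memory operand, be folded into
///
///   %y = ADD [fi#0], %z
///
/// removing both the reload and the register needed to hold %x.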
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI, MI->getParent());

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  if (MI->isCall())
    MI->getMF()->updateCallSiteInfo(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}
void InlineSpiller::insertReload(unsigned NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}
/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isFullUndefDef(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return false;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return !Def.getOperand(0).getSubReg();
}
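
// For illustration only (hypothetical MIR): spilling the result of
//
//   %x = IMPLICIT_DEF
//
// needs no real store, since whatever the stack slot already contains is as
// good as the undefined value; insertSpill below emits a KILL instead. A
// partial def such as "%x.sub0 = IMPLICIT_DEF" does not qualify, because the
// other lanes of %x may hold a meaningful value.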
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  bool IsRealSpill = true;
  if (isFullUndefDef(*MI)) {
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));
    IsRealSpill = false;
  } else
    TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                                "spill"));
  ++NumSpills;
  if (IsRealSpill)
    HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI->getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
      buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
      MBB->erase(MI);
      continue;
    }

    assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
        MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(*MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, *MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
  }

  // Finally delete the SnippetCopies.
  for (unsigned Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr &MI = *(RI++);
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
    auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}
/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}
/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, unsigned &LiveReg) {
  SlotIndex Idx;
  unsigned OrigReg = OrigLI.reg;
  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
  if (MI != BB.end())
    Idx = LIS.getInstructionIndex(*MI);
  else
    Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
  SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (auto const SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}
/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with the smaller SlotIndex.
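  //
  // For illustration only (hypothetical example): if a BB contains two spills
  // of the same value,
  //
  //   STORE %a, %stack.0   ; SlotIndex 16
  //   ...
  //   STORE %b, %stack.0   ; SlotIndex 48, same (slot, VNI) key
  //
  // the second store writes back a value the slot already holds, so the spill
  // at index 48 goes to SpillsToRm and the one at index 16 is kept.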
  for (const auto CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (const auto SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}
/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with spill, walk up on the dominator
  // tree towards the Root node until it is reached. If there is another node
  // containing a spill in the middle of the path, the spill seen earlier will
  // be redundant and the node containing it will be removed. All the nodes on
  // the path starting from the first node with non-redundant spill to the Root
  // node will be added to the WorkSet, which will contain all the possible
  // locations where spills may be hoisted to after the loop below is done.
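  //
  // For illustration only (hypothetical dominator tree): with Root = A and
  // spills in C and D,
  //
  //        A
  //        |
  //        B
  //       / \
  //      C   D
  //
  // walking up from C and from D both reach Root through B, so WorkSet ends
  // up as {A, B, C, D}: every node on those paths is a legal hoist target.
  // If B itself also contained a spill, the spills in C and D would be
  // dominated by it and would go to SpillsToRm instead.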
  for (const auto Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
        /// If we see the Node already in WorkSet, the path from the Node to
        /// the Root node must already be traversed by another spill.
        /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting start. The value of BBs
      // containing original spills is set to 0, in order to discriminate
      // with BBs containing hoisted spills which will be inserted to
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }
  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}
/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
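      //
      // Illustrative sketch of the hazard (not real code from this file):
      //
      //   auto BI = SpillsInSubTreeMap[Child].first.begin(); // take iterator
      //   auto &S = SpillsInSubTreeMap[*RIt].first; // may grow the DenseMap!
      //   S.insert(BI, ...);                        // BI may now be dangling
      //
      // Touching SpillsInSubTreeMap[*RIt] first makes any rehash happen
      // before the Child iterators are taken.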
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    unsigned LiveReg = 0;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
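    //
    // For illustration only (made-up frequencies): with two spills in child
    // blocks of frequency 30 each, SubTreeCost = 60. Hoisting into a parent
    // block of frequency 50 pays 50 but saves 60; with the 9/10 bias the
    // comparison below is 60 > 50 * 0.9 = 45, so the two spills are merged
    // into one spill in the parent. A single spill of cost 30 would stay put,
    // since 30 > 50 is false even without the bias.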
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (const auto SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB that contains an original spill, insert the
        // spill to SpillsToRm.
        if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
            !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
  // save them to SpillsToIns.
  for (const auto Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by the instruction.
/// Before hoisting all the equal spills, redundant spills inside the same
/// BB are first marked to be deleted. Then starting from the spills left, walk
/// up on the dominator tree towards the Root node where the define instruction
/// is located, mark the dominated spills to be deleted along the way and
/// collect the BB nodes on the path from non-dominated spills to the define
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we are considering to hoist the spills. We iterate the WorkSet in
/// bottom-up order, and for each node, we will decide whether to hoist spills
/// inside its subtree to that node. In this way, we can get benefit locally
/// even if hoisting all the equal spills to one cold place is impossible.
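///
/// For illustration only (hypothetical CFG): if the same value is spilled in
/// two blocks inside a loop, and a dominating block outside the loop has a
/// live sibling of the original register, the two hot spills can be replaced
/// by one spill in the cold dominating block, e.g.
///
///   preheader:  STORE %sib, %stack.0   ; one hoisted spill (cold)
///   loop.a:                            ; spill removed
///   loop.b:                            ; spill removed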
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<unsigned, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    unsigned Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      unsigned LiveReg = Insert.second;
      MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
      TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI);
      LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->RemoveOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, None, AA);
  }
}
/// For VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
}