//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"
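
// Compute spill weights and allocation hints for all virtual register
// live intervals in the current function.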
void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
                                  const TargetRegisterInfo &TRI,
                                  const MachineRegisterInfo &MRI) {
  unsigned Sub, HSub;
  Register HReg;
  if (MI->getOperand(0).getReg() == Reg) {
    Sub = MI->getOperand(0).getSubReg();
    HReg = MI->getOperand(1).getReg();
    HSub = MI->getOperand(1).getSubReg();
  } else {
    Sub = MI->getOperand(1).getSubReg();
    HReg = MI->getOperand(0).getReg();
    HSub = MI->getOperand(0).getSubReg();
  }

  if (!HReg)
    return 0;

  if (Register::isVirtualRegister(HReg))
    return Sub == HSub ? HReg : Register();

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
  if (RC->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (Sub)
    return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);

  return 0;
}

// Check if all values in LI are rematerializable.
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  Register Original = VRM.getOriginal(Reg);
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (MI->isFullCopy()) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match, this copy came
      // from a split.
      if (!Register::isVirtualRegister(Reg) || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    if (!TII.isTriviallyReMaterializable(*MI))
      return false;
  }
  return true;
}
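
// Return true if LI's register is used as one of the variadic operands of a
// STATEPOINT instruction. Such operands can be spilled and folded into the
// statepoint, so the interval should remain spillable.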
bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
  return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
                [](MachineOperand &MO) {
                  MachineInstr *MI = MO.getParent();
                  if (MI->getOpcode() != TargetOpcode::STATEPOINT)
                    return false;
                  return StatepointOpers(MI).getVarIdx() <=
                         MI->getOperandNo(&MO);
                });
}
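
// (Re)compute LI's spill weight and allocation hint.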
void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
  float Weight = weightCalcHelper(LI);
  // Check if unspillable.
  if (Weight < 0)
    return;
  LI.setWeight(Weight);
}
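
// Compute the spill weight for LI. If Start and End are provided, only the
// part of the live range between them is weighted, as a future local split
// artifact; otherwise the whole interval is weighted and its allocation
// hints are updated. A negative return value marks the interval unspillable.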
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                       SlotIndex *End) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  MachineLoop *Loop = nullptr;
  bool IsExiting = false;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

  std::pair<Register, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // LI comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  bool IsLocalSplitArtifact = Start && End;

  // Do not update future local split artifacts.
  bool ShouldUpdateLI = !IsLocalSplitArtifact;

  if (IsLocalSplitArtifact) {
    MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
    assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
           "start and end are expected to be in the same basic block");

    // Local split artifact will have 2 additional copy instructions and they
    // will be in the same BB.
    // localLI = COPY other
    // ...
    // other   = COPY localLI
    TotalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB);
    TotalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB);

    NumInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    const Register Reg;
    const float Weight;
    CopyHint(Register R, float W) : Reg(R), Weight(W) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };
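
  // Hint accumulates, per candidate register, the total weight of the copies
  // connecting it to LI; CopyHints keeps the candidates ordered by that
  // weight, with physical registers preferred.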
  std::set<CopyHint> CopyHints;
  DenseMap<unsigned, float> Hint;
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex SI = LIS.getInstructionIndex(*MI);
    if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
      continue;

    NumInstr++;
    if (MI->isIdentityCopy() || MI->isImplicitDef())
      continue;
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) && MI->definesRegister(LI.reg())) {
      LI.markNotSpillable();
      return -1.0f;
    }

    float Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for MI.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!MI->isCopy())
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (!HintReg)
      continue;
    // Force HWeight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float HWeight = Hint[HintReg] += Weight;
    if (HintReg.isVirtual() || MRI.isAllocatable(HintReg))
      CopyHints.insert(CopyHint(HintReg, HWeight));
  }

  // Pass all the sorted copy hints to MRI.
  if (ShouldUpdateLI && CopyHints.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    std::set<Register> HintedRegs;
    for (const auto &Hint : CopyHints) {
      if (!HintedRegs.insert(Hint.Reg).second ||
          (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      MRI.addRegAllocationHint(LI.reg(), Hint.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark LI as unspillable if all live ranges are tiny and the interval is
  // not live at any reg mask. If the interval is live at a reg mask, spilling
  // may be required. An interval used in a STATEPOINT must stay spillable:
  // marking it as not spillable risks leaving no register to allocate, while
  // a STATEPOINT is perfectly fine having this operand on the stack, so
  // spilling such an interval and folding its load from the stack into the
  // instruction itself makes perfect sense.
  if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  if (IsLocalSplitArtifact)
    return normalize(TotalWeight, Start->distance(*End), NumInstr);
  return normalize(TotalWeight, LI.getSize(), NumInstr);
}