[Clang/AMDGPU] Zero sized arrays not allowed in HIP device code. (#113470)
[llvm-project.git] / llvm / lib / CodeGen / CalcSpillWeights.cpp
blob6a9dc60584a3320aec47799fcda26806dc6503e0
1 //===- CalcSpillWeights.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "llvm/CodeGen/CalcSpillWeights.h"
10 #include "llvm/ADT/SmallPtrSet.h"
11 #include "llvm/CodeGen/LiveInterval.h"
12 #include "llvm/CodeGen/LiveIntervals.h"
13 #include "llvm/CodeGen/MachineFunction.h"
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/CodeGen/MachineLoopInfo.h"
16 #include "llvm/CodeGen/MachineOperand.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/StackMaps.h"
19 #include "llvm/CodeGen/TargetInstrInfo.h"
20 #include "llvm/CodeGen/TargetRegisterInfo.h"
21 #include "llvm/CodeGen/TargetSubtargetInfo.h"
22 #include "llvm/CodeGen/VirtRegMap.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/MathExtras.h"
25 #include "llvm/Support/raw_ostream.h"
26 #include <cassert>
27 #include <tuple>
29 using namespace llvm;
31 #define DEBUG_TYPE "calcspillweights"
33 void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
34 LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
35 << "********** Function: " << MF.getName() << '\n');
37 MachineRegisterInfo &MRI = MF.getRegInfo();
38 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
39 Register Reg = Register::index2VirtReg(I);
40 if (MRI.reg_nodbg_empty(Reg))
41 continue;
42 calculateSpillWeightAndHint(LIS.getInterval(Reg));
46 // Return the preferred allocation register for reg, given a COPY instruction.
47 Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
48 const TargetRegisterInfo &TRI,
49 const MachineRegisterInfo &MRI) {
50 unsigned Sub, HSub;
51 Register HReg;
52 if (MI->getOperand(0).getReg() == Reg) {
53 Sub = MI->getOperand(0).getSubReg();
54 HReg = MI->getOperand(1).getReg();
55 HSub = MI->getOperand(1).getSubReg();
56 } else {
57 Sub = MI->getOperand(1).getSubReg();
58 HReg = MI->getOperand(0).getReg();
59 HSub = MI->getOperand(0).getSubReg();
62 if (!HReg)
63 return 0;
65 if (HReg.isVirtual())
66 return Sub == HSub ? HReg : Register();
68 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
69 MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
70 if (RC->contains(CopiedPReg))
71 return CopiedPReg;
73 // Check if reg:sub matches so that a super register could be hinted.
74 if (Sub)
75 return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);
77 return 0;
// Check if all values in LI are rematerializable.
// Every value number in the interval must ultimately be defined by an
// instruction the target considers trivially rematerializable; copies
// inserted by live-range splitting are traced back to the original
// definition first.
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  Register Original = VRM.getOriginal(Reg);
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    // A PHI-defined value has no single defining instruction to recompute.
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (TII.isFullCopyInstr(*MI)) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match this
      // copy came from a split.
      if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    if (!TII.isTriviallyReMaterializable(*MI))
      return false;
  }
  return true;
}
131 bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
132 return any_of(VRM.getRegInfo().reg_operands(LI.reg()),
133 [](MachineOperand &MO) {
134 MachineInstr *MI = MO.getParent();
135 if (MI->getOpcode() != TargetOpcode::STATEPOINT)
136 return false;
137 return StatepointOpers(MI).getVarIdx() <= MO.getOperandNo();
141 void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
142 float Weight = weightCalcHelper(LI);
143 // Check if unspillable.
144 if (Weight < 0)
145 return;
146 LI.setWeight(Weight);
149 static bool canMemFoldInlineAsm(LiveInterval &LI,
150 const MachineRegisterInfo &MRI) {
151 for (const MachineOperand &MO : MRI.reg_operands(LI.reg())) {
152 const MachineInstr *MI = MO.getParent();
153 if (MI->isInlineAsm() && MI->mayFoldInlineAsmRegOp(MI->getOperandNo(&MO)))
154 return true;
157 return false;
// Compute the spill weight for LI and, unless LI is a future local split
// artifact, install copy-derived allocation hints in MRI. When Start/End are
// both non-null, only instructions inside that slice of a single basic block
// are considered. Returns a negative value when the interval is (or becomes)
// unspillable; otherwise returns the normalized weight.
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
                                       SlotIndex *End) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = nullptr;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  // Weight each instruction only once, even if it references LI.reg() in
  // several operands.
  SmallPtrSet<MachineInstr *, 8> Visited;

  std::pair<unsigned, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // li comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  bool IsLocalSplitArtifact = Start && End;

  // Do not update future local split artifacts.
  bool ShouldUpdateLI = !IsLocalSplitArtifact;

  if (IsLocalSplitArtifact) {
    MachineBasicBlock *LocalMBB = LIS.getMBBFromIndex(*End);
    assert(LocalMBB == LIS.getMBBFromIndex(*Start) &&
           "start and end are expected to be in the same basic block");

    // Local split artifact will have 2 additional copy instructions and they
    // will be in the same BB.
    // localLI = COPY other
    // ...
    // other = COPY localLI
    TotalWeight +=
        LiveIntervals::getSpillWeight(true, false, &MBFI, LocalMBB, PSI);
    TotalWeight +=
        LiveIntervals::getSpillWeight(false, true, &MBFI, LocalMBB, PSI);

    NumInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    Register Reg;
    float Weight;
    CopyHint(Register R, float W) : Reg(R), Weight(W) {}
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  bool IsExiting = false;
  SmallDenseMap<Register, float, 8> Hint;
  // Advance the iterator before using MI, so the walk survives changes to the
  // use list made inside the loop.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *MI = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex SI = LIS.getInstructionIndex(*MI);
    if (IsLocalSplitArtifact && ((SI < *Start) || (SI > *End)))
      continue;

    NumInstr++;
    bool identityCopy = false;
    auto DestSrc = TII.isCopyInstr(*MI);
    if (DestSrc) {
      const MachineOperand *DestRegOp = DestSrc->Destination;
      const MachineOperand *SrcRegOp = DestSrc->Source;
      identityCopy = DestRegOp->getReg() == SrcRegOp->getReg() &&
                     DestRegOp->getSubReg() == SrcRegOp->getSubReg();
    }

    // Identity copies and IMPLICIT_DEFs contribute neither weight nor hints.
    if (identityCopy || MI->isImplicitDef())
      continue;
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) &&
        MI->definesRegister(LI.reg(), /*TRI=*/nullptr)) {
      LI.markNotSpillable();
      return -1.0f;
    }

    // Force Weight onto the stack so that x86 doesn't add hidden precision.
    stack_float_t Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for mi.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        const MachineLoop *Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI, PSI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!TII.isCopyInstr(*MI))
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    // Accumulate weight per hint register; heavier hints sort first below.
    if (HintReg && (HintReg.isVirtual() || MRI.isAllocatable(HintReg)))
      Hint[HintReg] += Weight;
  }

  // Pass all the sorted copy hints to mri.
  if (ShouldUpdateLI && Hint.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    // Don't add the target-type hint again.
    Register SkipReg = TargetHint.first != 0 ? TargetHint.second : Register();
    SmallVector<CopyHint, 8> RegHints;
    for (const auto &[Reg, Weight] : Hint) {
      if (Reg != SkipReg)
        RegHints.emplace_back(Reg, Weight);
    }
    // Sort by CopyHint::operator< (physregs first, then heavier weights).
    sort(RegHints);
    for (const auto &[Reg, Weight] : RegHints)
      MRI.addRegAllocationHint(LI.reg(), Reg);

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask
  // spilling may be required. If li is live as use in statepoint instruction
  // spilling may be required due to if we mark interval with use in statepoint
  // as not spillable we are risky to end up with no register to allocate.
  // At the same time STATEPOINT instruction is perfectly fine to have this
  // operand on stack, so spilling such interval and folding its load from stack
  // into instruction itself makes perfect sense.
  if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  if (IsLocalSplitArtifact)
    return normalize(TotalWeight, Start->distance(*End), NumInstr);
  return normalize(TotalWeight, LI.getSize(), NumInstr);
}