//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <set>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"
34 void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
35 LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
36 << "********** Function: " << MF
.getName() << '\n');
38 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
39 for (unsigned I
= 0, E
= MRI
.getNumVirtRegs(); I
!= E
; ++I
) {
40 Register Reg
= Register::index2VirtReg(I
);
41 if (MRI
.reg_nodbg_empty(Reg
))
43 calculateSpillWeightAndHint(LIS
.getInterval(Reg
));
47 // Return the preferred allocation register for reg, given a COPY instruction.
48 Register
VirtRegAuxInfo::copyHint(const MachineInstr
*MI
, unsigned Reg
,
49 const TargetRegisterInfo
&TRI
,
50 const MachineRegisterInfo
&MRI
) {
53 if (MI
->getOperand(0).getReg() == Reg
) {
54 Sub
= MI
->getOperand(0).getSubReg();
55 HReg
= MI
->getOperand(1).getReg();
56 HSub
= MI
->getOperand(1).getSubReg();
58 Sub
= MI
->getOperand(1).getSubReg();
59 HReg
= MI
->getOperand(0).getReg();
60 HSub
= MI
->getOperand(0).getSubReg();
67 return Sub
== HSub
? HReg
: Register();
69 const TargetRegisterClass
*RC
= MRI
.getRegClass(Reg
);
70 MCRegister CopiedPReg
= HSub
? TRI
.getSubReg(HReg
, HSub
) : HReg
.asMCReg();
71 if (RC
->contains(CopiedPReg
))
74 // Check if reg:sub matches so that a super register could be hinted.
76 return TRI
.getMatchingSuperReg(CopiedPReg
, Sub
, RC
);
81 // Check if all values in LI are rematerializable
82 bool VirtRegAuxInfo::isRematerializable(const LiveInterval
&LI
,
83 const LiveIntervals
&LIS
,
84 const VirtRegMap
&VRM
,
85 const TargetInstrInfo
&TII
) {
86 Register Reg
= LI
.reg();
87 Register Original
= VRM
.getOriginal(Reg
);
88 for (LiveInterval::const_vni_iterator I
= LI
.vni_begin(), E
= LI
.vni_end();
90 const VNInfo
*VNI
= *I
;
96 MachineInstr
*MI
= LIS
.getInstructionFromIndex(VNI
->def
);
97 assert(MI
&& "Dead valno in interval");
99 // Trace copies introduced by live range splitting. The inline
100 // spiller can rematerialize through these copies, so the spill
101 // weight must reflect this.
102 while (TII
.isFullCopyInstr(*MI
)) {
103 // The copy destination must match the interval register.
104 if (MI
->getOperand(0).getReg() != Reg
)
107 // Get the source register.
108 Reg
= MI
->getOperand(1).getReg();
110 // If the original (pre-splitting) registers match this
111 // copy came from a split.
112 if (!Reg
.isVirtual() || VRM
.getOriginal(Reg
) != Original
)
115 // Follow the copy live-in value.
116 const LiveInterval
&SrcLI
= LIS
.getInterval(Reg
);
117 LiveQueryResult SrcQ
= SrcLI
.Query(VNI
->def
);
118 VNI
= SrcQ
.valueIn();
119 assert(VNI
&& "Copy from non-existing value");
122 MI
= LIS
.getInstructionFromIndex(VNI
->def
);
123 assert(MI
&& "Dead valno in interval");
126 if (!TII
.isTriviallyReMaterializable(*MI
))
132 bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval
&LI
) {
133 return any_of(VRM
.getRegInfo().reg_operands(LI
.reg()),
134 [](MachineOperand
&MO
) {
135 MachineInstr
*MI
= MO
.getParent();
136 if (MI
->getOpcode() != TargetOpcode::STATEPOINT
)
138 return StatepointOpers(MI
).getVarIdx() <= MO
.getOperandNo();
142 void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval
&LI
) {
143 float Weight
= weightCalcHelper(LI
);
144 // Check if unspillable.
147 LI
.setWeight(Weight
);
150 static bool canMemFoldInlineAsm(LiveInterval
&LI
,
151 const MachineRegisterInfo
&MRI
) {
152 for (const MachineOperand
&MO
: MRI
.reg_operands(LI
.reg())) {
153 const MachineInstr
*MI
= MO
.getParent();
154 if (MI
->isInlineAsm() && MI
->mayFoldInlineAsmRegOp(MI
->getOperandNo(&MO
)))
161 float VirtRegAuxInfo::weightCalcHelper(LiveInterval
&LI
, SlotIndex
*Start
,
163 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
164 const TargetRegisterInfo
&TRI
= *MF
.getSubtarget().getRegisterInfo();
165 const TargetInstrInfo
&TII
= *MF
.getSubtarget().getInstrInfo();
166 MachineBasicBlock
*MBB
= nullptr;
167 float TotalWeight
= 0;
168 unsigned NumInstr
= 0; // Number of instructions using LI
169 SmallPtrSet
<MachineInstr
*, 8> Visited
;
171 std::pair
<unsigned, Register
> TargetHint
= MRI
.getRegAllocationHint(LI
.reg());
173 if (LI
.isSpillable()) {
174 Register Reg
= LI
.reg();
175 Register Original
= VRM
.getOriginal(Reg
);
176 const LiveInterval
&OrigInt
= LIS
.getInterval(Original
);
177 // li comes from a split of OrigInt. If OrigInt was marked
178 // as not spillable, make sure the new interval is marked
179 // as not spillable as well.
180 if (!OrigInt
.isSpillable())
181 LI
.markNotSpillable();
184 // Don't recompute spill weight for an unspillable register.
185 bool IsSpillable
= LI
.isSpillable();
187 bool IsLocalSplitArtifact
= Start
&& End
;
189 // Do not update future local split artifacts.
190 bool ShouldUpdateLI
= !IsLocalSplitArtifact
;
192 if (IsLocalSplitArtifact
) {
193 MachineBasicBlock
*LocalMBB
= LIS
.getMBBFromIndex(*End
);
194 assert(LocalMBB
== LIS
.getMBBFromIndex(*Start
) &&
195 "start and end are expected to be in the same basic block");
197 // Local split artifact will have 2 additional copy instructions and they
198 // will be in the same BB.
199 // localLI = COPY other
201 // other = COPY localLI
202 TotalWeight
+= LiveIntervals::getSpillWeight(true, false, &MBFI
, LocalMBB
);
203 TotalWeight
+= LiveIntervals::getSpillWeight(false, true, &MBFI
, LocalMBB
);
208 // CopyHint is a sortable hint derived from a COPY instruction.
212 CopyHint(Register R
, float W
) : Reg(R
), Weight(W
) {}
213 bool operator<(const CopyHint
&Rhs
) const {
214 // Always prefer any physreg hint.
215 if (Reg
.isPhysical() != Rhs
.Reg
.isPhysical())
216 return Reg
.isPhysical();
217 if (Weight
!= Rhs
.Weight
)
218 return (Weight
> Rhs
.Weight
);
219 return Reg
.id() < Rhs
.Reg
.id(); // Tie-breaker.
223 bool IsExiting
= false;
224 std::set
<CopyHint
> CopyHints
;
225 DenseMap
<unsigned, float> Hint
;
226 for (MachineRegisterInfo::reg_instr_nodbg_iterator
227 I
= MRI
.reg_instr_nodbg_begin(LI
.reg()),
228 E
= MRI
.reg_instr_nodbg_end();
230 MachineInstr
*MI
= &*(I
++);
232 // For local split artifacts, we are interested only in instructions between
233 // the expected start and end of the range.
234 SlotIndex SI
= LIS
.getInstructionIndex(*MI
);
235 if (IsLocalSplitArtifact
&& ((SI
< *Start
) || (SI
> *End
)))
239 bool identityCopy
= false;
240 auto DestSrc
= TII
.isCopyInstr(*MI
);
242 const MachineOperand
*DestRegOp
= DestSrc
->Destination
;
243 const MachineOperand
*SrcRegOp
= DestSrc
->Source
;
244 identityCopy
= DestRegOp
->getReg() == SrcRegOp
->getReg() &&
245 DestRegOp
->getSubReg() == SrcRegOp
->getSubReg();
248 if (identityCopy
|| MI
->isImplicitDef())
250 if (!Visited
.insert(MI
).second
)
253 // For terminators that produce values, ask the backend if the register is
255 if (TII
.isUnspillableTerminator(MI
) &&
256 MI
->definesRegister(LI
.reg(), /*TRI=*/nullptr)) {
257 LI
.markNotSpillable();
261 // Force Weight onto the stack so that x86 doesn't add hidden precision,
262 // similar to HWeight below.
263 stack_float_t Weight
= 1.0f
;
265 // Get loop info for mi.
266 if (MI
->getParent() != MBB
) {
267 MBB
= MI
->getParent();
268 const MachineLoop
*Loop
= Loops
.getLoopFor(MBB
);
269 IsExiting
= Loop
? Loop
->isLoopExiting(MBB
) : false;
272 // Calculate instr weight.
274 std::tie(Reads
, Writes
) = MI
->readsWritesVirtualRegister(LI
.reg());
275 Weight
= LiveIntervals::getSpillWeight(Writes
, Reads
, &MBFI
, *MI
);
277 // Give extra weight to what looks like a loop induction variable update.
278 if (Writes
&& IsExiting
&& LIS
.isLiveOutOfMBB(LI
, MBB
))
281 TotalWeight
+= Weight
;
284 // Get allocation hints from copies.
285 if (!TII
.isCopyInstr(*MI
))
287 Register HintReg
= copyHint(MI
, LI
.reg(), TRI
, MRI
);
290 // Force HWeight onto the stack so that x86 doesn't add hidden precision,
291 // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
292 stack_float_t HWeight
= Hint
[HintReg
] += Weight
;
293 if (HintReg
.isVirtual() || MRI
.isAllocatable(HintReg
))
294 CopyHints
.insert(CopyHint(HintReg
, HWeight
));
297 // Pass all the sorted copy hints to mri.
298 if (ShouldUpdateLI
&& CopyHints
.size()) {
299 // Remove a generic hint if previously added by target.
300 if (TargetHint
.first
== 0 && TargetHint
.second
)
301 MRI
.clearSimpleHint(LI
.reg());
303 SmallSet
<Register
, 4> HintedRegs
;
304 for (const auto &Hint
: CopyHints
) {
305 if (!HintedRegs
.insert(Hint
.Reg
).second
||
306 (TargetHint
.first
!= 0 && Hint
.Reg
== TargetHint
.second
))
307 // Don't add the same reg twice or the target-type hint again.
309 MRI
.addRegAllocationHint(LI
.reg(), Hint
.Reg
);
312 // Weakly boost the spill weight of hinted registers.
313 TotalWeight
*= 1.01F
;
316 // If the live interval was already unspillable, leave it that way.
320 // Mark li as unspillable if all live ranges are tiny and the interval
321 // is not live at any reg mask. If the interval is live at a reg mask
322 // spilling may be required. If li is live as use in statepoint instruction
323 // spilling may be required due to if we mark interval with use in statepoint
324 // as not spillable we are risky to end up with no register to allocate.
325 // At the same time STATEPOINT instruction is perfectly fine to have this
326 // operand on stack, so spilling such interval and folding its load from stack
327 // into instruction itself makes perfect sense.
328 if (ShouldUpdateLI
&& LI
.isZeroLength(LIS
.getSlotIndexes()) &&
329 !LI
.isLiveAtIndexes(LIS
.getRegMaskSlots()) &&
330 !isLiveAtStatepointVarArg(LI
) && !canMemFoldInlineAsm(LI
, MRI
)) {
331 LI
.markNotSpillable();
335 // If all of the definitions of the interval are re-materializable,
336 // it is a preferred candidate for spilling.
337 // FIXME: this gets much more complicated once we support non-trivial
338 // re-materialization.
339 if (isRematerializable(LI
, LIS
, VRM
, *MF
.getSubtarget().getInstrInfo()))
342 if (IsLocalSplitArtifact
)
343 return normalize(TotalWeight
, Start
->distance(*End
), NumInstr
);
344 return normalize(TotalWeight
, LI
.getSize(), NumInstr
);