//===------------------------ CalcSpillWeights.cpp ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "calcspillweights"

#include "llvm/Function.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

char CalculateSpillWeights::ID = 0;
INITIALIZE_PASS_BEGIN(CalculateSpillWeights, "calcspillweights",
                "Calculate spill weights", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(CalculateSpillWeights, "calcspillweights",
                "Calculate spill weights", false, false)

void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
  au.addRequired<LiveIntervals>();
  au.addRequired<MachineLoopInfo>();
  au.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(au);
}

bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {

  DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
               << "********** Function: "
               << fn.getFunction()->getName() << '\n');

  LiveIntervals &lis = getAnalysis<LiveIntervals>();
  VirtRegAuxInfo vrai(fn, lis, getAnalysis<MachineLoopInfo>());
  for (LiveIntervals::iterator I = lis.begin(), E = lis.end(); I != E; ++I) {
    LiveInterval &li = *I->second;
    if (TargetRegisterInfo::isVirtualRegister(li.reg))
      vrai.CalculateWeightAndHint(li);
  }
  return false;
}

// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hreg, hsub;
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

  if (TargetRegisterInfo::isVirtualRegister(hreg))
    return sub == hsub ? hreg : 0;

  const TargetRegisterClass *rc = mri.getRegClass(reg);

  // Only allow physreg hints in rc.
  if (sub == 0)
    return rc->contains(hreg) ? hreg : 0;

  // reg:sub should match the physreg hreg.
  return tri.getMatchingSuperReg(hreg, sub, rc);
}

void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
  MachineRegisterInfo &mri = mf_.getRegInfo();
  const TargetRegisterInfo &tri = *mf_.getTarget().getRegisterInfo();
  MachineBasicBlock *mbb = 0;
  MachineLoop *loop = 0;
  unsigned loopDepth = 0;
  bool isExiting = false;
  float totalWeight = 0;
  SmallPtrSet<MachineInstr*, 8> visited;

  // Find the best physreg hint and the best virtreg hint.
  float bestPhys = 0, bestVirt = 0;
  unsigned hintPhys = 0, hintVirt = 0;

  // Don't recompute a target specific hint.
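  // (getRegAllocationHint returns a (hint type, register) pair; a nonzero
  // hint type means the target itself supplied the hint, so leave it alone.)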
  bool noHint = mri.getRegAllocationHint(li.reg).first != 0;

  for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
       MachineInstr *mi = I.skipInstruction();) {
    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
      continue;
    if (!visited.insert(mi))
      continue;

    // Get loop info for mi.
    if (mi->getParent() != mbb) {
      mbb = mi->getParent();
      loop = loops_.getLoopFor(mbb);
      loopDepth = loop ? loop->getLoopDepth() : 0;
      isExiting = loop ? loop->isLoopExiting(mbb) : false;
    }

    // Calculate instr weight.
    bool reads, writes;
    tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
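    // getSpillWeight scales the per-instruction cost up with loopDepth, so
    // uses and defs inside deeply nested loops dominate the total weight.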
    float weight = LiveIntervals::getSpillWeight(writes, reads, loopDepth);

    // Give extra weight to what looks like a loop induction variable update.
    if (writes && isExiting && lis_.isLiveOutOfMBB(li, mbb))
      weight *= 3;

    totalWeight += weight;

    // Get allocation hints from copies.
    if (noHint || !mi->isCopy())
      continue;
    unsigned hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
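    // Accumulate copy weight per hint candidate; the register that collects
    // the most weight across all copies becomes the preferred hint below.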
    float hweight = hint_[hint] += weight;
    if (TargetRegisterInfo::isPhysicalRegister(hint)) {
      if (hweight > bestPhys && lis_.isAllocatable(hint))
        bestPhys = hweight, hintPhys = hint;
    } else {
      if (hweight > bestVirt)
        bestVirt = hweight, hintVirt = hint;
    }
  }

  hint_.clear();

  // Always prefer the physreg hint.
  if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
    mri.setRegAllocationHint(li.reg, 0, hint);
    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // Mark li as unspillable if all live ranges are tiny.
  if (li.isZeroLength()) {
    li.markNotSpillable();
    return;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling. If none of the defs are
  // loads, then it's potentially very cheap to re-materialize.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  bool isLoad = false;
  SmallVector<LiveInterval*, 4> spillIs;
  if (lis_.isReMaterializable(li, spillIs, isLoad)) {
    if (isLoad)
      totalWeight *= 0.9F;
    else
      totalWeight *= 0.5F;
  }

  li.weight = totalWeight;
  lis_.normalizeSpillWeight(li);
}
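
// CalculateRegClass - Recompute reg's register class from the class
// constraints of all its uses and defs, inflating the class when the existing
// one is more restrictive than the instructions actually require.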
void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
  MachineRegisterInfo &mri = mf_.getRegInfo();
  const TargetRegisterInfo *tri = mf_.getTarget().getRegisterInfo();
  const TargetRegisterClass *orc = mri.getRegClass(reg);
  SmallPtrSet<const TargetRegisterClass*,8> rcs;

  for (MachineRegisterInfo::reg_nodbg_iterator I = mri.reg_nodbg_begin(reg),
       E = mri.reg_nodbg_end(); I != E; ++I) {
    // The targets don't have accurate enough regclass descriptions that we can
    // handle subregs. We need something similar to
    // TRI::getMatchingSuperRegClass, but returning a super class instead of a
    // sub class.
    if (I.getOperand().getSubReg()) {
      DEBUG(dbgs() << "Cannot handle subregs: " << I.getOperand() << '\n');
      return;
    }
    if (const TargetRegisterClass *rc =
                               I->getDesc().getRegClass(I.getOperandNo(), tri))
      rcs.insert(rc);
  }

  // If we found no regclass constraints, just leave reg as is.
  // In theory, we could inflate to the largest superclass of reg's existing
  // class, but that might not be legal for the current cpu setting.
  // This could happen if reg is only used by COPY instructions, so we may need
  // to improve on this.
  if (rcs.empty()) {
    return;
  }

  // Compute the intersection of all classes in rcs.
  // This ought to be independent of iteration order, but if the target
  // register classes don't form a proper algebra, it is possible to get
  // different results. The solution is to make sure the intersection of any
  // two register classes is also a register class or the null set.
  const TargetRegisterClass *rc = 0;
  for (SmallPtrSet<const TargetRegisterClass*,8>::iterator I = rcs.begin(),
       E = rcs.end(); I != E; ++I) {
    rc = rc ? getCommonSubClass(rc, *I) : *I;
    assert(rc && "Incompatible regclass constraints found");
  }

  if (rc == orc)
    return;
  DEBUG(dbgs() << "Inflating " << orc->getName() << ":%reg" << reg << " to "
               << rc->getName() << ".\n");
  mri.setRegClass(reg, rc);
}