//===-- SpillPlacement.cpp - Optimal Spill Code Placement ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the
// node bias.
//
// Transparent basic blocks have the variable live through, but don't care if
// it is spilled or in a register. These blocks become connections in the
// Hopfield network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
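//
// Minimization happens by asynchronous node updates; Node::update() below
// recomputes
//
//   V_n = sign( B_n + sum_{n, m linked by b} V_m * F_b )
//
// using a small dead zone around 0 instead of an exact sign function.
//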
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spillplacement"
#include "SpillPlacement.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"

using namespace llvm;
char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
                    "Spill Code Placement Analysis", true, true)

char &llvm::SpillPlacementID = SpillPlacement::ID;

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are recomputed for each spill placement query.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
///
struct SpillPlacement::Node {
  /// Scale - Inverse block frequency feeding into[0] or out of[1] the bundle.
  /// Ideally, these two numbers should be identical, but inaccuracies in the
  /// block frequency estimates mean that we need to normalize ingoing and
  /// outgoing frequencies separately so they are commensurate.
  float Scale[2];

  /// Bias - Normalized contributions from non-transparent blocks.
  /// A bundle connected to a MustSpill block has a huge negative bias,
  /// otherwise it is a number in the range [-2;2].
  float Bias;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always in the range [-1;1]. A positive number means the variable
  /// should go in a register through this bundle.
  float Value;

  typedef SmallVector<std::pair<float, unsigned>, 4> LinkVector;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive and add up to at most 2; weights
  /// from ingoing and outgoing nodes separately add up to at most 1. The
  /// weight sum can be less than 2 when the variable is not live into / out of
  /// some connected basic blocks.
  LinkVector Links;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return True if this node is so biased that it must spill.
  bool mustSpill() const {
    // Actually, we must spill if Bias < -sum(weights), and since the weights
    // add up to at most 2, Bias < -2 is a safe test.
    // It may be worth it to compute the exact weight sum here?
    return Bias < -2.0f;
  }

  /// Node - Create a blank Node.
  Node() : Bias(0), Value(0) {
    Scale[0] = Scale[1] = 0;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend
  /// on the CFG.
  void clear() {
    Bias = Value = 0;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  /// out=0 for an ingoing link, and 1 for an outgoing link.
  void addLink(unsigned b, float w, bool out) {
    // Normalize w relative to all connected blocks from that direction.
    w *= Scale[out];

    // There can be multiple links to the same bundle, add them up.
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      if (I->second == b) {
        I->first += w;
        return;
      }

    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node from an ingoing[0] or outgoing[1] link.
  void addBias(float w, bool out) {
    // Normalize w relative to all connected blocks from that direction.
    w *= Scale[out];
    Bias += w;
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[]) {
    // Compute the weighted sum of inputs.
    float Sum = Bias;
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      Sum += I->first * nodes[I->second].Value;

    // The weighted sum is going to be in the range [-2;2]. Ideally, we should
    // simply set Value = sign(Sum), but we will add a dead zone around 0 for
    // two reasons:
    // 1. It avoids arbitrary bias when all links are 0 as is possible during
    //    initial iterations.
    // 2. It helps tame rounding errors when the links nominally sum to 0.
    const float Thres = 1e-4f;
    bool Before = preferReg();
    if (Sum < -Thres)
      Value = -1;
    else if (Sum > Thres)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequency.resize(mf.getNumBlockIDs());
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    float Freq = LiveIntervals::getSpillWeight(true, false,
                                               loops->getLoopDepth(I));
    unsigned Num = I->getNumber();
    BlockFrequency[Num] = Freq;
    nodes[bundles->getBundle(Num, 1)].Scale[0] += Freq;
    nodes[bundles->getBundle(Num, 0)].Scale[1] += Freq;
  }

  // Scales are reciprocal frequencies.
  for (unsigned i = 0, e = bundles->getNumBundles(); i != e; ++i)
    for (unsigned d = 0; d != 2; ++d)
      if (nodes[i].Scale[d] > 0)
        nodes[i].Scale[d] = 1 / nodes[i].Scale[d];
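
  // Illustrative numbers only: if the blocks on one side of a bundle have
  // frequencies 2.0 and 6.0, that side's Scale becomes 1/8, so a constraint
  // from the 6.0 block later contributes +/- 6.0 * 1/8 = 0.75 to the bias
  // (see addBias above), and the contributions from one side stay within
  // [-1;1], except for the huge MustSpill bias.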

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = 0;
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear();
}

/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in ActiveNodes for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
       E = LiveBlocks.end(); I != E; ++I) {
    float Freq = getBlockFrequency(I->Number);
    const float Bias[] = {
      0,           // DontCare,
      1,           // PrefReg,
      -1,          // PrefSpill
      -HUGE_VALF   // MustSpill
    };

    // Live-in to block?
    if (I->Entry != DontCare) {
      unsigned ib = bundles->getBundle(I->Number, 0);
      activate(ib);
      nodes[ib].addBias(Freq * Bias[I->Entry], 1);
    }

    // Live-out from block?
    if (I->Exit != DontCare) {
      unsigned ob = bundles->getBundle(I->Number, 1);
      activate(ob);
      nodes[ob].addBias(Freq * Bias[I->Exit], 0);
    }
  }
}
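
/// addLinks - Add the transparent blocks with the given numbers as links
/// between the entry and exit bundles of each block.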
void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
       ++I) {
    unsigned Number = *I;
    unsigned ib = bundles->getBundle(Number, 0);
    unsigned ob = bundles->getBundle(Number, 1);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
      Linked.push_back(ib);
    if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
      Linked.push_back(ob);
    float Freq = getBlockFrequency(Number);
    nodes[ib].addLink(ob, Freq, 1);
    nodes[ob].addLink(ib, Freq, 0);
  }
}
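
/// scanActiveBundles - Update every active node once, remember the linked
/// nodes that may still change in Linked, and collect the nodes that already
/// prefer a register in RecentPositive. Return true if any node prefers a
/// register.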
bool SpillPlacement::scanActiveBundles() {
  Linked.clear();
  RecentPositive.clear();
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
    nodes[n].update(nodes);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (!nodes[n].Links.empty())
      Linked.push_back(n);
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached. The member vector Linked holds
/// the numbers of the linked nodes that need updating.
void SpillPlacement::iterate() {
  // First update the recently positive nodes. They have likely received new
  // negative bias that will turn them off.
  while (!RecentPositive.empty())
    nodes[RecentPositive.pop_back_val()].update(nodes);

  if (Linked.empty())
    return;

  // Run up to 10 iterations. The edge bundle numbering is closely related to
  // basic block numbering, so there is a strong tendency towards chains of
  // linked nodes with sequential numbers. By scanning the linked nodes
  // backwards and forwards, we make it very likely that a single node can
  // affect the entire network in a single iteration. That means very fast
  // convergence, usually in a single iteration.
  for (unsigned iteration = 0; iteration != 10; ++iteration) {
    // Scan backwards, skipping the last node which was just updated.
    bool Changed = false;
    for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
           llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;

    // Scan forwards, skipping the first node which was just updated.
    Changed = false;
    for (SmallVectorImpl<unsigned>::const_iterator I =
           llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;
  }
}
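
// For illustration only: a client such as the greedy register allocator
// drives this analysis roughly as sketched below. The driver code lives
// outside this file, and the variable names here are placeholders.
//
//   BitVector RegBundles;
//   SpillPlacer->prepare(RegBundles);
//   SpillPlacer->addConstraints(LiveBlockConstraints);
//   SpillPlacer->addLinks(TransparentBlockNumbers);
//   SpillPlacer->scanActiveBundles();
//   SpillPlacer->iterate();
//   bool Perfect = SpillPlacer->finish();
//
// After finish(), RegBundles has a bit set for each bundle that should have
// the variable in a register.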
void SpillPlacement::prepare(BitVector &RegBundles) {
  Linked.clear();
  RecentPositive.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}
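
/// finish - Write the preferences back to the ActiveNodes bit vector: clear
/// the bit for every active bundle that prefers the stack. Return true if
/// every active bundle prefers a register (a perfect solution).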
bool SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = 0;
  return Perfect;
}
);