//===- SpillPlacement.cpp - Optimal Spill Code Placement ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
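// For illustration (hypothetical numbers): if block BB7 is transparent for the
// variable and has a block frequency of 12, the nodes for its ingoing and
// outgoing edge bundles get a link of weight 12 between them. A block that
// wants the value live-in in a register instead adds 12 to the
// register-preferring bias of its ingoing bundle's node.
//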
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
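// In Hopfield terms, updating node n sets V_n to the sign of its total input
// B_n + sum_{m linked by b} V_m * F_b, with a small dead zone around zero (see
// Node::update below). Each such update never increases E, which is why the
// iteration settles instead of oscillating.
//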
//===----------------------------------------------------------------------===//
#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "spill-code-placement"
char SpillPlacement::ID = 0;

char &llvm::SpillPlacementID = SpillPlacement::ID;

INITIALIZE_PASS_BEGIN(SpillPlacement, DEBUG_TYPE,
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_END(SpillPlacement, DEBUG_TYPE,
                    "Spill Code Placement Analysis", true, true)
void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addRequiredTransitive<EdgeBundles>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are computed each time placeSpills is called.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
struct SpillPlacement::Node {
  /// BiasN - Sum of blocks that prefer a spill.
  BlockFrequency BiasN;

  /// BiasP - Sum of blocks that prefer a register.
  BlockFrequency BiasP;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always one of the values {-1, 0, 1}. A positive number means the
  /// variable should go in a register through this bundle.
  int Value;

  using LinkVector = SmallVector<std::pair<BlockFrequency, unsigned>, 4>;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive block frequencies.
  LinkVector Links;

  /// SumLinkWeights - Cached sum of the weights of all links + Threshold.
  BlockFrequency SumLinkWeights;
  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }
  /// mustSpill - Return true if this node is so biased that it must spill.
  bool mustSpill() const {
    // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
    // BiasN is saturated when MustSpill is set, make sure this still returns
    // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
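    // (BlockFrequency addition saturates, so even when BiasN is at the maximum
    // the right-hand side tops out at the same maximum and the comparison below
    // still returns true.)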
    return BiasN >= BiasP + SumLinkWeights;
  }
  /// clear - Reset per-query data, but preserve frequencies that only depend on
  /// the CFG.
  void clear(BlockFrequency Threshold) {
    BiasN = BlockFrequency(0);
    BiasP = BlockFrequency(0);
    Value = 0;
    SumLinkWeights = Threshold;
    Links.clear();
  }
  /// addLink - Add a link to bundle b with weight w.
  void addLink(unsigned b, BlockFrequency w) {
    // Update cached sum.
    SumLinkWeights += w;

    // There can be multiple links to the same bundle, add them up.
    for (std::pair<BlockFrequency, unsigned> &L : Links)
      if (L.second == b) {
        L.first += w;
        return;
      }

    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }
  /// addBias - Bias this node.
  void addBias(BlockFrequency freq, BorderConstraint direction) {
    switch (direction) {
    case PrefReg:
      BiasP += freq;
      break;
    case PrefSpill:
      BiasN += freq;
      break;
    case MustSpill:
      BiasN = BlockFrequency::max();
      break;
    default:
      break;
    }
  }
  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[], BlockFrequency Threshold) {
    // Compute the weighted sum of inputs.
    BlockFrequency SumN = BiasN;
    BlockFrequency SumP = BiasP;
    for (std::pair<BlockFrequency, unsigned> &L : Links) {
      if (nodes[L.second].Value == -1)
        SumN += L.first;
      else if (nodes[L.second].Value == 1)
        SumP += L.first;
    }

    // Each weighted sum is going to be less than the total frequency of the
    // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
    // will add a dead zone around 0 for two reasons:
    //
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
    //
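    // For illustration (hypothetical numbers): with Threshold == 2, SumP == 10
    // and SumN == 9, neither sum beats the other by Threshold, so the node
    // stays undecided (Value == 0) instead of flipping on a 1-unit difference.
    //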
    bool Before = preferReg();
    if (SumN >= SumP + Threshold)
      Value = -1;
    else if (SumP >= SumN + Threshold)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }
  void getDissentingNeighbors(SparseSet<unsigned> &List,
                              const Node nodes[]) const {
    for (const auto &Elt : Links) {
      unsigned n = Elt.second;
      // Neighbors that already have the same value are not going to
      // change because of this node changing.
      if (Value != nodes[n].Value)
        List.insert(n);
    }
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  bundles = &getAnalysis<EdgeBundles>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];
  TodoList.clear();
  TodoList.setUniverse(bundles->getNumBundles());

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  setThreshold(MBFI->getEntryFreq());
  for (auto &I : mf) {
    unsigned Num = I.getNumber();
    BlockFrequencies[Num] = MBFI->getBlockFreq(&I);
  }

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = nullptr;
  TodoList.clear();
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  TodoList.insert(n);
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear(Threshold);

  // Very large bundles usually come from big switches, indirect branches,
  // landing pads, or loops with many 'continue' statements. It is difficult to
  // allocate registers when so many different blocks are involved.
  //
  // Give a small negative bias to large bundles such that a substantial
  // fraction of the connected blocks need to be interested before we consider
  // expanding the region through the bundle. This helps compile time by
  // limiting the number of blocks visited and the number of links in the
  // Hopfield network.
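  //
  // For illustration (hypothetical shape): a bundle fed by a 200-target switch
  // trips the size check below, so it starts out negatively biased and only
  // ends up preferring a register if the register-preferring blocks linked to
  // it contribute more frequency than that initial bias plus the threshold.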
  if (bundles->getBlocks(n).size() > 100) {
    nodes[n].BiasP = BlockFrequency(0);
    BlockFrequency BiasN = MBFI->getEntryFreq();
    BiasN >>= 4;
    nodes[n].BiasN = BiasN;
  }
}

/// Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry. Since the threshold is used as a
/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
/// threshold.
void SpillPlacement::setThreshold(BlockFrequency Entry) {
  // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
  // it. Divide by 2^13, rounding as appropriate.
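  // Worked example: Entry.getFrequency() == 2^14 == 16384 gives
  // (16384 >> 13) + bool(16384 & (1 << 12)) == 2 + 0 == 2, while 12288 rounds
  // up to (12288 >> 13) + 1 == 2 because bit 12 is set.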
  uint64_t Freq = Entry.getFrequency();
  uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
  Threshold = BlockFrequency(std::max(UINT64_C(1), Scaled));
}

/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in NodeMask for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (const BlockConstraint &LB : LiveBlocks) {
    BlockFrequency Freq = BlockFrequencies[LB.Number];

    // Live-in to block?
    if (LB.Entry != DontCare) {
      unsigned ib = bundles->getBundle(LB.Number, false);
      activate(ib);
      nodes[ib].addBias(Freq, LB.Entry);
    }

    // Live-out from block?
    if (LB.Exit != DontCare) {
      unsigned ob = bundles->getBundle(LB.Number, true);
      activate(ob);
      nodes[ob].addBias(Freq, LB.Exit);
    }
  }
}

/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
  for (unsigned B : Blocks) {
    BlockFrequency Freq = BlockFrequencies[B];
    if (Strong)
      Freq += Freq;
    unsigned ib = bundles->getBundle(B, false);
    unsigned ob = bundles->getBundle(B, true);
    activate(ib);
    activate(ob);
    nodes[ib].addBias(Freq, PrefSpill);
    nodes[ob].addBias(Freq, PrefSpill);
  }
}

void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (unsigned Number : Links) {
    unsigned ib = bundles->getBundle(Number, false);
    unsigned ob = bundles->getBundle(Number, true);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    BlockFrequency Freq = BlockFrequencies[Number];
    nodes[ib].addLink(ob, Freq);
    nodes[ob].addLink(ib, Freq);
  }
}

bool SpillPlacement::scanActiveBundles() {
  RecentPositive.clear();
  for (unsigned n : ActiveNodes->set_bits()) {
    update(n);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

bool SpillPlacement::update(unsigned n) {
  if (!nodes[n].update(nodes, Threshold))
    return false;
  nodes[n].getDissentingNeighbors(TodoList, nodes);
  return true;
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
void SpillPlacement::iterate() {
  // We do not need to push those nodes in the todolist.
  // They have already been processed as part of the previous iteration.
  RecentPositive.clear();

  // Since the last iteration, the todolist has been augmented by calls
  // to addConstraints, addLinks, and co.
  // Update the network energy starting at this new frontier.
  // The call to ::update will add the nodes that changed into the todolist.
  unsigned Limit = bundles->getNumBundles() * 10;
  while (Limit-- > 0 && !TodoList.empty()) {
    unsigned n = TodoList.pop_back_val();
    if (!update(n))
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
}

void SpillPlacement::prepare(BitVector &RegBundles) {
  RecentPositive.clear();
  TodoList.clear();

  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

bool SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (unsigned n : ActiveNodes->set_bits())
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = nullptr;
  return Perfect;
}

void SpillPlacement::BlockConstraint::print(raw_ostream &OS) const {
  auto toString = [](BorderConstraint C) -> StringRef {
    switch (C) {
    case DontCare: return "DontCare";
    case PrefReg: return "PrefReg";
    case PrefSpill: return "PrefSpill";
    case PrefBoth: return "PrefBoth";
    case MustSpill: return "MustSpill";
    }
    llvm_unreachable("uncovered switch");
  };

  OS << "{" << Number << ", "
     << toString(Entry) << ", "
     << toString(Exit) << ", "
     << (ChangesValue ? "changes" : "no change") << "}";
}

void SpillPlacement::BlockConstraint::dump() const {
  print(dbgs());
  dbgs() << "\n";
}