//===- SpillPlacement.cpp - Optimal Spill Code Placement ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
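//
// Intuitively, updating a node sets its value to agree with the sign of its
// weighted input B_n + sum V_m * F_b (subject to a small dead zone around
// zero), so the node's own term in E can only move toward its minimum. Since
// every node takes one of the three values {-1, 0, 1}, E is bounded and the
// updates settle after finitely many changes. This is only a sketch of the
// standard Hopfield-network argument, not a proof specific to this code.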
//
//===----------------------------------------------------------------------===//

#include "SpillPlacement.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "spill-code-placement"

char SpillPlacement::ID = 0;

char &llvm::SpillPlacementID = SpillPlacement::ID;

INITIALIZE_PASS_BEGIN(SpillPlacement, DEBUG_TYPE,
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, DEBUG_TYPE,
                    "Spill Code Placement Analysis", true, true)

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are computed each time placeSpills is called.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
struct SpillPlacement::Node {
  /// BiasN - Sum of blocks that prefer a spill.
  BlockFrequency BiasN;

  /// BiasP - Sum of blocks that prefer a register.
  BlockFrequency BiasP;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always one of the values {-1, 0, 1}. A positive number means the
  /// variable should go in a register through this bundle.
  int Value;

  using LinkVector = SmallVector<std::pair<BlockFrequency, unsigned>, 4>;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive block frequencies.
  LinkVector Links;

  /// SumLinkWeights - Cached sum of the weights of all links + Threshold.
  BlockFrequency SumLinkWeights;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return true if this node is so biased that it must spill.
  bool mustSpill() const {
    // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
    // BiasN is saturated when MustSpill is set, so make sure this still
    // returns true when the RHS saturates. Note that SumLinkWeights includes
    // Threshold.
    return BiasN >= BiasP + SumLinkWeights;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend
  /// on the CFG.
  void clear(const BlockFrequency &Threshold) {
    BiasN = BiasP = Value = 0;
    SumLinkWeights = Threshold;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  void addLink(unsigned b, BlockFrequency w) {
    // Update cached sum.
    SumLinkWeights += w;

    // There can be multiple links to the same bundle, add them up.
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      if (I->second == b) {
        I->first += w;
        return;
      }

    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node.
  void addBias(BlockFrequency freq, BorderConstraint direction) {
    switch (direction) {
    default:
      break;
    case PrefReg:
      BiasP += freq;
      break;
    case PrefSpill:
      BiasN += freq;
      break;
    case MustSpill:
      // Saturate the negative bias so mustSpill() returns true regardless of
      // the positive bias and link weights.
      BiasN = BlockFrequency::getMaxFrequency();
      break;
    }
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[], const BlockFrequency &Threshold) {
    // Compute the weighted sum of inputs.
    BlockFrequency SumN = BiasN;
    BlockFrequency SumP = BiasP;
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
      if (nodes[I->second].Value == -1)
        SumN += I->first;
      else if (nodes[I->second].Value == 1)
        SumP += I->first;
    }

    // Each weighted sum is going to be less than the total frequency of the
    // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
    // will add a dead zone around 0 for two reasons:
    //
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
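    //
    // As a concrete illustration (the numbers are hypothetical, not taken
    // from any particular function): with Threshold == 2, SumP == 5 and
    // SumN == 4 fall inside the dead zone and leave Value == 0, while
    // SumP == 7 and SumN == 4 satisfy SumP >= SumN + Threshold and set
    // Value == 1.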

    bool Before = preferReg();
    if (SumN >= SumP + Threshold)
      Value = -1;
    else if (SumP >= SumN + Threshold)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }

  void getDissentingNeighbors(SparseSet<unsigned> &List,
                              const Node nodes[]) const {
    for (const auto &Elt : Links) {
      unsigned n = Elt.second;
      // Neighbors that already have the same value are not going to
      // change because of this node changing.
      if (Value != nodes[n].Value)
        List.insert(n);
    }
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];
  TodoList.clear();
  TodoList.setUniverse(bundles->getNumBundles());

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  setThreshold(MBFI->getEntryFreq());
  for (auto &I : mf) {
    unsigned Num = I.getNumber();
    BlockFrequencies[Num] = MBFI->getBlockFreq(&I);
  }

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = nullptr;
  TodoList.clear();
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  TodoList.insert(n);
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear(Threshold);

  // Very large bundles usually come from big switches, indirect branches,
  // landing pads, or loops with many 'continue' statements. It is difficult to
  // allocate registers when so many different blocks are involved.
  //
  // Give a small negative bias to large bundles such that a substantial
  // fraction of the connected blocks need to be interested before we consider
  // expanding the region through the bundle. This helps compile time by
  // limiting the number of blocks visited and the number of links in the
  // Hopfield network.
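  //
  // Roughly, a bias of EntryFreq / 16 means that blocks adding up to at least
  // a sixteenth of the entry frequency must prefer a register before this
  // bundle's value can turn positive. The cutoff of 100 blocks and the divisor
  // of 16 below are tuning constants, not derived quantities.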
  if (bundles->getBlocks(n).size() > 100) {
    nodes[n].BiasP = 0;
    nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
  }
}

/// Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry. Since the threshold is used as a
/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
/// threshold.
void SpillPlacement::setThreshold(const BlockFrequency &Entry) {
  // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
  // it. Divide by 2^13, rounding as appropriate.
  uint64_t Freq = Entry.getFrequency();
  uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
  Threshold = std::max(UINT64_C(1), Scaled);
}
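// As a worked illustration of the scaling above (the numbers are examples
// only): an entry frequency of 2^14 gives Scaled = (2^14 >> 13) + 0 = 2,
// matching the threshold quoted in the comment, while any entry frequency
// below 2^12 rounds down to Scaled = 0 and is clamped to the minimum
// threshold of 1.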

/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in NodeMask for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
       E = LiveBlocks.end(); I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[I->Number];

    // Live-in to block?
    if (I->Entry != DontCare) {
      unsigned ib = bundles->getBundle(I->Number, false);
      activate(ib);
      nodes[ib].addBias(Freq, I->Entry);
    }

    // Live-out from block?
    if (I->Exit != DontCare) {
      unsigned ob = bundles->getBundle(I->Number, true);
      activate(ob);
      nodes[ob].addBias(Freq, I->Exit);
    }
  }
}

/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
  for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
       I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[*I];
    // A strong preference counts the block frequency twice.
    if (Strong)
      Freq += Freq;
    unsigned ib = bundles->getBundle(*I, false);
    unsigned ob = bundles->getBundle(*I, true);
    activate(ib);
    activate(ob);
    nodes[ib].addBias(Freq, PrefSpill);
    nodes[ob].addBias(Freq, PrefSpill);
  }
}

void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
       ++I) {
    unsigned Number = *I;
    unsigned ib = bundles->getBundle(Number, false);
    unsigned ob = bundles->getBundle(Number, true);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    BlockFrequency Freq = BlockFrequencies[Number];
    nodes[ib].addLink(ob, Freq);
    nodes[ob].addLink(ib, Freq);
  }
}

bool SpillPlacement::scanActiveBundles() {
  RecentPositive.clear();
  for (unsigned n : ActiveNodes->set_bits()) {
    update(n);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

bool SpillPlacement::update(unsigned n) {
  if (!nodes[n].update(nodes, Threshold))
    return false;
  nodes[n].getDissentingNeighbors(TodoList, nodes);
  return true;
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
void SpillPlacement::iterate() {
  // We do not need to push those nodes onto the todo list.
  // They have already been processed as part of the previous iteration.
  RecentPositive.clear();

  // Since the last iteration, the todo list has been augmented by calls
  // to addConstraints, addLinks, and co.
  // Update the network energy starting at this new frontier.
  // The call to ::update will add the nodes that changed into the todo list.
  // Cap the total number of updates so the loop terminates even if the
  // network keeps oscillating.
  unsigned Limit = bundles->getNumBundles() * 10;
  while (Limit-- > 0 && !TodoList.empty()) {
    unsigned n = TodoList.pop_back_val();
    if (!update(n))
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
}
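
// A typical query, as driven by the register allocator (a sketch only; the
// exact call sequence lives in the caller, and the variable names here are
// illustrative):
//
//   BitVector RegBundles;
//   SP.prepare(RegBundles);            // reset per-query state
//   SP.addConstraints(LiveBlocks);     // biases from live-in/live-out blocks
//   SP.addLinks(TransparentBlocks);    // links from live-through blocks
//   SP.scanActiveBundles();
//   SP.iterate();
//   bool Perfect = SP.finish();        // RegBundles now holds the answer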

void SpillPlacement::prepare(BitVector &RegBundles) {
  RecentPositive.clear();
  TodoList.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

bool
SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (unsigned n : ActiveNodes->set_bits())
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = nullptr;
  return Perfect;
}