// Commit: [Alignment][NFC] Support compile time constants
// Repository: llvm-core.git — include/llvm/CodeGen/RegAllocPBQP.h
// Blob: f7f92248f4ce99d7895146b1f7a8d535c363ee8a
1 //===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the PBQPBuilder interface, for classes which build PBQP
10 // instances to represent register allocation problems, and the RegAllocPBQP
11 // interface.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_CODEGEN_REGALLOCPBQP_H
16 #define LLVM_CODEGEN_REGALLOCPBQP_H
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/CodeGen/PBQP/CostAllocator.h"
21 #include "llvm/CodeGen/PBQP/Graph.h"
22 #include "llvm/CodeGen/PBQP/Math.h"
23 #include "llvm/CodeGen/PBQP/ReductionRules.h"
24 #include "llvm/CodeGen/PBQP/Solution.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include <algorithm>
27 #include <cassert>
28 #include <cstddef>
29 #include <limits>
30 #include <memory>
31 #include <set>
32 #include <vector>
34 namespace llvm {
36 class FunctionPass;
37 class LiveIntervals;
38 class MachineBlockFrequencyInfo;
39 class MachineFunction;
40 class raw_ostream;
42 namespace PBQP {
43 namespace RegAlloc {
/// Spill option index.
///
/// Index of the spill option within a node's cost vector (and within the
/// rows/columns of an edge cost matrix). Made constexpr so it can be used
/// in compile-time contexts; namespace-scope constexpr functions are
/// implicitly inline, so this is ABI/interface compatible with the previous
/// `inline` declaration.
inline constexpr unsigned getSpillOptionIdx() { return 0; }
48 /// Metadata to speed allocatability test.
49 ///
50 /// Keeps track of the number of infinities in each row and column.
51 class MatrixMetadata {
52 public:
53 MatrixMetadata(const Matrix& M)
54 : UnsafeRows(new bool[M.getRows() - 1]()),
55 UnsafeCols(new bool[M.getCols() - 1]()) {
56 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
58 for (unsigned i = 1; i < M.getRows(); ++i) {
59 unsigned RowCount = 0;
60 for (unsigned j = 1; j < M.getCols(); ++j) {
61 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
62 ++RowCount;
63 ++ColCounts[j - 1];
64 UnsafeRows[i - 1] = true;
65 UnsafeCols[j - 1] = true;
68 WorstRow = std::max(WorstRow, RowCount);
70 unsigned WorstColCountForCurRow =
71 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
72 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
73 delete[] ColCounts;
76 MatrixMetadata(const MatrixMetadata &) = delete;
77 MatrixMetadata &operator=(const MatrixMetadata &) = delete;
79 unsigned getWorstRow() const { return WorstRow; }
80 unsigned getWorstCol() const { return WorstCol; }
81 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
82 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
84 private:
85 unsigned WorstRow = 0;
86 unsigned WorstCol = 0;
87 std::unique_ptr<bool[]> UnsafeRows;
88 std::unique_ptr<bool[]> UnsafeCols;
91 /// Holds a vector of the allowed physical regs for a vreg.
92 class AllowedRegVector {
93 friend hash_code hash_value(const AllowedRegVector &);
95 public:
96 AllowedRegVector() = default;
97 AllowedRegVector(AllowedRegVector &&) = default;
99 AllowedRegVector(const std::vector<unsigned> &OptVec)
100 : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
101 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
104 unsigned size() const { return NumOpts; }
105 unsigned operator[](size_t I) const { return Opts[I]; }
107 bool operator==(const AllowedRegVector &Other) const {
108 if (NumOpts != Other.NumOpts)
109 return false;
110 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
113 bool operator!=(const AllowedRegVector &Other) const {
114 return !(*this == Other);
117 private:
118 unsigned NumOpts = 0;
119 std::unique_ptr<unsigned[]> Opts;
122 inline hash_code hash_value(const AllowedRegVector &OptRegs) {
123 unsigned *OStart = OptRegs.Opts.get();
124 unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
125 return hash_combine(OptRegs.NumOpts,
126 hash_combine_range(OStart, OEnd));
129 /// Holds graph-level metadata relevant to PBQP RA problems.
130 class GraphMetadata {
131 private:
132 using AllowedRegVecPool = ValuePool<AllowedRegVector>;
134 public:
135 using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
137 GraphMetadata(MachineFunction &MF,
138 LiveIntervals &LIS,
139 MachineBlockFrequencyInfo &MBFI)
140 : MF(MF), LIS(LIS), MBFI(MBFI) {}
142 MachineFunction &MF;
143 LiveIntervals &LIS;
144 MachineBlockFrequencyInfo &MBFI;
146 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
147 VRegToNodeId[VReg] = NId;
150 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
151 auto VRegItr = VRegToNodeId.find(VReg);
152 if (VRegItr == VRegToNodeId.end())
153 return GraphBase::invalidNodeId();
154 return VRegItr->second;
157 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
158 return AllowedRegVecs.getValue(std::move(Allowed));
161 private:
162 DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
163 AllowedRegVecPool AllowedRegVecs;
/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  using AllowedRegVector = RegAlloc::AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  using ReductionState = enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  };

  NodeMetadata() = default;

  // Copy constructor: deep-copies the per-option unsafe-edge counts.
  // Note: copy *assignment* is implicitly deleted (a move assignment is
  // declared below and OptUnsafeEdges is a unique_ptr), so copies can only
  // be made via construction.
  NodeMetadata(const NodeMetadata &Other)
      : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
        OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
        AllowedRegs(Other.AllowedRegs)
#ifndef NDEBUG
        , everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  NodeMetadata(NodeMetadata &&) = default;
  NodeMetadata& operator=(NodeMetadata &&) = default;

  void setVReg(unsigned VReg) { this->VReg = VReg; }
  unsigned getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }

  // Initialize per-option bookkeeping from the node's cost vector. Entry 0
  // of the cost vector is excluded (it is the spill option, index
  // getSpillOptionIdx() == 0), so NumOpts counts register options only.
  void setup(const Vector& Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }

  // Reduction state may only move forward, per the enum ordering above.
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
    this->RS = RS;

#ifndef NDEBUG
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

  // Fold a newly attached edge's matrix metadata into this node's counts:
  // bump DeniedOpts by the edge's worst infinity count and bump the
  // per-option unsafe counters. Transpose is true when this node is the
  // edge's *second* endpoint (callers pass NId == getEdgeNode2Id), in which
  // case the column-side metadata is used for the per-option flags.
  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  // Exact inverse of handleAddEdge, applied when an edge is detached.
  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

  // Pessimistic allocatability test: the node is conservatively
  // allocatable if either fewer options have been denied than exist, or
  // at least one option has no unsafe edges at all (its unsafe count is 0).
  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
       &OptUnsafeEdges[NumOpts]);
  }

#ifndef NDEBUG
  // Debug-only: whether this node was ever marked ConservativelyAllocatable.
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS = Unprocessed;
  unsigned NumOpts = 0;               // Register options (spill excluded).
  unsigned DeniedOpts = 0;            // Worst-case options denied by edges.
  std::unique_ptr<unsigned[]> OptUnsafeEdges; // Per-option unsafe-edge count.
  unsigned VReg = 0;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#ifndef NDEBUG
  bool everConservativelyAllocatable = false;
#endif
};
/// PBQP solver implementation specialized for register allocation.
///
/// Supplies the type aliases and callback hooks (handleAddNode,
/// handleAddEdge, ...) required by PBQP::Graph, plus the reduction /
/// backpropagation driver invoked from solve().
class RegAllocSolverImpl {
private:
  using RAMatrix = MDMatrix<MatrixMetadata>;

public:
  using RawVector = PBQP::Vector;
  using RawMatrix = PBQP::Matrix;
  using Vector = PBQP::Vector;
  using Matrix = RAMatrix;
  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

  using NodeId = GraphBase::NodeId;
  using EdgeId = GraphBase::EdgeId;

  using NodeMetadata = RegAlloc::NodeMetadata;
  struct EdgeMetadata {};
  using GraphMetadata = RegAlloc::GraphMetadata;

  using Graph = PBQP::Graph<RegAllocSolverImpl>;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  /// Run the solver: attach to the graph, build the worklists (setup),
  /// compute a reduction order (reduce), and back-propagate it into a
  /// Solution.
  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  /// Graph callback: initialize per-node metadata when a node is added.
  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }

  // No incremental bookkeeping is needed for these two events.
  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  /// Graph callback: fold the new edge's matrix metadata into both
  /// endpoint nodes.
  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  /// Graph callback: subtract the edge's contribution from NId's metadata
  /// and promote the node if it just became easier to allocate.
  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    // The second endpoint sees the edge matrix transposed.
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  /// Graph callback: add the edge's contribution to NId's metadata.
  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  /// Graph callback: an edge's cost matrix is being replaced by NewCosts.
  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    // NOTE(review): N1Id was read from G.getEdgeNode1Id(EId) four lines
    // up, so this comparison is always false — N1 is always treated as the
    // row side below. Confirm the intent before simplifying.
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // And update now the metadata with the new cost.
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:
  // Move NId to a more favourable worklist if its updated metadata allows.
  // NOTE(review): the degree == 3 test appears to anticipate an in-progress
  // edge removal (i.e. the node is *becoming* degree <= 2, hence optimally
  // reducible) — confirm against PBQP::Graph's callback ordering.
  void promote(NodeId NId, NodeMetadata& NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
               NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  // Erase NId from whichever worklist its reduction state says it is on.
  // Unprocessed nodes are on no list, so there is nothing to remove.
  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
             OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
             ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
             NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  // The three movers below transfer NId between worklists and upgrade its
  // reduction state (setReductionState asserts the move is an upgrade).
  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::NotProvablyAllocatable);
  }

  // Partition every node into one of the three worklists: degree < 3 is
  // optimally reducible (R0/R1/R2 applies); otherwise classify by the
  // conservative allocatability test.
  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    using NodeId = GraphBase::NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        // Degree 0 needs no rule; degree 1/2 use the standard PBQP R1/R2
        // reductions from ReductionRules.h.
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // would be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        NodeSet::iterator NItr =
          std::min_element(NotProvablyAllocatableNodes.begin(),
                           NotProvablyAllocatableNodes.end(),
                           SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  // Orders nodes by spill cost (cost vector entry 0 — the spill option),
  // breaking ties by lower node degree.
  // NOTE(review): this compares raw spill cost, not the spill-cost:degree
  // *ratio* that the comment above reduce() describes — confirm which is
  // intended.
  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}

    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }

  private:
    const Graph& G;
  };

  Graph& G;

  // Worklists for the reduction loop, keyed by reduction state.
  using NodeSet = std::set<NodeId>;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};
/// PBQP graph specialized for register allocation: a PBQP::Graph
/// instantiated with the RegAllocSolverImpl traits, plus debug printing.
class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  using BaseT = PBQP::Graph<RegAllocSolverImpl>;

public:
  // Takes the metadata by value and moves it into the base graph.
  // NOTE(review): single-argument constructor is non-explicit, so implicit
  // conversion from GraphMetadata is allowed — confirm that is intentional.
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

  /// Dump this graph to dbgs().
  void dump() const;

  /// Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};
519 inline Solution solve(PBQPRAGraph& G) {
520 if (G.empty())
521 return Solution();
522 RegAllocSolverImpl RegAllocSolver(G);
523 return RegAllocSolver.solve();
526 } // end namespace RegAlloc
527 } // end namespace PBQP
/// Create a PBQP register allocator instance.
///
/// @param customPassID Optional pass identifier passed through to the
///        allocator; defaults to nullptr. Semantics are defined at the
///        definition site (not visible in this header) — NOTE(review):
///        confirm against RegAllocPBQP.cpp.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);
533 } // end namespace llvm
535 #endif // LLVM_CODEGEN_REGALLOCPBQP_H