//===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is a base class used by
/// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
    "stress-sched", cl::Hidden, cl::init(false),
    cl::desc("Stress test instruction scheduling"));
#endif

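// Out-of-line virtual method: pins SchedulingPriorityQueue's vtable to this
// translation unit (LLVM's usual "anchor" idiom).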
void SchedulingPriorityQueue::anchor() {}

ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()) {
#ifndef NDEBUG
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() = default;

void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}

const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
  if (!Node || !Node->isMachineOpcode()) return nullptr;
  return &TII->get(Node->getMachineOpcode());
}

LLVM_DUMP_METHOD void SDep::dump(const TargetRegisterInfo *TRI) const {
  switch (getKind()) {
  case Data:   dbgs() << "Data"; break;
  case Anti:   dbgs() << "Anti"; break;
  case Output: dbgs() << "Out "; break;
  case Order:  dbgs() << "Ord "; break;
  }

  switch (getKind()) {
  case Data:
    dbgs() << " Latency=" << getLatency();
    if (TRI && isAssignedRegDep())
      dbgs() << " Reg=" << printReg(getReg(), TRI);
    break;
  case Anti:
  case Output:
    dbgs() << " Latency=" << getLatency();
    break;
  case Order:
    dbgs() << " Latency=" << getLatency();
    switch(Contents.OrdKind) {
    case Barrier:      dbgs() << " Barrier"; break;
    case MayAliasMem:
    case MustAliasMem: dbgs() << " Memory"; break;
    case Artificial:   dbgs() << " Artificial"; break;
    case Weak:         dbgs() << " Weak"; break;
    case Cluster:      dbgs() << " Cluster"; break;
    }
    break;
  }
}

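/// Adds the specified edge as a pred of the current node if not already.
/// It also adds the current node as a successor of the specified node.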
bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SDep &PredDep : Preds) {
    // Zero-latency weak edges may be added purely for heuristic ordering. Don't
    // add them if another kind of edge already exists.
    if (!Required && PredDep.getSUnit() == D.getSUnit())
      return false;
    if (PredDep.overlaps(D)) {
      // Extend the latency if needed. Equivalent to
      // removePred(PredDep) + addPred(D).
      if (PredDep.getLatency() < D.getLatency()) {
        SUnit *PredSU = PredDep.getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = PredDep;
        ForwardD.setSUnit(this);
        for (SDep &SuccDep : PredSU->Succs) {
          if (SuccDep == ForwardD) {
            SuccDep.setLatency(D.getLatency());
            break;
          }
        }
        PredDep.setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < std::numeric_limits<unsigned>::max() &&
           "NumPreds will overflow!");
    assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
           "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    } else {
      assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
             "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    } else {
      assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
             "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}

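/// Removes the specified edge as a pred of the current node if it exists,
/// and removes the current node as a successor of the specified node.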
void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
  if (I == Preds.end())
    return;
  // Find the corresponding successor in N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
  N->Succs.erase(Succ);
  Preds.erase(I);
  // Update the bookkeeping.
  if (P.getKind() == SDep::Data) {
    assert(NumPreds > 0 && "NumPreds will underflow!");
    assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
    --NumPreds;
    --N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak())
      --WeakPredsLeft;
    else {
      assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
      --NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak())
      --N->WeakSuccsLeft;
    else {
      assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
      --N->NumSuccsLeft;
    }
  }
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
}

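/// Transitively marks the cached depth of this node and of every node that
/// depends on it as out of date; the values are recomputed lazily on the next
/// getDepth() query. setHeightDirty() below is the mirror image for heights.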
void SUnit::setDepthDirty() {
  if (!isDepthCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isDepthCurrent = false;
    for (SDep &SuccDep : SU->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isDepthCurrent)
        WorkList.push_back(SuccSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setHeightDirty() {
  if (!isHeightCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isHeightCurrent = false;
    for (SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isHeightCurrent)
        WorkList.push_back(PredSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setDepthToAtLeast(unsigned NewDepth) {
  if (NewDepth <= getDepth())
    return;
  setDepthDirty();
  Depth = NewDepth;
  isDepthCurrent = true;
}

void SUnit::setHeightToAtLeast(unsigned NewHeight) {
  if (NewHeight <= getHeight())
    return;
  setHeightDirty();
  Height = NewHeight;
  isHeightCurrent = true;
}

/// Calculates the maximal path from the node to the entry, following
/// predecessor edges.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}

/// Calculates the maximal path from the node to the exit, following
/// successor edges.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}

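/// Orders this node's predecessor edges such that the critical path
/// predecessor (the data predecessor with the greatest depth) comes first.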
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
      // Track the running maximum so the deepest data predecessor wins.
      MaxDepth = I->getSUnit()->getDepth();
      BestI = I;
    }
  }

  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SUnit::dumpAttributes() const {
  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeName(const SUnit &SU) const {
  if (&SU == &EntrySU)
    dbgs() << "EntrySU";
  else if (&SU == &ExitSU)
    dbgs() << "ExitSU";
  else
    dbgs() << "SU(" << SU.NodeNum << ")";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeAll(const SUnit &SU) const {
  dumpNode(SU);
  SU.dumpAttributes();
  if (SU.Preds.size() > 0) {
    dbgs() << "  Predecessors:\n";
    for (const SDep &Dep : SU.Preds) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
  if (SU.Succs.size() > 0) {
    dbgs() << "  Successors:\n";
    for (const SDep &Dep : SU.Succs) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
}
#endif

#ifndef NDEBUG
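/// Verifies that all SUnits were scheduled and that their dependence counts
/// reached zero; returns the number of actually scheduled nodes, excluding
/// isolated (dead) nodes with no predecessors and no successors.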
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (const SUnit &SU : SUnits) {
    if (!SU.isScheduled) {
      if (SU.NumPreds == 0 && SU.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SU);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SU.isScheduled &&
        (isBottomUp ? SU.getHeight() : SU.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SU);
      dbgs() << "has an unexpected "
             << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SU.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SU);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SU.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SU);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
#endif

void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  // The idea of the algorithm is taken from
  // "Online algorithms for managing the topological order of
  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
  // This is the MNR algorithm, which was first introduced by
  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
  // "Maintaining a topological order under edge insertions".
  //
  // Short description of the algorithm:
  //
  // Topological ordering, ord, of a DAG maps each node to a topological
  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
  //
  // This means that if there is a path from the node X to the node Z,
  // then ord(X) < ord(Z).
  //
  // This property can be used to check for reachability of nodes:
  // if Z is reachable from X, then an insertion of the edge Z->X would
  // create a cycle.
  //
  // The algorithm first computes a topological ordering for the DAG by
  // initializing the Index2Node and Node2Index arrays and then tries to keep
  // the ordering up-to-date after edge insertions by reordering the DAG.
  //
  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
  // the nodes reachable from Y, and then shifts them using Shift to lie
  // immediately after X in Index2Node.
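  //
  // Worked example (illustrative, not from the paper): take nodes A, B, C, D
  // with ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3 and edges A->B, C->D.
  // Inserting the edge C->B violates the order, since ord(C) > ord(B).
  // DFS marks B and everything reachable from B inside the affected window
  // [ord(B), ord(C)] = [1, 2]; here that is just B itself. Shift then
  // renumbers the window so the marked nodes land immediately after C,
  // giving ord(A)=0, ord(C)=1, ord(B)=2, ord(D)=3, a valid order again.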
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (SUnit &SU : SUnits) {
    int NodeNum = SU.NodeNum;
    unsigned Degree = SU.Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU.Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(&SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  Visited.resize(DAGSize);

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (SUnit &SU : SUnits) {
    for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

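/// Updates the topological ordering to accommodate an edge to be added from
/// SUnit X to SUnit Y.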
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(Y) < Ord(X)? If so, the new edge X->Y violates the current order.
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
}

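/// Updates the topological ordering to accommodate removal of the edge N->M.
/// Deleting an edge can never invalidate an existing topological order, so
/// nothing needs to be recomputed here.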
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // InitDAGTopologicalSorting();
}

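/// Makes a DFS traversal from SU and marks (in Visited) all successors of SU
/// whose topological index lies below UpperBound; these are the nodes affected
/// by an edge insertion, and they later receive new indexes via Shift().
/// Sets HasLoop if the node at UpperBound itself is reached.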
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (const SDep &SuccDep
         : make_range(SU->Succs.rbegin(), SU->Succs.rend())) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}

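/// Returns an array of SUs that lie both in the successor subtree of StartSU
/// and in the predecessor subtree of TargetSU (excluding the two endpoints
/// themselves). Success is set to false if TargetSU is not reachable from
/// StartSU.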
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      const SUnit *Succ = SU->Succs[I].getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        Found = true;
        continue;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Preds.size()-1; I >= 0; --I) {
      const SUnit *Pred = SU->Preds[I].getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        Found = true;
        continue;
      }
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}

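/// Renumbers the nodes in the window [LowerBound, UpperBound]: unmarked nodes
/// are shifted down to fill the gaps, and the nodes marked by DFS are
/// reassigned, in their original relative order, to the top of the window.
/// This restores a valid topological order after an edge insertion.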
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}

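/// Returns true if adding the edge SU->TargetSU (addPred(TargetSU, SU)) would
/// create a cycle.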
bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
  // Is SU reachable from TargetSU via successor edges?
  if (IsReachable(SU, TargetSU))
    return true;
  for (const SDep &PredDep : TargetSU->Preds)
    if (PredDep.isAssignedRegDep() &&
        IsReachable(SU, PredDep.getSUnit()))
      return true;
  return false;
}

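/// Returns true if SU is reachable from TargetSU, i.e. if adding the edge
/// SU->TargetSU would create a cycle.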
bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

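// Assigns node n the topological index 'index', keeping Node2Index and
// Index2Node mutually consistent.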
void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
    : SUnits(sunits), ExitSU(exitsu) {}

ScheduleHazardRecognizer::~ScheduleHazardRecognizer() = default;