//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "scheduler"
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return RHSNum < LHSNum;
}
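// Illustrative sketch (not part of the original file): the comparator above is
// a "less than" in priority terms -- it returns true when LHS should be picked
// after RHS -- so the linear scan in pop() further down keeps the element that
// no other element beats. The self-contained example below mirrors the same
// tiered tie-breaking on a simplified node type; DemoNode, demo_less and
// pickBest are hypothetical names, not LLVM API.

#include <algorithm>
#include <cassert>
#include <vector>

namespace demo_latency_sort {

struct DemoNode {
  bool ScheduleHigh;  // wraparound-dependency hint, trumps everything else
  unsigned Latency;   // critical-path height from this node
  unsigned Blocked;   // nodes this one solely blocks
  unsigned NodeNum;   // stable final tie-breaker
};

// Returns true if A has lower scheduling priority than B, mirroring the
// three-level comparison in latency_sort::operator() above.
static bool demo_less(const DemoNode &A, const DemoNode &B) {
  if (A.ScheduleHigh != B.ScheduleHigh)
    return !A.ScheduleHigh;          // the schedule-high node wins outright
  if (A.Latency != B.Latency)
    return A.Latency < B.Latency;    // prefer the longer critical path
  if (A.Blocked != B.Blocked)
    return A.Blocked < B.Blocked;    // prefer the node that unblocks more
  return B.NodeNum < A.NodeNum;      // arbitrary but stable ordering
}

static const DemoNode &pickBest(const std::vector<DemoNode> &Nodes) {
  assert(!Nodes.empty() && "no candidates");
  return *std::max_element(Nodes.begin(), Nodes.end(), demo_less);
}

} // namespace demo_latency_sort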
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}
void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node. Count the number of nodes that
  // this node is the sole unscheduled node for.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;
  }
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}
// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    AdjustPriorityOfUnscheduledPreds(I->getSUnit());
  }
}
/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled. If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet. If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue. First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}
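// Illustrative sketch (not part of the original file): the remove()/push()
// pair above is a requeue -- the entry is pulled out and re-inserted so that
// any priority data computed at insertion time (here NumNodesSolelyBlocking)
// is refreshed. The toy queue below shows the same update-by-reinsertion
// pattern; TaskQueue, boost and all other names are hypothetical.

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

namespace demo_requeue {

class TaskQueue {
  std::vector<std::string> Queue;
  std::unordered_map<std::string, int> Priority; // cached at insertion time

public:
  void push(const std::string &Task, int Prio) {
    Priority[Task] = Prio;          // (re)computed every time the task enters
    Queue.push_back(Task);
  }

  void remove(const std::string &Task) {
    auto I = std::find(Queue.begin(), Queue.end(), Task);
    if (I != Queue.end())
      Queue.erase(I);
  }

  // Reprioritize by requeueing, mirroring the remove(OnlyAvailablePred) /
  // push(OnlyAvailablePred) sequence in AdjustPriorityOfUnscheduledPreds.
  void boost(const std::string &Task, int NewPrio) {
    remove(Task);
    push(Task, NewPrio);
  }
};

} // namespace demo_requeue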
SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return nullptr;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}
void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = find(Queue, SU);
  assert(I != Queue.end() && "Queue doesn't contain the SU being removed!");
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}
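// Illustrative sketch (not part of the original file): both pop() and remove()
// above erase from the middle of the vector by swapping the victim with the
// last element and popping the back -- O(1), at the cost of not preserving
// order, which is fine because the queue is scanned linearly anyway. The same
// idiom as a generic helper (swapAndPopErase is a hypothetical name):

#include <iterator>
#include <utility>
#include <vector>

template <typename T>
static void swapAndPopErase(std::vector<T> &Vec,
                            typename std::vector<T>::iterator I) {
  // Precondition: Vec is non-empty and I points at one of its elements.
  if (I != std::prev(Vec.end()))
    std::swap(*I, Vec.back());
  Vec.pop_back();
}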
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {
  dbgs() << "Latency Priority Queue\n";
  dbgs() << "  Number of Queue Entries: " << Queue.size() << "\n";
  for (const SUnit *SU : Queue) {
    dbgs() << "    ";
    DAG->dumpNode(*SU);
  }
}
#endif
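// Illustrative sketch (not part of the original file): a LatencyPriorityQueue
// is meant to be driven by a top-down list scheduler -- seed it with nodes
// whose predecessors are all scheduled, repeatedly pop the best candidate,
// mark it scheduled, notify the queue (scheduledNode), and release successors
// that have just become available. The toy loop below shows that protocol on
// a simplified DAG; DemoSU and listScheduleTopDown are hypothetical stand-ins,
// and the "pick" step ignores latency for brevity.

#include <cstdio>
#include <vector>

namespace demo_schedule {

struct DemoSU {
  unsigned NodeNum = 0;
  unsigned NumPredsLeft = 0;        // unscheduled predecessors remaining
  std::vector<DemoSU *> Succs;      // successor nodes
  bool Scheduled = false;
};

static void listScheduleTopDown(std::vector<DemoSU> &Nodes) {
  std::vector<DemoSU *> Available;  // stand-in for the priority queue
  for (DemoSU &SU : Nodes)
    if (SU.NumPredsLeft == 0)
      Available.push_back(&SU);     // push(): available from the start

  while (!Available.empty()) {
    // pop(): the real queue would scan with latency_sort here.
    DemoSU *SU = Available.back();
    Available.pop_back();
    SU->Scheduled = true;
    std::printf("scheduled SU(%u)\n", SU->NodeNum);

    // scheduledNode(SU): releasing SU can make successors available; in the
    // real queue, a successor left with a single unscheduled predecessor also
    // gets that predecessor's priority bumped.
    for (DemoSU *Succ : SU->Succs)
      if (--Succ->NumPredsLeft == 0)
        Available.push_back(Succ);
  }
}

} // namespace demo_schedule

// Example use: build a chain SU(0) -> SU(1) -> SU(2) by filling in Succs and
// NumPredsLeft, then call demo_schedule::listScheduleTopDown; the nodes are
// printed in dependency order.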