zpu: managed to compile program that writes constant to global variable
[llvm/zpu.git] / lib/CodeGen/LatencyPriorityQueue.cpp
//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scheduler"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return LHSNum < RHSNum;
}
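// Note: latency_sort is the Picker used by pop() below; operator()(LHS, RHS)
// returning true means LHS ranks lower than RHS. For example, given two
// otherwise-equal available nodes with latencies 5 and 7, the latency-5 node
// compares lower, so pop() picks the latency-7 node first and keeps the
// critical path moving.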
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return 0;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}
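// Note: a null return is deliberately ambiguous; it covers both "no
// unscheduled predecessor" and "more than one". The callers, push() and
// AdjustPriorityOfUnscheduledPreds(), only act on the exactly-one case.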
void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node. Count the number of nodes that
  // this node is the sole unscheduled node for.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;
  }
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}
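// Note: NumNodesSolelyBlocking[] is what getNumSolelyBlockNodes() reads in
// latency_sort's tie-break above (see LatencyPriorityQueue.h), so recomputing
// it on every push keeps the "unblocks the most successors" heuristic current.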
// ScheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    AdjustPriorityOfUnscheduledPreds(I->getSUnit());
  }
}
/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled. If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet. If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue. First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}
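// Note: the remove()/push() round trip is the actual priority bump: push()
// recounts how many successors OnlyAvailablePred solely blocks, which raises
// its rank under latency_sort's tie-break when latencies are equal.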
SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return NULL;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}
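// Note: the queue is a plain std::vector rather than a heap. pop() linearly
// scans with Picker for the best node, swaps it with the back element, and
// pop_back()s, so selection is O(n) in the number of available nodes while
// the removal itself is O(1).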
void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
  if (I != prior(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}