//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//
13 #include "InstrEmitter.h"
14 #include "ScheduleDAGSDNodes.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/SchedulerRegistry.h"
20 #include "llvm/CodeGen/SelectionDAGISel.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetRegisterInfo.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/InlineAsm.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumUnfolds,  "Number of nodes unfolded");
STATISTIC(NumDups,     "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical copies");

static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);
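
// Note: these registry entries are what back the scheduler selection flag
// (e.g. llc -pre-RA-sched=fast or -pre-RA-sched=linearize); the option
// itself is declared by RegisterScheduler's cl::opt machinery, not here.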

namespace {
  /// FastPriorityQueue - A degenerate priority queue that considers
  /// all nodes to have the same priority.
  ///
  struct FastPriorityQueue {
    SmallVector<SUnit *, 16> Queue;

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push_back(U);
    }

    SUnit *pop() {
      if (empty()) return nullptr;
      return Queue.pop_back_val();
    }
  };
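
  // With all priorities equal, pop() simply returns the most recently pushed
  // SUnit, so the "queue" degenerates to LIFO stack order.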

//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  /// AddPred - adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const override { return true; }
};
} // end anonymous namespace

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  LLVM_DEBUG(dump());

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}

void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SDep &Pred : SU->Preds) {
    ReleasePred(SU, &Pred);
    if (Pred.isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[Pred.getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
        LiveRegCycles[Pred.getReg()] = CurCycle;
      }
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(dumpNode(*SU));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SDep &Succ : SU->Succs) {
    if (Succ.isAssignedRegDep()) {
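      // With unit latencies, a node's height equals the cycle at which it was
      // scheduled, so this effectively checks that the recorded live interval
      // for this register was opened by this use; if so, scheduling its def
      // (SU) closes the interval opened in ReleasePredecessors.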
      if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[Succ.getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[Succ.getReg()] = nullptr;
        LiveRegCycles[Succ.getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  // Nodes glued to a neighboring node cannot be duplicated.
  if (N->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
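  // A glue result or glue operand pins this node to a neighbor, so it cannot
  // be safely duplicated; a chain result (MVT::Other) suggests a folded load
  // that we may be able to unfold instead.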
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getSimpleValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (const SDValue &Op : N->op_values()) {
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    LLVM_DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
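    // Redirect all uses of the folded node's values to the unfolded node,
    // and point the old chain result (the last value) at the new load's
    // chain output.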
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    SUnit *NewSU = newSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = newSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
    }

    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
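    // Partition SU's edges: the control (chain) predecessor and any
    // predecessors feeding the load's operands belong with the load SUnit;
    // everything else stays with the unfolded compute node.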
280 for (SDep &Pred : SU->Preds) {
281 if (Pred.isCtrl())
282 ChainPred = Pred;
283 else if (Pred.getSUnit()->getNode() &&
284 Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
285 LoadPreds.push_back(Pred);
286 else
287 NodePreds.push_back(Pred);
289 for (SDep &Succ : SU->Succs) {
290 if (Succ.isCtrl())
291 ChainSuccs.push_back(Succ);
292 else
293 NodeSuccs.push_back(Succ);

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad) {
        AddPred(LoadSU, Pred);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad) {
      SDep D(LoadSU, SDep::Barrier);
      D.setLatency(LoadSU->Latency);
      AddPred(NewSU, D);
    }

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  LLVM_DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SDep &Pred : SU->Preds)
    if (!Pred.isArtificial())
      AddPred(NewSU, Pred);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SDep &Succ : SU->Succs) {
    if (Succ.isArtificial())
      continue;
    SUnit *SuccSU = Succ.getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = Succ;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                               const TargetRegisterClass *DestRC,
                                               const TargetRegisterClass *SrcRC,
                                               SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;
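
  // Two back-to-back copies: CopyFromSU moves the value out of the contended
  // class into DestRC, and CopyToSU moves it back for the already-scheduled
  // users; in effect the value is parked in another register class so the
  // physical register can be redefined in between.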

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SDep &Succ : SU->Succs) {
    if (Succ.isArtificial())
      continue;
    SUnit *SuccSU = Succ.getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = Succ;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, Succ));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    RemovePred(DelDeps[i].first, DelDeps[i].second);
  }
  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  unsigned NumRes;
  if (N->getOpcode() == ISD::CopyFromReg) {
    // CopyFromReg has: chain, Val, glue, so result 1 gives the type.
    NumRes = 1;
  } else {
    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
    NumRes = MCID.getNumDefs();
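    // Start past the explicit defs, then advance once for each implicit def
    // preceding Reg in the implicit-def list; the resulting index selects
    // Reg's value in the node's result list.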
    for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
      if (Reg == *ImpDef)
        break;
      ++NumRes;
    }
  }
  return N->getSimpleValueType(NumRes);
}

/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
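  // Walk Reg and all of its aliases (the 'true' argument includes Reg
  // itself); a live def from a different SUnit means this def would clobber
  // a register that must stay live.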
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI).second) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                               SmallVectorImpl<unsigned> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SDep &Pred : SU->Preds) {
    if (Pred.isAssignedRegDep()) {
      CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM ||
        Node->getOpcode() == ISD::INLINEASM_BR) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (Register::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue.push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue.pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue.pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try code duplication or inserting cross class copies to resolve it.
    if (Delayed && !CurSU) {
      // Try duplicating the nodes that produce these "expensive to copy"
      // values to break the dependency. In case even that doesn't work,
      // insert cross class copies.
      SUnit *TrySU = NotReady[0];
      SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, VT);
      const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

      // If the cross copy register class is the same as RC, then it must be
      // possible to copy the value directly. Do not try to duplicate the def.
      // If the cross copy register class is not the same as RC, then it's
      // possible to copy the value, but it requires cross register class
      // copies and it is expensive.
      // If the cross copy register class is null, then it's not possible to
      // copy the value at all.
      SUnit *NewDef = nullptr;
      if (DestRC != RC) {
        NewDef = CopyAndMoveSuccessors(LRDef);
        if (!DestRC && !NewDef)
          report_fatal_error("Can't handle live physical "
                             "register dependency!");
      }
      if (!NewDef) {
        // Issue copies, these can be expensive cross register class copies.
        SmallVector<SUnit*, 2> Copies;
        InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
        LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
                          << " to SU #" << Copies.front()->NodeNum << "\n");
        AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
        NewDef = Copies.back();
      }

      LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
                        << " to SU #" << TrySU->NodeNum << "\n");
      LiveRegDefs[Reg] = NewDef;
      AddPred(NewDef, SDep(TrySU, SDep::Artificial));
      TrySU->isAvailable = false;
      CurSU = NewDef;

      if (!CurSU)
        llvm_unreachable("Unable to resolve live physical "
                         "register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue.push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - No-scheduling scheduler; it simply linearizes the
// DAG in topological order.
// IMPORTANT: this may not work for targets with physreg dependencies.
//===----------------------------------------------------------------------===//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  MachineBasicBlock *
    EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;

private:
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user

  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace
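
/// ScheduleNode - Recursively schedule N and release its operands. Each
/// SDNode's NodeId field is reused as a countdown of users not yet scheduled
/// (set up in Schedule() below); a node is appended to Sequence exactly when
/// its count reaches zero, which yields a reverse topological (bottom-up)
/// order of the DAG.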
void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
  if (N->getNodeId() != 0)
    llvm_unreachable(nullptr);

  if (!N->isMachineOpcode() &&
      (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
    // These nodes do not need to be translated into MIs.
    return;

  LLVM_DEBUG(dbgs() << "\n*** Scheduling: ");
  LLVM_DEBUG(N->dump(DAG));
  Sequence.push_back(N);

  unsigned NumOps = N->getNumOperands();
  if (unsigned NumLeft = NumOps) {
    SDNode *GluedOpN = nullptr;
    do {
      const SDValue &Op = N->getOperand(NumLeft-1);
      SDNode *OpN = Op.getNode();

      if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
        // Schedule glue operand right above N.
        GluedOpN = OpN;
        assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
        OpN->setNodeId(0);
        ScheduleNode(OpN);
        continue;
      }

      if (OpN == GluedOpN)
        // Glue operand is already scheduled.
        continue;

      DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
      if (DI != GluedMap.end() && DI->second != N)
        // Users of glues are counted against the glued users.
        OpN = DI->second;

      unsigned Degree = OpN->getNodeId();
      assert(Degree > 0 && "Predecessor over-released!");
      OpN->setNodeId(--Degree);
      if (Degree == 0)
        ScheduleNode(OpN);
    } while (--NumLeft);
  }
}

/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
  while (SDNode *Glued = N->getGluedUser())
    N = Glued;
  return N;
}

void ScheduleDAGLinearize::Schedule() {
  LLVM_DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;
  for (SDNode &Node : DAG->allnodes()) {
    SDNode *N = &Node;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    for (const SDNode *U : Glue->uses())
      if (U == ImmGUser)
        --Degree;
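    // Fold the glue node's remaining degree into its user, and pin the glue
    // node's own count at 1 so ordinary decrements never release it: it is
    // scheduled explicitly, immediately above its glued user, in
    // ScheduleNode.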
    GUser->setNodeId(UDegree + Degree);
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}

MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
  DenseMap<SDValue, Register> VRBaseMap;

  LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });

  unsigned NumNodes = Sequence.size();
  MachineBasicBlock *BB = Emitter.getBlock();
  for (unsigned i = 0; i != NumNodes; ++i) {
    // Sequence was built bottom-up; emit it in reverse (top-down) order.
    SDNode *N = Sequence[NumNodes-i-1];
    LLVM_DEBUG(N->dump(DAG));
    Emitter.EmitNode(N, false, false, VRBaseMap);

    // Emit any debug values associated with the node.
    if (N->getHasDebugValue()) {
      MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
      for (auto DV : DAG->GetDbgValues(N)) {
        if (!DV->isEmitted())
          if (auto *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap))
            BB->insert(InsertPos, DbgMI);
      }
    }
  }

  LLVM_DEBUG(dbgs() << '\n');

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGFast(*IS->MF);
}

llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGLinearize(*IS->MF);
}