//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
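// In outline (see ListScheduleTopDown below), each cycle first moves any
// pending instructions whose latency-derived depth has been reached onto the
// available queue, then pops candidates in priority order until the hazard
// recognizer reports no hazard. If a candidate is found it is scheduled;
// otherwise the scheduler either stalls (advances the cycle) or emits a noop,
// depending on the kind of hazard reported.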
#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
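// For example, passing -post-RA-scheduler -break-anti-dependencies=critical
// (e.g. via llc) forces post-RA scheduling on with critical-path-only
// anti-dependency breaking, regardless of what the subtarget requests;
// leaving both flags off defers entirely to enablePostRAScheduler().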
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
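// For example, -postra-sched-debugdiv=2 -postra-sched-debugmod=1 schedules
// only every other block, which is handy for bisecting a post-RA scheduling
// problem down to a single basic block.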
AntiDepBreaker::~AntiDepBreaker() { }
class PostRAScheduler : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  CodeGenOpt::Level OptLevel;

public:
  static char ID;
  PostRAScheduler(CodeGenOpt::Level ol) :
    MachineFunctionPass(ID), OptLevel(ol) {}

  void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  const char *getPassName() const {
    return "Post RA top-down list latency scheduler";
  }

  bool runOnMachineFunction(MachineFunction &Fn);
};
char PostRAScheduler::ID = 0;
class SchedulePostRATDList : public ScheduleDAGInstrs {
  /// AvailableQueue - The priority queue to use for the available SUnits.
  LatencyPriorityQueue AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// Topo - A topological ordering for SUnits.
  ScheduleDAGTopologicalSort Topo;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
  AntiDepBreaker *AntiDepBreak;

  /// AA - AliasAnalysis for making memory reference queries.
  AliasAnalysis *AA;

  /// KillIndices - The index of the most recent kill (proceeding bottom-up),
  /// or ~0u if the register is not live.
  std::vector<unsigned> KillIndices;

public:
  SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
    AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);

  ~SchedulePostRATDList();

  /// StartBlock - Initialize register live-range state for scheduling in
  /// this block.
  void StartBlock(MachineBasicBlock *BB);

  /// Schedule - Schedule the instruction range using list scheduling.
  void Schedule();

  /// Observe - Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  void Observe(MachineInstr *MI, unsigned Count);

  /// FinishBlock - Clean up register live-range state.
  void FinishBlock();

  /// FixupKills - Fix register kill flags that have been made
  /// invalid due to scheduling.
  void FixupKills(MachineBasicBlock *MBB);

private:
  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();
  void StartBlockForKills(MachineBasicBlock *BB);

  // ToggleKillFlag - Toggle a register operand kill flag. Other
  // adjustments may be made to the instruction if necessary. Return
  // true if the operand has been deleted, false if not.
  bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
};
SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
    KillIndices(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
  AntiDepBreak =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF) : NULL));
}
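// Note: the constructor above maps the requested AntiDepBreakMode onto a
// breaker object: ANTIDEP_ALL creates an AggressiveAntiDepBreaker,
// ANTIDEP_CRITICAL creates a CriticalAntiDepBreaker, and any other mode
// leaves AntiDepBreak NULL, which disables anti-dependency breaking.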
SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
        (EnableAntiDepBreaking == "critical")
           ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }
  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, AntiDepMode,
                                 CriticalPathRCs);
  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif
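    // Each block is handled the same way: initialize live-range state with
    // StartBlock(), schedule every region delimited by scheduling boundaries
    // with Run()/EmitSchedule() (boundary instructions themselves are passed
    // to Observe() so liveness stays up to date), then FinishBlock() and
    // FixupKills() repair the state and kill flags afterwards.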
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}
/// Schedule - Schedule the instruction range using list scheduling.
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }
  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}
/// FinishBlock - Clean up register live-range state.
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}
/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}
bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
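/// For example, if reordering changes which use of a register comes last,
/// the kill flag ends up on the wrong instruction; the bottom-up scan below
/// recomputes liveness and toggles the flags to match.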
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;
    // Update liveness. Registers that are def'ed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }
    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }
    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
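// ListScheduleTopDown drives the cycle-by-cycle loop for a region.
// ScheduleNodeTopDown commits a chosen node to the output Sequence, and
// ReleaseSuccessors/ReleaseSucc decrement each successor's predecessor count,
// parking fully-released successors on the PendingQueue until their
// latency-derived depth is reached.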
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;
  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }
    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }
    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }
    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }
#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}