//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable simple hazard-avoidance"),
                            cl::init(true), cl::Hidden);
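// Both knobs are ordinary llc command-line flags; for example, anti-dependency
// breaking can be disabled with -break-anti-dependencies=false. (cl::Hidden
// flags still parse; they just don't appear in -help, only in -help-hidden.)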
namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}
    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;
  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    LatencyPriorityQueue AvailableQueue;
    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;
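    // For example, the user of a load with a 3-cycle latency is released into
    // PendingQueue when the load issues, and only migrates to AvailableQueue
    // once the cycle counter reaches the load's completion cycle (see
    // ReleaseSucc and the pending-queue scan in ListScheduleTopDown).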
    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;
    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;
    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;
    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];
    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;
    /// The index of the most recent kill (proceeding bottom-up), or ~0u if
    /// the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
    /// The index of the most recent complete def (proceeding bottom up), or
    /// ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
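    // Invariant: for every physical register, exactly one of KillIndices[Reg]
    // and DefIndices[Reg] is ~0u at any time; the asserts sprinkled through
    // ScanInstruction and BreakAntiDependencies check exactly this. Scanning
    // "A = ...; ... = A" bottom-up, the use records a kill (A becomes live),
    // and the def then records a def index and resets the kill to ~0u.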
  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }
    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    void Schedule();
    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    void Observe(MachineInstr *MI, unsigned Count);
    /// FinishBlock - Clean up register live-range state.
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };
  /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
  /// a coarse classification and attempts to avoid grouping instructions
  /// of a given class too densely together.
  class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
    /// Class - A simple classification for SUnits.
    enum Class {
      Other, Load, Store
    };

    /// Window - The Class values of the most recently issued
    /// instructions.
    Class Window[8];
    /// getClass - Classify the given SUnit.
    Class getClass(const SUnit *SU) {
      const MachineInstr *MI = SU->getInstr();
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.mayLoad())
        return Load;
      if (TID.mayStore())
        return Store;
      return Other;
    }
    /// Step - Rotate the existing entries in Window and insert the
    /// given class value in position as the most recent.
    void Step(Class C) {
      std::copy(Window+1, array_endof(Window), Window);
      Window[array_lengthof(Window)-1] = C;
    }
  public:
    SimpleHazardRecognizer() : Window() {}
    virtual HazardType getHazardType(SUnit *SU) {
      Class C = getClass(SU);
      if (C == Other)
        return NoHazard;
      unsigned Score = 0;
      for (unsigned i = 0; i != array_lengthof(Window); ++i)
        if (Window[i] == C)
          Score += i + 1;
      if (Score > array_lengthof(Window) * 2)
        return Hazard;
      return NoHazard;
    }
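    // Worked example for the scoring above (illustrative, not from the
    // original comments): entries are weighted 1 (oldest) through 8 (most
    // recent), against a threshold of 8*2 = 16. Five consecutive recent Loads
    // score 4+5+6+7+8 = 30 > 16, so another Load reports a hazard, while two
    // scattered Loads at weights 2 and 6 score only 8 and issue freely.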
    virtual void EmitInstruction(SUnit *SU) {
      Step(getClass(SU));
    }
    virtual void AdvanceCycle() {
      // An idle cycle ages the window just like issuing an unclassified
      // instruction would.
      Step(Other);
    }
  };
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";
  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
                                 new SimpleHazardRecognizer() :
                                 new ScheduleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);
  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);
    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();
    // Clean up register live-range state.
    Scheduler.FinishBlock();
  }

  return true;
}
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);
  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());
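  // With KillIndices at ~0u and DefIndices at BB->size(), every register
  // starts out "dead" with its next def conservatively placed at the very
  // bottom of the block, which is the correct initial state for a bottom-up
  // scan that hasn't seen anything yet.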
  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build the scheduling graph.
  BuildSchedGraph();
  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      // For now, just rebuild the graph from scratch.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }
  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");
  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}
/// FinishBlock - Clean up register live-range state.
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);
    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }
    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}
void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Proceeding upwards, registers that are def'd but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;
    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;
    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);
    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));
    // If the register wasn't previously live but now is, this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}
/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;
  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max->getDepth() + Max->Latency) << "\n";
  // Track progress along the critical path through the SUnit graph as we
  // walk the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;
    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;
    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();
        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          // If the SUnit has other dependencies on the SUnit that it
          // anti-depends on, don't bother breaking the anti-dependency
          // since those edges would prevent such units from being
          // scheduled past each other regardless.
          //
          // Also, if there are dependencies on other SUnits with the
          // same register as the anti-dependency, don't attempt to
          // break it.
= CriticalPathSU
->Preds
.begin(),
672 PE
= CriticalPathSU
->Preds
.end(); P
!= PE
; ++P
)
673 if (P
->getSUnit() == NextSU
?
674 (P
->getKind() != SDep::Anti
|| P
->getReg() != AntiDepReg
) :
675 (P
->getKind() == SDep::Data
&& P
->getReg() == AntiDepReg
)) {
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);
    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }
    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;
    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
           RE = RC->allocation_order_end(MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on "
               << TRI->getName(AntiDepReg)
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " using " << TRI->getName(NewReg) << "!\n";
          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
             Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);
          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];
          assert(((KillIndices[NewReg] == ~0u) !=
                  (DefIndices[NewReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for NewReg!");
          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;
          assert(((KillIndices[AntiDepReg] == ~0u) !=
                  (DefIndices[AntiDepReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for AntiDepReg!");
          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif
  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);
  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }
  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        // Swap-and-pop; back up so the element swapped into slot i
        // is examined too.
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }
    // If there are no instructions available, don't try to issue anything, and
    // don't advance the hazard recognizer.
    if (AvailableQueue.empty()) {
      CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
      continue;
    }
    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }
    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }
    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else if (!HasNoopHazards) {
) {
912 // Otherwise, we have a pipeline stall, but no other problem, just advance
913 // the current cycle and try again.
914 DOUT
<< "*** Advancing cycle, no work to do\n";
915 HazardRec
->AdvanceCycle();
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right. This is the case for
      // processors without pipeline interlocks and other cases.
      DOUT << "*** Emitting noop\n";
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL here means noop
      ++NumNoops;
      ++CurCycle;
    }
  }
#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
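
// Usage sketch (an assumption about the surrounding pipeline, not part of
// this file): in this era of LLVM, the common codegen setup would add this
// pass after register allocation with something along the lines of
//   PM.add(createPostRAScheduler());
// The exact hook (e.g. LLVMTargetMachine::addCommonCodeGenPasses) varies by
// version.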