//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/MachineScheduler.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/PriorityQueue.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/CodeGen/LiveInterval.h"
25 #include "llvm/CodeGen/LiveIntervals.h"
26 #include "llvm/CodeGen/MachineBasicBlock.h"
27 #include "llvm/CodeGen/MachineDominators.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFunctionPass.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineLoopInfo.h"
32 #include "llvm/CodeGen/MachineOperand.h"
33 #include "llvm/CodeGen/MachinePassRegistry.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/Passes.h"
36 #include "llvm/CodeGen/RegisterClassInfo.h"
37 #include "llvm/CodeGen/RegisterPressure.h"
38 #include "llvm/CodeGen/ScheduleDAG.h"
39 #include "llvm/CodeGen/ScheduleDAGInstrs.h"
40 #include "llvm/CodeGen/ScheduleDAGMutation.h"
41 #include "llvm/CodeGen/ScheduleDFS.h"
42 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
43 #include "llvm/CodeGen/SlotIndexes.h"
44 #include "llvm/CodeGen/TargetFrameLowering.h"
45 #include "llvm/CodeGen/TargetInstrInfo.h"
46 #include "llvm/CodeGen/TargetLowering.h"
47 #include "llvm/CodeGen/TargetPassConfig.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/TargetSchedule.h"
50 #include "llvm/CodeGen/TargetSubtargetInfo.h"
51 #include "llvm/Config/llvm-config.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/MC/LaneBitmask.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Support/ErrorHandling.h"
59 #include "llvm/Support/GraphWriter.h"
60 #include "llvm/Support/MachineValueType.h"
61 #include "llvm/Support/raw_ostream.h"
75 #define DEBUG_TYPE "machine-scheduler"
STATISTIC(NumClustered, "Number of load/store pairs clustered");
namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));

} // end namespace llvm
#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs(
    "view-misched-dags", cl::Hidden,
    cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessor/successor than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
  cl::desc("Print schedule DAGs"));
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif
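// Illustrative usage (debug builds only): the flags above can be combined on
// the llc command line when reducing a scheduling problem, e.g.
//   llc -misched-only-func=foo -misched-only-block=2 -misched-print-dags t.ll
// restricts scheduling to block #2 of function foo and prints its DAG. The
// exact invocation is an example, not prescribed by this file.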
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));
static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));
static cl::opt<bool>
    ForceFastCluster("force-fast-cluster", cl::Hidden,
                     cl::desc("Switch to fast cluster algorithm with the loss "
                              "of some fusion opportunities"),
                     cl::init(false));
static cl::opt<unsigned>
    FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
                         cl::desc("The threshold for fast cluster"),
                         cl::init(1000));
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}
namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID) : MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};
/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace
char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
                      "PostRA Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
                    "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;
/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
    MachineSchedOpt("misched",
                    cl::init(&useDefaultMachineSched), cl::Hidden,
                    cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
    DefaultSchedRegistry("default",
                         "Use the target's default scheduler choice.",
                         useDefaultMachineSched);
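// Illustrative sketch (not part of this file): an out-of-tree or experimental
// strategy plugs into the same registry. `MyStrategy` below is a hypothetical
// MachineSchedStrategy subclass; after registration, "-misched=my-sched"
// selects it through MachineSchedOpt above.
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Run MyStrategy.", createMySched);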
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}
/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}
/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
namespace {

/// A region of an MBB for scheduling.
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};

} // end anonymous namespace
using MBBRegionsVector = SmallVector<SchedRegion, 16>;
static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugOrPseudoInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed (MBB) top-down if initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}
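// Note on the FixKillFlags argument: the pre-RA MachineScheduler calls
// scheduleRegions(*Scheduler, /*FixKillFlags=*/false), while the post-RA
// PostMachineScheduler passes true, since only the post-RA run must leave
// kill flags consistent for later passes (see the FIXME above).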
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}
void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
    MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
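// MISchedCutoff is primarily a debugging aid: bisecting over
// "-misched-cutoff=N" (e.g. with llc) can narrow down which scheduling
// decision introduced a problem, since scheduling stops after N instructions.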
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineInstr &MI : *this) {
    if (SUnit *SU = getSUnit(&MI))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}
void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}
// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    Register Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Register::isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}
void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff      : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue       : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    Register Reg = P.RegUnit;
    if (!Register::isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//
namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    unsigned Width;

    MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
              int64_t Offset, unsigned Width)
        : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
          Width(Width) {}

    static bool Compare(const MachineOperand *const &A,
                        const MachineOperand *const &B) {
      if (A->getType() != B->getType())
        return A->getType() < B->getType();
      if (A->isReg())
        return A->getReg() < B->getReg();
      if (A->isFI()) {
        const MachineFunction &MF = *A->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        return StackGrowsDown ? A->getIndex() > B->getIndex()
                              : A->getIndex() < B->getIndex();
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "object operands.");
    }

    bool operator<(const MemOpInfo &RHS) const {
      // FIXME: Don't compare everything twice. Maybe use C++20 three way
      // comparison instead when it's available.
      if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
                                       RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       Compare))
        return true;
      if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       BaseOps.begin(), BaseOps.end(), Compare))
        return false;
      if (Offset != RHS.Offset)
        return Offset < RHS.Offset;
      return SU->NodeNum < RHS.SU->NodeNum;
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
                                ScheduleDAGInstrs *DAG);
  void collectMemOpRecords(std::vector<SUnit> &SUnits,
                           SmallVectorImpl<MemOpInfo> &MemOpRecords);
  bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
                   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace
namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm
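// Illustrative sketch (assumed target usage, not prescribed by this file): a
// target typically attaches these mutations when constructing its scheduler,
// e.g. in its createMachineScheduler override:
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));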
1588 // Sorting all the loads/stores first, then for each load/store, checking the
1589 // following load/store one by one, until reach the first non-dependent one and
1590 // call target hook to see if they can cluster.
1591 // If FastCluster is enabled, we assume that, all the loads/stores have been
1592 // preprocessed and now, they didn't have dependencies on each other.
1593 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1594 ArrayRef
<MemOpInfo
> MemOpRecords
, bool FastCluster
,
1595 ScheduleDAGInstrs
*DAG
) {
1596 // Keep track of the current cluster length and bytes for each SUnit.
1597 DenseMap
<unsigned, std::pair
<unsigned, unsigned>> SUnit2ClusterInfo
;
1599 // At this point, `MemOpRecords` array must hold atleast two mem ops. Try to
1600 // cluster mem ops collected within `MemOpRecords` array.
1601 for (unsigned Idx
= 0, End
= MemOpRecords
.size(); Idx
< (End
- 1); ++Idx
) {
1602 // Decision to cluster mem ops is taken based on target dependent logic
1603 auto MemOpa
= MemOpRecords
[Idx
];
1605 // Seek for the next load/store to do the cluster.
1606 unsigned NextIdx
= Idx
+ 1;
1607 for (; NextIdx
< End
; ++NextIdx
)
1608 // Skip if MemOpb has been clustered already or has dependency with
1610 if (!SUnit2ClusterInfo
.count(MemOpRecords
[NextIdx
].SU
->NodeNum
) &&
1612 (!DAG
->IsReachable(MemOpRecords
[NextIdx
].SU
, MemOpa
.SU
) &&
1613 !DAG
->IsReachable(MemOpa
.SU
, MemOpRecords
[NextIdx
].SU
))))
1618 auto MemOpb
= MemOpRecords
[NextIdx
];
1619 unsigned ClusterLength
= 2;
1620 unsigned CurrentClusterBytes
= MemOpa
.Width
+ MemOpb
.Width
;
1621 if (SUnit2ClusterInfo
.count(MemOpa
.SU
->NodeNum
)) {
1622 ClusterLength
= SUnit2ClusterInfo
[MemOpa
.SU
->NodeNum
].first
+ 1;
1623 CurrentClusterBytes
=
1624 SUnit2ClusterInfo
[MemOpa
.SU
->NodeNum
].second
+ MemOpb
.Width
;
1627 if (!TII
->shouldClusterMemOps(MemOpa
.BaseOps
, MemOpb
.BaseOps
, ClusterLength
,
1628 CurrentClusterBytes
))
1631 SUnit
*SUa
= MemOpa
.SU
;
1632 SUnit
*SUb
= MemOpb
.SU
;
1633 if (SUa
->NodeNum
> SUb
->NodeNum
)
1634 std::swap(SUa
, SUb
);
1636 // FIXME: Is this check really required?
1637 if (!DAG
->addEdge(SUb
, SDep(SUa
, SDep::Cluster
)))
1640 LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa
->NodeNum
<< ") - SU("
1641 << SUb
->NodeNum
<< ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
    } else {
      // Copy predecessor edges from SUb to SUa so that the SUnits that SUb
      // depends on cannot be scheduled in between SUb and SUa. Successor edges
      // do not need to be copied from SUa to SUb since no one will depend
      // on stores.
      // Note that we don't need to care about memory dependencies here: we
      // won't try to cluster mem ops that have any memory dependency.
      for (const SDep &Pred : SUb->Preds) {
        if (Pred.getSUnit() == SUa)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
      }
    }
    SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
                                             CurrentClusterBytes};

    LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
                      << ", Curr cluster bytes: " << CurrentClusterBytes
                      << "\n");
  }
}
void BaseMemOpClusterMutation::collectMemOpRecords(
    std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
  for (auto &SU : SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    const MachineInstr &MI = *SU.getInstr();
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    bool OffsetIsScalable;
    unsigned Width;
    if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                           OffsetIsScalable, Width, TRI)) {
      MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));

      LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
                        << Offset << ", OffsetIsScalable: " << OffsetIsScalable
                        << ", Width: " << Width << "\n");
    }
#ifndef NDEBUG
    for (auto *Op : BaseOps)
      assert(Op);
#endif
  }
}
bool BaseMemOpClusterMutation::groupMemOps(
    ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
    DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
  bool FastCluster =
      ForceFastCluster ||
      MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;

  for (const auto &MemOp : MemOps) {
    unsigned ChainPredID = DAG->SUnits.size();
    if (FastCluster) {
      for (const SDep &Pred : MemOp.SU->Preds) {
        // We only want to cluster the mem ops that have the same ctrl(non-data)
        // pred so that they don't have a ctrl dependency on each other. But for
        // store instrs, we can still cluster them if the pred is a load instr.
        if ((Pred.isCtrl() &&
             (IsLoad ||
              (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
            !Pred.isArtificial()) {
          ChainPredID = Pred.getSUnit()->NodeNum;
          break;
        }
      }
    } else
      ChainPredID = 0;

    Groups[ChainPredID].push_back(MemOp);
  }
  return FastCluster;
}
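// Worked sketch of the fast-cluster heuristic above (hypothetical numbers):
// with 100 collected mem ops in a DAG of 20000 SUnits, the complexity proxy
// is 100 * 20000 / 1000 = 2000. If that exceeds FastClusterThreshold (a
// cl::opt, so its default may vary), the per-pair IsReachable checks are
// skipped during clustering and mem ops are instead pre-grouped here by
// their ctrl predecessor.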
/// Callback from DAG postProcessing to create cluster edges for loads/stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
  // Collect all the clusterable loads/stores.
  SmallVector<MemOpInfo, 32> MemOpRecords;
  collectMemOpRecords(DAG->SUnits, MemOpRecords);

  if (MemOpRecords.size() < 2)
    return;

  // If the DAG is too complex, use a heuristic to group the loads/stores
  // without dependencies into the same group, to avoid a compile-time blowup.
  // Note that some fusion pairs may be lost this way.
  DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
  bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);

  for (auto &Group : Groups) {
    // Sort the loads/stores so that we can stop clustering as early as
    // possible.
    llvm::sort(Group.second);

    // Try to cluster all the neighboring loads/stores.
    clusterNeighboringMemOps(Group.second, FastCluster, DAG);
  }
}
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace
namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return std::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = src
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = src
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  Register SrcReg = SrcOp.getReg();
  if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  Register DstReg = DstOp.getReg();
  if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
    return;
  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from the source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;
  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two-address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;
  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
      LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (const SDep &Pred : GlobalSU->Preds) {
    if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
      continue;
    if (Pred.getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
      return;
    GlobalUses.push_back(Pred.getSUnit());
  }
  LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SUnit *LU : LocalUses) {
    LLVM_DEBUG(dbgs() << "  Local use SU(" << LU->NodeNum << ") -> SU("
                      << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
  }
  for (SUnit *GU : GlobalUses) {
    LLVM_DEBUG(dbgs() << "  Global use SU(" << GU->NodeNum << ") -> SU("
                      << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
  }
}
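// Minimal sketch of the intended effect (hypothetical vregs): for a loop
// whose induction variable increment feeds a copy,
//   %iv.next = ADD %iv, 1
//   %dst     = COPY %iv.next
// the weak edges bias other users of the local interval to issue before the
// next def of the global one, keeping the copy's source and dest live ranges
// from overlapping so the register allocator can coalesce the COPY away.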
/// Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
      *priorNonDebug(DAG->end(), DAG->begin()));

  for (SUnit &SU : DAG->SUnits) {
    if (!SU.getInstr()->isCopy())
      continue;

    constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}
//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

/// Given a Count of resource usage and a Latency value, return true if a
/// SchedBoundary becomes resource limited.
/// If we are checking after scheduling a node, we should return true when
/// we just reach the resource limit.
static bool checkResourceLimit(unsigned LFactor, unsigned Count,
                               unsigned Latency, bool AfterSchedNode) {
  int ResCntFactor = (int)(Count - (Latency * LFactor));
  if (AfterSchedNode)
    return ResCntFactor >= (int)LFactor;
  else
    return ResCntFactor > (int)LFactor;
}
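// Worked example (hypothetical numbers): with LFactor = 2, Count = 10 scaled
// resource units, and Latency = 4 cycles, ResCntFactor = 10 - 4*2 = 2. After
// scheduling a node, the >= comparison flags the zone as resource limited
// (2 >= 2); before scheduling, the strict > comparison does not (2 > 2 is
// false).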
void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = std::numeric_limits<unsigned>::max();
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
  ReservedCyclesIndex.clear();
  ResourceGroupSubUnitMasks.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (SUnit &SU : DAG->SUnits) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
    RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}
void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
    ReservedCyclesIndex.resize(ResourceCount);
    ExecutedResCounts.resize(ResourceCount);
    ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
    unsigned NumUnits = 0;

    for (unsigned i = 0; i < ResourceCount; ++i) {
      ReservedCyclesIndex[i] = NumUnits;
      NumUnits += SchedModel->getProcResource(i)->NumUnits;
      if (isUnbufferedGroup(i)) {
        auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
        for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
             U != UE; ++U)
          ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
      }
    }

    ReservedCycles.resize(NumUnits, InvalidCycle);
  }
}
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}
/// Compute the next cycle at which the given processor resource unit
/// can be scheduled.
unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
                                                       unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[InstanceIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling, add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}
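// Illustrative sketch (hypothetical numbers): if ReservedCycles[InstanceIdx]
// is 5 and the current operation needs Cycles = 2, a top-down zone reports
// cycle 5, while a bottom-up zone reports 5 + 2 = 7, since bottom-up counts
// cycles from the region exit and must also cover the operation's own
// occupancy of the unit.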
/// Compute the next cycle at which the given processor resource can be
/// scheduled. Returns the next cycle and the index of the processor resource
/// instance in the reserved cycles vector.
std::pair<unsigned, unsigned>
SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
                                    unsigned Cycles) {
  unsigned MinNextUnreserved = InvalidCycle;
  unsigned InstanceIdx = 0;
  unsigned StartIndex = ReservedCyclesIndex[PIdx];
  unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
  assert(NumberOfInstances > 0 &&
         "Cannot have zero instances of a ProcResource");

  if (isUnbufferedGroup(PIdx)) {
    // If any subunits are used by the instruction, report that the resource
    // group is available at 0, effectively removing the group record from
    // hazarding and basing the hazarding decisions on the subunit records.
    // Otherwise, choose the first available instance from among the subunits.
    // Specifications which assign cycles to both the subunits and the group or
    // which use an unbuffered group with buffered subunits will appear to
    // schedule strangely. In the first case, the additional cycles for the
    // group will be ignored. In the second, the group will be ignored
    // entirely.
    for (const MCWriteProcResEntry &PE :
         make_range(SchedModel->getWriteProcResBegin(SC),
                    SchedModel->getWriteProcResEnd(SC)))
      if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
        return std::make_pair(0u, StartIndex);

    auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
    for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
      unsigned NextUnreserved, NextInstanceIdx;
      std::tie(NextUnreserved, NextInstanceIdx) =
          getNextResourceCycle(SC, SubUnits[I], Cycles);
      if (MinNextUnreserved > NextUnreserved) {
        InstanceIdx = NextInstanceIdx;
        MinNextUnreserved = NextUnreserved;
      }
    }
    return std::make_pair(MinNextUnreserved, InstanceIdx);
  }

  for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
       ++I) {
    unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles);
    if (MinNextUnreserved > NextUnreserved) {
      InstanceIdx = I;
      MinNextUnreserved = NextUnreserved;
    }
  }
  return std::make_pair(MinNextUnreserved, InstanceIdx);
}
/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
                      << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }

  if (CurrMOps > 0 &&
      ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
       (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
    LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
                      << (isTop() ? "begin" : "end") << " group\n");
    return true;
  }

  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (const MCWriteProcResEntry &PE :
          make_range(SchedModel->getWriteProcResBegin(SC),
                     SchedModel->getWriteProcResEnd(SC))) {
      unsigned ResIdx = PE.ProcResourceIdx;
      unsigned Cycles = PE.Cycles;
      unsigned NRCycle, InstanceIdx;
      std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(SC, ResIdx, Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(Cycles, MaxObservedStall);
#endif
        LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
                          << SchedModel->getResourceName(ResIdx)
                          << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
                          << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (SUnit *SU : ReadySUs) {
    unsigned L = getUnscheduledLatency(SU);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = SU;
    }
  }
  if (LateSU) {
    LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
                      << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
                    << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    LLVM_DEBUG(
        dbgs() << "  " << Available.getName() << " + Remain CritRes: "
               << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
               << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}
void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                                unsigned Idx) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle was bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
                        checkHazard(SU) || (Available.size() >= ReadyListLimit);

  if (!HazardDetected) {
    Available.push(SU);

    if (InPQueue)
      Pending.remove(Pending.begin() + Idx);
    return;
  }

  if (!InPQueue)
    Pending.push(SU);
}
/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
           "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  IsResourceLimited =
      checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                         getScheduledLatency(), true);

  LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
                    << '\n');
}
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}
/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                                      unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
                    << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    LLVM_DEBUG(dbgs() << "  *** Critical resource "
                      << SchedModel->getResourceName(PIdx) << ": "
                      << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
                      << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable, InstanceIdx;
  std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(SC, PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    LLVM_DEBUG(dbgs() << "  Resource conflict: "
                      << SchedModel->getResourceName(PIdx)
                      << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
                      << " reserved until @" << NextAvailable << "\n");
  }
  return NextAvailable;
}
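// Worked example of the scaling above (hypothetical numbers): a resource with
// Factor = 2 consumed for Cycles = 3 contributes Count = 3 * 2 = 6 scaled
// units (printed as "+3x2u"), which makes resources with different unit
// counts directly comparable against getCriticalCount() and the scaled
// micro-op issue count.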
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
    // Scheduling an instruction may have made pending instructions available.
    CheckPending = true;
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;
  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource
      // by a full cycle, then micro-op issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(SC, PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          unsigned ReservedUntil, InstanceIdx;
          std::tie(ReservedUntil, InstanceIdx) =
              getNextResourceCycle(SC, PIdx, 0);
          if (isTop()) {
            ReservedCycles[InstanceIdx] =
                std::max(ReservedUntil, NextCycle + PI->Cycles);
          } else
            ReservedCycles[InstanceIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
                      << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
                      << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle)
    bumpCycle(NextCycle);
  else
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    IsResourceLimited =
        checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                           getScheduledLatency(), true);
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;

  // Bump the cycle count for issue group constraints.
  // This must be done after NextCycle has been adjusted for all other stalls.
  // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
  // CurrCycle to X.
  if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
      (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
    LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
                      << " group\n");
    bumpCycle(++NextCycle);
  }

  while (CurrMOps >= SchedModel->getIssueWidth()) {
    LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
                      << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  LLVM_DEBUG(dumpScheduledState());
}
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = std::numeric_limits<unsigned>::max();

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
    SUnit *SU = *(Pending.begin() + I);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (Available.size() >= ReadyListLimit)
      break;

    releaseNode(SU, ReadyCycle, true, I);
    if (E != Pending.size()) {
      --I;
      --E;
    }
  }
  CheckPending = false;
}
/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Defer any ready instrs that now have a hazard.
  for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
    if (checkHazard(*I)) {
      Pending.push(*I);
      I = Available.remove(I);
      continue;
    }
    ++I;
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    //  FIXME: Re-enable assert once PR20057 is resolved.
    //  assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //         "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  LLVM_DEBUG(Pending.dump());
  LLVM_DEBUG(Available.dump());

  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * ResFactor;
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif
//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}
/// Compute remaining latency. We need this both to determine whether the
/// overall schedule has become latency-limited and whether the instructions
/// outside this zone are resource or latency limited.
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
///   ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));
  return RemLatency;
}
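// Illustrative sketch (hypothetical numbers): if the zone's incrementally
// maintained dependent latency is 7 cycles, the deepest node in Available
// has an unscheduled latency of 9, and the deepest in Pending has 5, then
// RemLatency = max(7, 9, 5) = 9 cycles remain to be covered.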
/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                               SchedBoundary &CurrZone,
                                               bool ComputeRemLatency,
                                               unsigned &RemLatency) const {
  // The current cycle is already greater than the critical path, so we are
  // already latency limited and don't need to compute the remaining latency.
  if (CurrZone.getCurrCycle() > Rem.CriticalPath)
    return true;

  // If we haven't scheduled anything yet, then we aren't latency limited.
  if (CurrZone.getCurrCycle() == 0)
    return false;

  if (ComputeRemLatency)
    RemLatency = computeRemLatency(CurrZone);

  return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}
/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  unsigned RemLatency = 0;
  bool RemLatencyComputed = false;
  if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
    RemLatency = computeRemLatency(CurrZone);
    RemLatencyComputed = true;
    OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
                                         OtherCount, RemLatency, false);
  }

  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited &&
      (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
                                       RemLatency))) {
    Policy.ReduceLatency |= true;
    LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
                      << " RemainingLatency " << RemLatency << " + "
                      << CurrZone.getCurrCycle() << "c > CritPath "
                      << Rem.CriticalPath << "\n");
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
    dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
           << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
  } if (OtherResLimited) dbgs()
                 << "  RemainingLimit: "
                 << SchedModel->getResourceName(OtherCritIdx) << "\n";
             if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
             << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysReg:        return "PHYS-REG  ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}
void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif
namespace llvm {
/// Return true if this heuristic determines order.
/// TODO: Consider refactoring the return type of these functions to an
/// integer or enum, as we may need to differentiate whether TryCand is better
/// than Cand.
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}

bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
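// Semantics sketch: both helpers return true whenever the comparison decides
// the order, in either direction. For example, tryLess(2, 5, TryCand, Cand,
// RegExcess) marks TryCand as the winner by setting TryCand.Reason, while
// tryLess(5, 2, ...) keeps Cand the winner yet still returns true, lowering
// Cand.Reason to the higher-priority (numerically smaller) Reason when
// appropriate. Only a tie returns false, letting the caller fall through to
// the next heuristic.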
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone) {
  if (Zone.isTop()) {
    // Prefer the candidate with the lesser depth, but only if one of them has
    // depth greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    // Prefer the candidate with the lesser height, but only if one of them has
    // height greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}
} // end namespace llvm
static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
                    << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  if (RegionPolicy.ComputeDFSResult)
    DAG->computeDFSResult();

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}
/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getMF();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and
  // more compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure) {
    RegionPolicy.ShouldTrackPressure = false;
    RegionPolicy.ShouldTrackLaneMasks = false;
  }

  // Check -misched-topdown/bottomup can force or unforce scheduling direction.
  // e.g. -misched-bottomup=false allows scheduling in both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
void GenericScheduler::dumpPolicy() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
#endif
}
/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  LLVM_DEBUG(
      dbgs() << "IssueCycles="
             << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
             << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
             << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
             << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
             << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
      if (Rem.IsAcyclicLatencyLimited) dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
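// Worked example (hypothetical numbers, all factors 1): CyclicCritPath = 4c,
// CriticalPath = 16c, RemIssueCount = 8 micro-ops, MicroOpBufferSize = 8.
// Then IterCount = max(4, 8) = 8, AcyclicCount = 16, and
// InFlightCount = (16 * 8 + 7) / 8 = 16 > BufferLimit = 8, so the loop is
// flagged acyclic-latency limited and the scheduler chases latency instead
// of assuming the micro-op buffer hides it.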
void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : Bot.Available) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}
namespace llvm {
bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF) {
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // Do not compare the magnitude of pressure changes between top and bottom
  // boundary.
  if (Cand.AtTop != TryCand.AtTop)
    return false;

  // If both candidates affect the same set in the same boundary, go with the
  // smallest increase.
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }

  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                                 std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                                   std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}

unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}
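// Pressure-comparison sketch (hypothetical values): when both candidates
// touch the same pressure set, the smaller unit increase wins (+1 beats +2).
// When they touch different sets, TRI->getRegPressureSetScore ranks the sets,
// and the ranks are swapped first when the changes are decreases, so relief
// of the more critical set is preferred.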
/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
int biasPhysReg(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();

  if (MI->isCopy()) {
    unsigned ScheduledOper = isTop ? 1 : 0;
    unsigned UnscheduledOper = isTop ? 0 : 1;
    // If we have already scheduled the physreg producer/consumer, immediately
    // schedule the copy.
    if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
      return 1;
    // If the physreg is at the boundary, defer it. Otherwise schedule it
    // immediately to free the dependent. We can hoist the copy later.
    bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
    if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
      return AtBoundary ? -1 : 1;
  }

  if (MI->isMoveImmediate()) {
    // If we have a move immediate and all successors have been assigned, bias
    // towards scheduling this later. Make sure all register defs are to
    // physical registers.
    bool DoBias = true;
    for (const MachineOperand &Op : MI->defs()) {
      if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
        DoBias = false;
        break;
      }
    }

    if (DoBias)
      return isTop ? -1 : 1;
  }

  return 0;
}
} // end namespace llvm
void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
          Cand.SU->getInstr(),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
            Cand.SU->getInstr(),
            &DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
            Cand.SU->getInstr(),
            DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
             << "  Try  SU(" << Cand.SU->NodeNum << ") "
             << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
             << Cand.RPDelta.Excess.getUnitInc() << "\n");
}
/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model
/// because we don't need to evaluate all aspects of the model for each node
/// in the queue. But it's really done to make the heuristics easier to debug
/// and statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending, or nullptr
///             if Cand is from a different zone than TryCand.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary *Zone) const {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }
  // Bias PhysReg defs and copies toward their uses and definitions,
  // respectively.
  if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
                 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
    return TryCand.Reason != NoCand;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;
  // We only compare a subset of features when comparing nodes between
  // Top and Bottom boundary. Some properties are simply incomparable; in many
  // other instances we should only override the other boundary if something
  // is a clear good pick on one boundary. Skip heuristics that are more
  // "tie-breaking" in nature.
  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // For loops that are acyclic path limited, aggressively schedule for
    // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
    // heuristics to take precedence.
    if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
        tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return TryCand.Reason != NoCand;
  }
3231 // Keep clustered nodes together to encourage downstream peephole
3232 // optimizations which may reduce resource requirements.
3234 // This is a best effort to set things up for a post-RA pass. Optimizations
3235 // like generating loads of multiple registers should ideally be done within
3236 // the scheduler pass by combining the loads during DAG postprocessing.
3237 const SUnit
*CandNextClusterSU
=
3238 Cand
.AtTop
? DAG
->getNextClusterSucc() : DAG
->getNextClusterPred();
3239 const SUnit
*TryCandNextClusterSU
=
3240 TryCand
.AtTop
? DAG
->getNextClusterSucc() : DAG
->getNextClusterPred();
3241 if (tryGreater(TryCand
.SU
== TryCandNextClusterSU
,
3242 Cand
.SU
== CandNextClusterSU
,
3243 TryCand
, Cand
, Cluster
))
3244 return TryCand
.Reason
!= NoCand
;
3247 // Weak edges are for clustering and other constraints.
3248 if (tryLess(getWeakLeft(TryCand
.SU
, TryCand
.AtTop
),
3249 getWeakLeft(Cand
.SU
, Cand
.AtTop
),
3250 TryCand
, Cand
, Weak
))
3251 return TryCand
.Reason
!= NoCand
;
3254 // Avoid increasing the max pressure of the entire region.
3255 if (DAG
->isTrackingPressure() && tryPressure(TryCand
.RPDelta
.CurrentMax
,
3256 Cand
.RPDelta
.CurrentMax
,
3257 TryCand
, Cand
, RegMax
, TRI
,
3259 return TryCand
.Reason
!= NoCand
;
3262 // Avoid critical resource consumption and balance the schedule.
3263 TryCand
.initResourceDelta(DAG
, SchedModel
);
3264 if (tryLess(TryCand
.ResDelta
.CritResources
, Cand
.ResDelta
.CritResources
,
3265 TryCand
, Cand
, ResourceReduce
))
3266 return TryCand
.Reason
!= NoCand
;
3267 if (tryGreater(TryCand
.ResDelta
.DemandedResources
,
3268 Cand
.ResDelta
.DemandedResources
,
3269 TryCand
, Cand
, ResourceDemand
))
3270 return TryCand
.Reason
!= NoCand
;
3272 // Avoid serializing long latency dependence chains.
3273 // For acyclic path limited loops, latency was already checked above.
3274 if (!RegionPolicy
.DisableLatencyHeuristic
&& TryCand
.Policy
.ReduceLatency
&&
3275 !Rem
.IsAcyclicLatencyLimited
&& tryLatency(TryCand
, Cand
, *Zone
))
3276 return TryCand
.Reason
!= NoCand
;
3278 // Fall through to original instruction order.
3279 if ((Zone
->isTop() && TryCand
.SU
->NodeNum
< Cand
.SU
->NodeNum
)
3280 || (!Zone
->isTop() && TryCand
.SU
->NodeNum
> Cand
.SU
->NodeNum
)) {
3281 TryCand
.Reason
= NodeOrder
;
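// The bool return and the \return contract above make this hierarchy easy to
// extend: a subclass can prepend its own test and defer to the generic
// ordering for everything else. A minimal sketch, not upstream code; the
// MyTargetScheduler class and its myCost() helper are hypothetical, and the
// existing Stall reason is reused purely for illustration:
//
// \code
//   class MyTargetScheduler : public GenericScheduler {
//     bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
//                       SchedBoundary *Zone) const override {
//       // Prefer the cheaper node, then fall back to the generic rules.
//       if (tryLess(myCost(TryCand.SU), myCost(Cand.SU), TryCand, Cand,
//                   Stall))
//         return TryCand.Reason != NoCand;
//       return GenericScheduler::tryCandidate(Cand, TryCand, Zone);
//     }
//   };
// \endcode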
/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    if (tryCandidate(Cand, TryCand, ZoneArg)) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}
/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  if (tryCandidate(Cand, TopCand, nullptr)) {
    Cand.setBest(TopCand);
    LLVM_DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}
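// Note that the VerifyScheduling blocks above cross-check each cached
// candidate by re-picking from the queue, so stale BotCand/TopCand state
// surfaces as an assertion failure. An illustrative invocation on an
// asserts-enabled build:
//
//   llc -enable-misched -verify-misched foo.ll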
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
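// The RegionPolicy branches above can be pinned from the command line: the
// -misched-topdown and -misched-bottomup flags force OnlyTopDown and
// OnlyBottomUp respectively. Illustrative invocations:
//
//   llc -enable-misched -misched-topdown foo.ll
//   llc -enable-misched -misched-bottomup foo.ll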
void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SDep &Dep : Deps) {
    if (Dep.getKind() != SDep::Data ||
        !Register::isPhysicalRegister(Dep.getReg()))
      continue;
    SUnit *DepSU = Dep.getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy() && !Copy->isMoveImmediate())
      continue;
    LLVM_DEBUG(dbgs() << "  Rescheduling physreg copy ";
               DAG->dumpNode(*Dep.getSUnit()));
    DAG->moveInstruction(Copy, InsertPos);
  }
}
/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysReg.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysReg(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysReg(SU, false);
  }
}
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  return createGenericSchedLive(C);
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createConvergingSched);
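// A target- or plugin-provided strategy hooks in through the same registry
// mechanism. A minimal sketch following the pattern above; createMyScheduler
// and the "my-sched" name are hypothetical:
//
// \code
//   static ScheduleDAGInstrs *createMyScheduler(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Example scheduler.", createMyScheduler);
// \endcode
//
// The registered name is then selectable with -misched=my-sched.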
//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}
void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : BotRoots) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return TryCand.Reason != NoCand;

  // Keep clustered nodes together.
  if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
                 Cand.SU == DAG->getNextClusterSucc(),
                 TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return TryCand.Reason != NoCand;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return TryCand.Reason != NoCand;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return TryCand.Reason != NoCand;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  return false;
}
void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = true;
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryCandidate(Cand, TryCand)) {
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}
/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (SU) {
      tracePick(Only1, true);
    } else {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}
ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
                           /*RemoveKillFlags=*/true);
}
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {

/// Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult = nullptr;
  const BitVector *ScheduledTrees = nullptr;
  bool MaximizeILP;

  ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}

  /// Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
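// ILPOrder is a strict weak ordering, so it can drive the standard heap
// algorithms directly; that is exactly how ILPScheduler below uses it. A
// minimal sketch, assuming DAG is a live ScheduleDAGMILive in scope:
//
// \code
//   // Q is a std::vector<SUnit *> of ready nodes (hypothetical).
//   ILPOrder Cmp(/*MaxILP=*/true);
//   Cmp.DFSResult = DAG->getDFSResult();
//   Cmp.ScheduledTrees = &DAG->getScheduledTrees();
//   std::make_heap(Q.begin(), Q.end(), Cmp);
//   std::pop_heap(Q.begin(), Q.end(), Cmp); // best candidate now at Q.back()
// \endcode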
/// Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    LLVM_DEBUG(dbgs() << "Pick node "
                      << "SU(" << SU->NodeNum << ") "
                      << " ILP: " << DAG->getDFSResult()->getILP(SU)
                      << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
                      << " @"
                      << DAG->getDFSResult()->getSubtreeLevel(
                             DAG->getDFSResult()->getSubtreeID(SU))
                      << '\n'
                      << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
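// These registrations make both strategies selectable from the command line
// via the -misched option, e.g. (illustrative invocations):
//
//   llc -enable-misched -misched=ilpmax foo.ll
//   llc -enable-misched -misched=ilpmin foo.ll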
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
    TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return std::string(G->MF.getName());
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
         || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node, SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm
#endif // NDEBUG
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
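// Because this overload takes no arguments, it can be invoked directly from a
// debugger while stopped inside the scheduler, e.g. (illustrative gdb
// session on a debug build):
//
//   (gdb) call this->viewGraph()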