//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumClustered, "Number of load/store pairs clustered");
namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));

#ifndef NDEBUG
cl::opt<bool> ViewMISchedDAGs(
    "view-misched-dags", cl::Hidden,
    cl::desc("Pop up a window to show MISched dags after they are processed"));
cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
                        cl::desc("Print schedule DAGs"));
cl::opt<bool> MISchedDumpReservedCycles(
    "misched-dump-reserved-cycles", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
cl::opt<bool> MischedDetailResourceBooking(
    "misched-detail-resource-booking", cl::Hidden, cl::init(false),
    cl::desc("Show details of invoking getNextResourceCycle."));
#else
const bool ViewMISchedDAGs = false;
const bool PrintDAGs = false;
const bool MischedDetailResourceBooking = false;
#ifdef LLVM_ENABLE_DUMP
const bool MISchedDumpReservedCycles = false;
#endif // LLVM_ENABLE_DUMP
#endif // NDEBUG

} // end namespace llvm
#ifndef NDEBUG
/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessor/successor than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
                                        cl::desc("Only schedule this MBB#"));
#endif // NDEBUG
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));
static cl::opt<bool>
    ForceFastCluster("force-fast-cluster", cl::Hidden,
                     cl::desc("Switch to fast cluster algorithm with the loss "
                              "of some fusion opportunities"),
                     cl::init(false));
static cl::opt<unsigned>
    FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
                         cl::desc("The threshold for fast cluster"),
                         cl::init(1000));
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> MISchedDumpScheduleTrace(
    "misched-dump-schedule-trace", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
static cl::opt<unsigned>
    HeaderColWidth("misched-dump-schedule-trace-col-header-width", cl::Hidden,
                   cl::desc("Set width of the columns with "
                            "the resources and schedule units"),
                   cl::init(19));
static cl::opt<unsigned>
    ColWidth("misched-dump-schedule-trace-col-width", cl::Hidden,
             cl::desc("Set width of the columns showing resource booking."),
             cl::init(5));
static cl::opt<bool> MISchedSortResourcesInTrace(
    "misched-sort-resources-in-trace", cl::Hidden, cl::init(true),
    cl::desc("Sort the resources printed in the dump trace"));
#endif
static cl::opt<unsigned>
    MIResourceCutOff("misched-resource-cutoff", cl::Hidden,
                     cl::desc("Number of intervals to track"), cl::init(10));
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}
namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID) : MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace
char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
                      "PostRA Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
                    "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
    MachineSchedOpt("misched",
                    cl::init(&useDefaultMachineSched), cl::Hidden,
                    cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
    DefaultSchedRegistry("default",
                         "Use the target's default scheduler choice.",
                         useDefaultMachineSched);
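
// The registry is how alternative strategies become selectable on the command
// line via -misched=<name>. As an illustrative sketch (the factory and
// registry names below are hypothetical, not entries defined in this file),
// a custom scheduler can be registered like so:
//
// \code
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Example custom scheduler.",
//                       createMySched);
// \endcode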
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;
static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for(MachineBasicBlock::iterator RegionEnd = MBB->end();
      RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (;I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugOrPseudoInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
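
// For illustration, consider a block containing a call (a hypothetical
// instruction stream). isSchedBoundary treats the call as a boundary, so
// getSchedRegions yields two regions that are scheduled independently:
//
//   %1 = LOAD %ptr      \
//   %2 = ADD %1, %x      > region [LOAD, CALL)
//   CALL @fn            -- boundary; belongs to no region's DAG
//   %3 = MUL %2, %y      > region [MUL, MBB->end())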
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed (MBB) top-down if initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (const SchedRegion &R : MBBRegions) {
      MachineBasicBlock::iterator I = R.RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R.RegionEnd;
      unsigned NumRegionInstrs = R.NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End\n";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs it.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}
void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}
/// enterRegion - Called back from PostMachineScheduler::runOnMachineFunction
/// after crossing a scheduling boundary. [begin, end) includes all instructions
/// in the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
    MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}
bool ScheduleDAGMI::checkSchedLimit() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS && !defined(NDEBUG)
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
/// Per-region scheduling driver, called back from
/// PostMachineScheduler::runOnMachineFunction. This is a simplified driver
/// that does not consider liveness or register pressure. It is useful for
/// PostRA scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postProcessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postProcessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}
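
// For example (a sketch of target code under assumed conventions, not code in
// this file), a target's createMachineScheduler hook commonly attaches
// mutations such as the memop clustering implemented later in this file:
//
// \code
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   return DAG;
// \endcode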
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(std::next(OrigPrevMI), BB, DbgValue);
    if (RegionEnd != BB->end() && OrigPrevMI == &*RegionEnd)
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static const char *scheduleTableLegend = "  i: issue\n  x: resource booked";

LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceTopDown() const {
  // Bail off when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there is no or just one instruction.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (TopDown):\n";
  dbgs() << scheduleTableLegend << "\n";
  const unsigned FirstCycle = getSUnit(&*(std::begin(*this)))->TopReadyCycle;
  unsigned LastCycle = getSUnit(&*(std::prev(std::end(*this))))->TopReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if (SU->TopReadyCycle + PI->ReleaseAtCycle - 1 > LastCycle)
        LastCycle = SU->TopReadyCycle + PI->ReleaseAtCycle - 1;
    }
  }
  // Print the header with the cycles
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (unsigned C = FirstCycle; C <= LastCycle; ++C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    unsigned C = FirstCycle;
    for (; C <= LastCycle; ++C) {
      if (C == SU->TopReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);

    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
                                 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
                                  LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C < SU->TopReadyCycle + PI.AcquireAtCycle; ++C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
           ++I, ++C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C++ <= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char
      dbgs() << "| \n";
    }
  }
}
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceBottomUp() const {
  // Bail off when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there is no or just one instruction.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (BottomUp):\n";
  dbgs() << scheduleTableLegend << "\n";

  const int FirstCycle = getSUnit(&*(std::begin(*this)))->BotReadyCycle;
  int LastCycle = getSUnit(&*(std::prev(std::end(*this))))->BotReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if ((int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1 < LastCycle)
        LastCycle = (int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1;
    }
  }
  // Print the header with the cycles
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (int C = FirstCycle; C >= LastCycle; --C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    int C = FirstCycle;
    for (; C >= LastCycle; --C) {
      if (C == (int)SU->BotReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);
    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
                                 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
                                  LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C > ((int)SU->BotReadyCycle - (int)PI.AcquireAtCycle); --C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
           ++I, --C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C-- >= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char
      dbgs() << "| \n";
    }
  }
}
#endif
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  if (MISchedDumpScheduleTrace) {
    if (ForceTopDown)
      dumpScheduleTraceTopDown();
    else if (ForceBottomUp)
      dumpScheduleTraceBottomUp();
    else {
      dbgs() << "* Schedule table (Bidirectional): not implemented\n";
    }
  }

  for (MachineInstr &MI : *this) {
    if (SUnit *SU = getSUnit(&MI))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
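
// For example, a hypothetical invocation that prints these trace tables for a
// forced top-down schedule (assumes an assertions-enabled build and an input
// file named foo.ll):
//
//   llc foo.ll -debug-only=machine-scheduler \
//       -misched-dump-schedule-trace -misched-topdown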
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}
void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.all_defs()) {
        if (MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}
// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    Register Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Reg.isVirtual())
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}
void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff      : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue       : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postProcessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loop.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    Register Reg = P.RegUnit;
    if (!Reg.isVirtual())
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    unsigned Width;
    bool OffsetIsScalable;

    MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
              int64_t Offset, bool OffsetIsScalable, unsigned Width)
        : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
          Width(Width), OffsetIsScalable(OffsetIsScalable) {}

    static bool Compare(const MachineOperand *const &A,
                        const MachineOperand *const &B) {
      if (A->getType() != B->getType())
        return A->getType() < B->getType();
      if (A->isReg())
        return A->getReg() < B->getReg();
      if (A->isFI()) {
        const MachineFunction &MF = *A->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        return StackGrowsDown ? A->getIndex() > B->getIndex()
                              : A->getIndex() < B->getIndex();
      }
      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "index bases.");
    }

    bool operator<(const MemOpInfo &RHS) const {
      // FIXME: Don't compare everything twice. Maybe use C++20 three way
      // comparison instead when it's available.
      if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
                                       RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       Compare))
        return true;
      if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       BaseOps.begin(), BaseOps.end(), Compare))
        return false;
      if (Offset != RHS.Offset)
        return Offset < RHS.Offset;
      return SU->NodeNum < RHS.SU->NodeNum;
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;
  bool ReorderWhileClustering;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad,
                           bool ReorderWhileClustering)
      : TII(tii), TRI(tri), IsLoad(IsLoad),
        ReorderWhileClustering(ReorderWhileClustering) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
                                ScheduleDAGInstrs *DAG);
  void collectMemOpRecords(std::vector<SUnit> &SUnits,
                           SmallVectorImpl<MemOpInfo> &MemOpRecords);
  bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
                   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri,
                       bool ReorderWhileClustering)
      : BaseMemOpClusterMutation(tii, tri, false, ReorderWhileClustering) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri,
                      bool ReorderWhileClustering)
      : BaseMemOpClusterMutation(tii, tri, true, ReorderWhileClustering) {}
};

} // end anonymous namespace
namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             bool ReorderWhileClustering) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(
                                  TII, TRI, ReorderWhileClustering)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI,
                              bool ReorderWhileClustering) {
  return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(
                                  TII, TRI, ReorderWhileClustering)
                            : nullptr;
}

} // end namespace llvm
// Sort all the loads/stores first. Then, for each load/store, walk the
// following loads/stores one by one until the first non-dependent one is
// reached, and call the target hook to see whether the pair can be clustered.
// If FastCluster is enabled, we assume all the loads/stores have already been
// preprocessed and have no dependencies on each other.
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
    ScheduleDAGInstrs *DAG) {
  // Keep track of the current cluster length and bytes for each SUnit.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;

  // At this point, the `MemOpRecords` array must hold at least two mem ops.
  // Try to cluster the mem ops collected within the `MemOpRecords` array.
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    // The decision to cluster mem ops is made based on target-dependent logic.
    auto MemOpa = MemOpRecords[Idx];

    // Seek the next load/store to cluster with.
    unsigned NextIdx = Idx + 1;
    for (; NextIdx < End; ++NextIdx)
      // Skip if MemOpb has already been clustered or has a dependency on
      // MemOpa.
      if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
          (FastCluster ||
           (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
            !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
        break;
    if (NextIdx == End)
      continue;

    auto MemOpb = MemOpRecords[NextIdx];
    unsigned ClusterLength = 2;
    unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
    if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
      ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
      CurrentClusterBytes =
          SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
    }

    if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpa.Offset,
                                  MemOpa.OffsetIsScalable, MemOpb.BaseOps,
                                  MemOpb.Offset, MemOpb.OffsetIsScalable,
                                  ClusterLength, CurrentClusterBytes))
      continue;

    SUnit *SUa = MemOpa.SU;
    SUnit *SUb = MemOpb.SU;
    if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);

    // FIXME: Is this check really required?
    if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
      continue;

    LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
                      << SUb->NodeNum << ")\n");
    ++NumClustered;

    if (IsLoad) {
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
    } else {
      // Copy predecessor edges from SUb to SUa to avoid the SUnits that
      // SUb depends on being scheduled in-between SUb and SUa. Successor
      // edges do not need to be copied from SUa to SUb since no one will
      // depend on stores.
      // Note that we don't need to care about memory dependencies, as we
      // won't try to cluster the ops if they have any memory dependency.
      for (const SDep &Pred : SUb->Preds) {
        if (Pred.getSUnit() == SUa)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
      }
    }

    SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
                                             CurrentClusterBytes};

    LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
                      << ", Curr cluster bytes: " << CurrentClusterBytes
                      << "\n");
  }
}
void BaseMemOpClusterMutation::collectMemOpRecords(
    std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
  for (auto &SU : SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    const MachineInstr &MI = *SU.getInstr();
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    bool OffsetIsScalable;
    unsigned Width;
    if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                           OffsetIsScalable, Width, TRI)) {
      MemOpRecords.push_back(
          MemOpInfo(&SU, BaseOps, Offset, OffsetIsScalable, Width));

      LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
                        << Offset << ", OffsetIsScalable: " << OffsetIsScalable
                        << ", Width: " << Width << "\n");
    }
#ifndef NDEBUG
    for (const auto *Op : BaseOps)
      assert(Op);
#endif
  }
}
bool BaseMemOpClusterMutation::groupMemOps(
    ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
    DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
  bool FastCluster =
      ForceFastCluster ||
      MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;

  for (const auto &MemOp : MemOps) {
    unsigned ChainPredID = DAG->SUnits.size();
    if (FastCluster) {
      for (const SDep &Pred : MemOp.SU->Preds) {
        // We only want to cluster mem ops that share the same control
        // (non-data) predecessor, so that they have no control dependency
        // on each other. For store instrs, we can still cluster them if
        // the pred is a load instr.
        if ((Pred.isCtrl() &&
             (IsLoad ||
              (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
            !Pred.isArtificial()) {
          ChainPredID = Pred.getSUnit()->NodeNum;
          break;
        }
      }
    } else
      ChainPredID = 0;

    Groups[ChainPredID].push_back(MemOp);
  }
  return FastCluster;
}

/// Callback from DAG postProcessing to create cluster edges for loads/stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
  // Collect all the clusterable loads/stores.
  SmallVector<MemOpInfo, 32> MemOpRecords;
  collectMemOpRecords(DAG->SUnits, MemOpRecords);

  if (MemOpRecords.size() < 2)
    return;

  // If the DAG is too complex, use a heuristic to put independent
  // loads/stores into the same group, to avoid a compile-time blowup.
  // Note that some fusion pairs may be lost this way.
  DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
  bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);

  for (auto &Group : Groups) {
    // Sort the loads/stores so that clustering can stop as early as
    // possible.
    llvm::sort(Group.second);

    // Try to cluster all the neighboring loads/stores.
    clusterNeighboringMemOps(Group.second, FastCluster, DAG);
  }
}
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return std::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = src
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = src
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  Register SrcReg = SrcOp.getReg();
  if (!SrcReg.isVirtual() || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  Register DstReg = DstOp.getReg();
  if (!DstReg.isVirtual() || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
      LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (const SDep &Pred : GlobalSU->Preds) {
    if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
      continue;
    if (Pred.getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
      return;
    GlobalUses.push_back(Pred.getSUnit());
  }
  LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SUnit *LU : LocalUses) {
    LLVM_DEBUG(dbgs() << "  Local use SU(" << LU->NodeNum << ") -> SU("
                      << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
  }
  for (SUnit *GU : GlobalUses) {
    LLVM_DEBUG(dbgs() << "  Global use SU(" << GU->NodeNum << ") -> SU("
                      << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
  }
}
/// Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
      *priorNonDebug(DAG->end(), DAG->begin()));

  for (SUnit &SU : DAG->SUnits) {
    if (!SU.getInstr()->isCopy())
      continue;

    constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}
//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

/// Given a Count of resource usage and a Latency value, return true if a
/// SchedBoundary becomes resource limited.
/// If we are checking after scheduling a node, we should return true when
/// we just reach the resource limit.
static bool checkResourceLimit(unsigned LFactor, unsigned Count,
                               unsigned Latency, bool AfterSchedNode) {
  int ResCntFactor = (int)(Count - (Latency * LFactor));
  if (AfterSchedNode)
    return ResCntFactor >= (int)LFactor;
  else
    return ResCntFactor > (int)LFactor;
}
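// Worked example (editor's sketch, not part of the original source; all
// numbers are hypothetical): with LFactor=2, Count=10 scaled resource units
// and Latency=4 cycles, ResCntFactor = 10 - 4*2 = 2. After scheduling a node
// (AfterSchedNode=true) the zone counts as resource limited because 2 >= 2;
// before scheduling it would not, since the strict comparison 2 > 2 fails.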
void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = std::numeric_limits<unsigned>::max();
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
  ReservedResourceSegments.clear();
  ReservedCyclesIndex.clear();
  ResourceGroupSubUnitMasks.clear();
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}
void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (SUnit &SU : DAG->SUnits) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
    RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      assert(PI->ReleaseAtCycle >= PI->AcquireAtCycle);
      RemainingCounts[PIdx] +=
          (Factor * (PI->ReleaseAtCycle - PI->AcquireAtCycle));
    }
  }
}
void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
    ReservedCyclesIndex.resize(ResourceCount);
    ExecutedResCounts.resize(ResourceCount);
    ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
    unsigned NumUnits = 0;

    for (unsigned i = 0; i < ResourceCount; ++i) {
      ReservedCyclesIndex[i] = NumUnits;
      NumUnits += SchedModel->getProcResource(i)->NumUnits;
      if (isUnbufferedGroup(i)) {
        auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
        for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
             U != UE; ++U)
          ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
      }
    }

    ReservedCycles.resize(NumUnits, InvalidCycle);
  }
}
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}
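// Illustrative example (editor's sketch, not part of the original source;
// numbers are hypothetical): for an unbuffered SU with BotReadyCycle=7 while
// the bottom zone sits at CurrCycle=5, this returns 2 "soft stall" cycles.
// Heuristics may still choose to schedule the node, unlike the hard hazards
// reported by checkHazard().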
/// Compute the next cycle at which the given processor resource unit
/// can be scheduled.
unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
                                                       unsigned ReleaseAtCycle,
                                                       unsigned AcquireAtCycle) {
  if (SchedModel && SchedModel->enableIntervals()) {
    if (isTop())
      return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromTop(
          CurrCycle, AcquireAtCycle, ReleaseAtCycle);
    return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromBottom(
        CurrCycle, AcquireAtCycle, ReleaseAtCycle);
  }

  unsigned NextUnreserved = ReservedCycles[InstanceIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved = std::max(CurrCycle, NextUnreserved + ReleaseAtCycle);
  return NextUnreserved;
}
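// Illustrative example (editor's sketch, not part of the original source;
// numbers are hypothetical): without interval booking, if
// ReservedCycles[InstanceIdx]=9, CurrCycle=6 and ReleaseAtCycle=3, a
// bottom-up zone computes max(6, 9+3)=12, i.e. this unit cannot accept
// another operation until cycle 12; a top-down zone simply returns the
// recorded cycle 9.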
/// Compute the next cycle at which the given processor resource can be
/// scheduled. Returns the next cycle and the index of the processor resource
/// instance in the reserved cycles vector.
std::pair<unsigned, unsigned>
SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
                                    unsigned ReleaseAtCycle,
                                    unsigned AcquireAtCycle) {
  if (MischedDetailResourceBooking) {
    LLVM_DEBUG(dbgs() << "  Resource booking (@" << CurrCycle << "c): \n");
    LLVM_DEBUG(dumpReservedCycles());
    LLVM_DEBUG(dbgs() << "  getNextResourceCycle (@" << CurrCycle << "c): \n");
  }
  unsigned MinNextUnreserved = InvalidCycle;
  unsigned InstanceIdx = 0;
  unsigned StartIndex = ReservedCyclesIndex[PIdx];
  unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
  assert(NumberOfInstances > 0 &&
         "Cannot have zero instances of a ProcResource");

  if (isUnbufferedGroup(PIdx)) {
    // If any subunits are used by the instruction, report that the
    // subunits of the resource group are available at the first cycle
    // in which the unit is available, effectively removing the group
    // record from hazarding and basing the hazarding decisions on the
    // subunit records. Otherwise, choose the first available instance
    // from among the subunits. Specifications which assign cycles to
    // both the subunits and the group or which use an unbuffered
    // group with buffered subunits will appear to schedule
    // strangely. In the first case, the additional cycles for the
    // group will be ignored. In the second, the group will be
    // ignored entirely.
    for (const MCWriteProcResEntry &PE :
         make_range(SchedModel->getWriteProcResBegin(SC),
                    SchedModel->getWriteProcResEnd(SC)))
      if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
        return std::make_pair(getNextResourceCycleByInstance(
                                  StartIndex, ReleaseAtCycle, AcquireAtCycle),
                              StartIndex);

    auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
    for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
      unsigned NextUnreserved, NextInstanceIdx;
      std::tie(NextUnreserved, NextInstanceIdx) =
          getNextResourceCycle(SC, SubUnits[I], ReleaseAtCycle, AcquireAtCycle);
      if (MinNextUnreserved > NextUnreserved) {
        InstanceIdx = NextInstanceIdx;
        MinNextUnreserved = NextUnreserved;
      }
    }
    return std::make_pair(MinNextUnreserved, InstanceIdx);
  }

  for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
       ++I) {
    unsigned NextUnreserved =
        getNextResourceCycleByInstance(I, ReleaseAtCycle, AcquireAtCycle);
    if (MischedDetailResourceBooking)
      LLVM_DEBUG(dbgs() << "  Instance " << I - StartIndex << " available @"
                        << NextUnreserved << "c\n");
    if (MinNextUnreserved > NextUnreserved) {
      InstanceIdx = I;
      MinNextUnreserved = NextUnreserved;
    }
  }
  if (MischedDetailResourceBooking)
    LLVM_DEBUG(dbgs() << "  selecting " << SchedModel->getResourceName(PIdx)
                      << "[" << InstanceIdx - StartIndex << "]"
                      << " available @" << MinNextUnreserved << "c"
                      << "\n");
  return std::make_pair(MinNextUnreserved, InstanceIdx);
}
/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
                      << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }

  if (CurrMOps > 0 &&
      ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
       (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
    LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
                      << (isTop() ? "begin" : "end") << " group\n");
    return true;
  }

  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (const MCWriteProcResEntry &PE :
         make_range(SchedModel->getWriteProcResBegin(SC),
                    SchedModel->getWriteProcResEnd(SC))) {
      unsigned ResIdx = PE.ProcResourceIdx;
      unsigned ReleaseAtCycle = PE.ReleaseAtCycle;
      unsigned AcquireAtCycle = PE.AcquireAtCycle;
      unsigned NRCycle, InstanceIdx;
      std::tie(NRCycle, InstanceIdx) =
          getNextResourceCycle(SC, ResIdx, ReleaseAtCycle, AcquireAtCycle);
      if (NRCycle > CurrCycle) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        MaxObservedStall = std::max(ReleaseAtCycle, MaxObservedStall);
#endif
        LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
                          << SchedModel->getResourceName(ResIdx)
                          << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
                          << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (SUnit *SU : ReadySUs) {
    unsigned L = getUnscheduledLatency(SU);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = SU;
    }
  }
  if (LateSU) {
    LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
                      << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
                    << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    LLVM_DEBUG(
        dbgs() << "  " << Available.getName() << " + Remain CritRes: "
               << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
               << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}
void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                                unsigned Idx) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // ReadyCycle has been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
                        checkHazard(SU) || (Available.size() >= ReadyListLimit);

  if (!HazardDetected) {
    Available.push(SU);

    if (InPQueue)
      Pending.remove(Pending.begin() + Idx);
    return;
  }

  if (!InPQueue)
    Pending.push(SU);
}
/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
           "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  IsResourceLimited =
      checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                         getScheduledLatency(), true);

  LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
                    << '\n');
}
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}
/// Add the given processor resource to this scheduled zone.
///
/// \param ReleaseAtCycle indicates the number of consecutive (non-pipelined)
/// cycles during which this resource is released.
///
/// \param AcquireAtCycle indicates the number of consecutive (non-pipelined)
/// cycles at which the resource is acquired after issue (assuming no stalls).
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                                      unsigned ReleaseAtCycle,
                                      unsigned NextCycle,
                                      unsigned AcquireAtCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * (ReleaseAtCycle - AcquireAtCycle);
  LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
                    << ReleaseAtCycle << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    LLVM_DEBUG(dbgs() << "  *** Critical resource "
                      << SchedModel->getResourceName(PIdx) << ": "
                      << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
                      << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable, InstanceIdx;
  std::tie(NextAvailable, InstanceIdx) =
      getNextResourceCycle(SC, PIdx, ReleaseAtCycle, AcquireAtCycle);
  if (NextAvailable > CurrCycle) {
    LLVM_DEBUG(dbgs() << "  Resource conflict: "
                      << SchedModel->getResourceName(PIdx)
                      << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
                      << " reserved until @" << NextAvailable << "\n");
  }
  return NextAvailable;
}
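// Worked example (editor's sketch, not part of the original source; numbers
// are hypothetical): a resource with Factor=2 used for ReleaseAtCycle=3 and
// AcquireAtCycle=1 contributes Count = 2*(3-1) = 4 scaled units. If that
// pushes getResourceCount(PIdx) past getCriticalCount(), the zone's critical
// resource switches to PIdx.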
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
    // Scheduling an instruction may have made pending instructions available.
    CheckPending = true;
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(SC, PI->ProcResourceIdx, PI->ReleaseAtCycle, NextCycle,
                      PI->AcquireAtCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {

          if (SchedModel && SchedModel->enableIntervals()) {
            unsigned ReservedUntil, InstanceIdx;
            std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
                SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
            if (isTop()) {
              ReservedResourceSegments[InstanceIdx].add(
                  ResourceSegments::getResourceIntervalTop(
                      NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
                  MIResourceCutOff);
            } else {
              ReservedResourceSegments[InstanceIdx].add(
                  ResourceSegments::getResourceIntervalBottom(
                      NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
                  MIResourceCutOff);
            }
          } else {

            unsigned ReservedUntil, InstanceIdx;
            std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
                SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
            if (isTop()) {
              ReservedCycles[InstanceIdx] =
                  std::max(ReservedUntil, NextCycle + PI->ReleaseAtCycle);
            } else
              ReservedCycles[InstanceIdx] = NextCycle;
          }
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
                      << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
                      << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle)
    bumpCycle(NextCycle);
  else
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    IsResourceLimited =
        checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                           getScheduledLatency(), true);

  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;

  // Bump the cycle count for issue group constraints.
  // This must be done after NextCycle has been adjusted for all other stalls.
  // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
  // currCycle to X.
  if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
      (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
    LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
                      << " group\n");
    bumpCycle(++NextCycle);
  }

  while (CurrMOps >= SchedModel->getIssueWidth()) {
    LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
                      << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  LLVM_DEBUG(dumpScheduledState());
}
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = std::numeric_limits<unsigned>::max();

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
    SUnit *SU = *(Pending.begin() + I);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (Available.size() >= ReadyListLimit)
      break;

    releaseNode(SU, ReadyCycle, true, I);
    if (E != Pending.size()) {
      --I;
      --E;
    }
  }
  CheckPending = false;
}
/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Defer any ready instrs that now have a hazard.
  for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
    if (checkHazard(*I)) {
      Pending.push(*I);
      I = Available.remove(I);
      continue;
    }
    ++I;
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    //  FIXME: Re-enable assert once PR20057 is resolved.
    //  assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //         "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  LLVM_DEBUG(Pending.dump());
  LLVM_DEBUG(Available.dump());

  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Dump the content of the \ref ReservedCycles vector for the
/// resources that are used in the basic block.
///
LLVM_DUMP_METHOD void SchedBoundary::dumpReservedCycles() const {
  if (!SchedModel->hasInstrSchedModel())
    return;

  unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
  unsigned StartIdx = 0;

  for (unsigned ResIdx = 0; ResIdx < ResourceCount; ++ResIdx) {
    const unsigned NumUnits = SchedModel->getProcResource(ResIdx)->NumUnits;
    std::string ResName = SchedModel->getResourceName(ResIdx);
    for (unsigned UnitIdx = 0; UnitIdx < NumUnits; ++UnitIdx) {
      dbgs() << ResName << "(" << UnitIdx << ") = ";
      if (SchedModel && SchedModel->enableIntervals()) {
        if (ReservedResourceSegments.count(StartIdx + UnitIdx))
          dbgs() << ReservedResourceSegments.at(StartIdx + UnitIdx);
        else
          dbgs() << "{ }\n";
      } else
        dbgs() << ReservedCycles[StartIdx + UnitIdx] << "\n";
    }
    StartIdx += NumUnits;
  }
}
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * ResFactor;
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
  if (MISchedDumpReservedCycles)
    dumpReservedCycles();
}
#endif
//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->ReleaseAtCycle;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->ReleaseAtCycle;
  }
}
/// Compute remaining latency. We need this both to determine whether the
/// overall schedule has become latency-limited and whether the instructions
/// outside this zone are resource or latency limited.
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
///   ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));
  return RemLatency;
}
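// Illustrative example (editor's sketch, not part of the original source;
// numbers are hypothetical): if the deepest scheduled node gives a dependent
// latency of 6c but an unscheduled node still sitting in Pending has an
// unscheduled latency of 9c, RemainingLatency = max(6, 9) = 9c.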
/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                               SchedBoundary &CurrZone,
                                               bool ComputeRemLatency,
                                               unsigned &RemLatency) const {
  // The current cycle is already greater than the critical path, so we are
  // already latency limited and don't need to compute the remaining latency.
  if (CurrZone.getCurrCycle() > Rem.CriticalPath)
    return true;

  // If we haven't scheduled anything yet, then we aren't latency limited.
  if (CurrZone.getCurrCycle() == 0)
    return false;

  if (ComputeRemLatency)
    RemLatency = computeRemLatency(CurrZone);

  return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}
/// Set the CandPolicy given a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
      OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  unsigned RemLatency = 0;
  bool RemLatencyComputed = false;
  if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
    RemLatency = computeRemLatency(CurrZone);
    RemLatencyComputed = true;
    OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
                                         OtherCount, RemLatency, false);
  }

  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited &&
      (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
                                       RemLatency))) {
    Policy.ReduceLatency |= true;
    LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
                      << " RemainingLatency " << RemLatency << " + "
                      << CurrZone.getCurrCycle() << "c > CritPath "
                      << Rem.CriticalPath << "\n");
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
    dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
           << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
  } if (OtherResLimited) dbgs()
                 << "  RemainingLimit: "
                 << SchedModel->getResourceName(OtherCritIdx) << "\n";
             if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
             << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysReg:        return "PHYS-REG  ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  };
  llvm_unreachable("Unknown reason!");
}
void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif
namespace llvm {
/// Return true if this heuristic determines order.
/// TODO: Consider refactoring the return type of these functions as integer or
/// enum, as we may need to differentiate whether TryCand is better than Cand.
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}

bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone) {
  if (Zone.isTop()) {
    // Prefer the candidate with the lesser depth, but only if one of them has
    // depth greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    // Prefer the candidate with the lesser height, but only if one of them has
    // height greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}
} // end namespace llvm
static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
                    << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  if (RegionPolicy.ComputeDFSResult)
    DAG->computeDFSResult();

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getMF();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
          TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and more
  // compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure) {
    RegionPolicy.ShouldTrackPressure = false;
    RegionPolicy.ShouldTrackLaneMasks = false;
  }

  // Check -misched-topdown/bottomup can force or unforce scheduling direction.
  // e.g. -misched-bottomup=false allows scheduling in both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
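
// For example, on a target whose only legal integer type is i32 backed by 16
// allocatable registers, NIntRegs / 2 == 8, so pressure tracking stays enabled
// only for regions of more than eight instructions (before any subtarget or
// command-line overrides are applied).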

void GenericScheduler::dumpPolicy() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
#endif
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
      std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
               Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
      (AcyclicCount * Rem.RemIssueCount + IterCount - 1) / IterCount;
  unsigned BufferLimit =
      SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  LLVM_DEBUG(
      dbgs() << "IssueCycles="
             << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
             << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
             << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
             << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
             << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
      if (Rem.IsAcyclicLatencyLimited) dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
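
// For example, with a latency factor and micro-op factor of 1, CyclicCritPath
// == 2, CriticalPath == 40, RemIssueCount == 6, and a 16-entry micro-op
// buffer: IterCount = max(2, 6) = 6, AcyclicCount = 40, and InFlightCount =
// ceil(40 * 6 / 6) = 40 > BufferLimit = 16, so the loop is flagged as
// acyclic-latency limited and tryCandidate() will schedule it aggressively
// for latency.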

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : Bot.Available) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF) {
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // Do not compare the magnitude of pressure changes between top and bottom
  // boundary.
  if (Cand.AtTop != TryCand.AtTop)
    return false;

  // If both candidates affect the same set in the same boundary, go with the
  // smallest increase.
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }

  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                                 std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                                   std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
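
// For example, if TryCand lowers pressure in some set by two units
// (UnitInc == -2) while Cand raises it by one, the first tryGreater(true,
// false, ...) test immediately prefers TryCand; when both raise pressure in
// the same set, the smaller unit increase wins via tryLess().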

unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
int biasPhysReg(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();

  if (MI->isCopy()) {
    unsigned ScheduledOper = isTop ? 1 : 0;
    unsigned UnscheduledOper = isTop ? 0 : 1;
    // If we have already scheduled the physreg producer/consumer, immediately
    // schedule the copy.
    if (MI->getOperand(ScheduledOper).getReg().isPhysical())
      return 1;
    // If the physreg is at the boundary, defer it. Otherwise schedule it
    // immediately to free the dependent. We can hoist the copy later.
    bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
    if (MI->getOperand(UnscheduledOper).getReg().isPhysical())
      return AtBoundary ? -1 : 1;
  }

  if (MI->isMoveImmediate()) {
    // If we have a move immediate and all successors have been assigned, bias
    // towards scheduling this later. Make sure all register defs are to
    // physical registers.
    bool DoBias = true;
    for (const MachineOperand &Op : MI->defs()) {
      if (Op.isReg() && !Op.getReg().isPhysical()) {
        DoBias = false;
        break;
      }
    }

    if (DoBias)
      return isTop ? -1 : 1;
  }

  return 0;
}
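
// For example, for %0 = COPY $x0 scheduled top-down, the scheduled-side
// operand ($x0) is physical, so biasPhysReg() returns 1 and tryCandidate()
// pulls the copy up next to the physreg def; bottom-up, the same copy is
// deferred at the region boundary (-1) or scheduled immediately (1) to free
// its dependent.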

} // end namespace llvm

void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
          Cand.SU->getInstr(),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
            Cand.SU->getInstr(),
            &DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
            Cand.SU->getInstr(),
            DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
             << "  Try  SU(" << Cand.SU->NodeNum << ") "
             << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
             << Cand.RPDelta.Excess.getUnitInc() << "\n");
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending, or nullptr
///             if Cand is from a different zone than TryCand.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary *Zone) const {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Bias PhysReg Defs and copies to their uses and defined respectively.
  if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
                 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
    return TryCand.Reason != NoCand;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  // We only compare a subset of features when comparing nodes between
  // Top and Bottom boundary. Some properties are simply incomparable, in many
  // other instances we should only override the other boundary if something
  // is a clear good pick on one boundary. Skip heuristics that are more
  // "tie-breaking" in nature.
  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // For loops that are acyclic path limited, aggressively schedule for
    // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
    // heuristics to take precedence.
    if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
        tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return TryCand.Reason != NoCand;
  }

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *CandNextClusterSU =
      Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  const SUnit *TryCandNextClusterSU =
      TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == TryCandNextClusterSU,
                 Cand.SU == CandNextClusterSU,
                 TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  if (SameBoundary) {
    // Weak edges are for clustering and other constraints.
    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop),
                TryCand, Cand, Weak))
      return TryCand.Reason != NoCand;
  }

  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  if (SameBoundary) {
    // Avoid critical resource consumption and balance the schedule.
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
                TryCand, Cand, ResourceReduce))
      return TryCand.Reason != NoCand;
    if (tryGreater(TryCand.ResDelta.DemandedResources,
                   Cand.ResDelta.DemandedResources,
                   TryCand, Cand, ResourceDemand))
      return TryCand.Reason != NoCand;

    // Avoid serializing long latency dependence chains.
    // For acyclic path limited loops, latency was already checked above.
    if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
        !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Fall through to original instruction order.
    if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
        || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
      TryCand.Reason = NodeOrder;
      return true;
    }
  }

  return false;
}
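
// To summarize the hierarchy above: physreg bias, excess pressure, critical
// pressure, then (same boundary only) acyclic latency and stalls, clustering,
// weak edges, region max pressure, resource balance, the latency heuristic,
// and finally the original node order as the tie-breaker.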

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    if (tryCandidate(Cand, TryCand, ZoneArg)) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  if (tryCandidate(Cand, TopCand, nullptr)) {
    Cand.setBest(TopCand);
    LLVM_DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SDep &Dep : Deps) {
    if (Dep.getKind() != SDep::Data ||
        !Register::isPhysicalRegister(Dep.getReg()))
      continue;
    SUnit *DepSU = Dep.getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy() && !Copy->isMoveImmediate())
      continue;
    LLVM_DEBUG(dbgs() << "  Rescheduling physreg copy ";
               DAG->dumpNode(*Dep.getSUnit()));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysReg.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysReg(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysReg(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  return createGenericSchedLive(C);
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createConvergingSched);

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : BotRoots) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return TryCand.Reason != NoCand;

  // Keep clustered nodes together.
  if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
                 Cand.SU == DAG->getNextClusterSucc(),
                 TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return TryCand.Reason != NoCand;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return TryCand.Reason != NoCand;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return TryCand.Reason != NoCand;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  return false;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = true;
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryCandidate(Cand, TryCand)) {
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (SU) {
      tracePick(Only1, true);
    } else {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
                           /*RemoveKillFlags=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {

/// Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult = nullptr;
  const BitVector *ScheduledTrees = nullptr;
  bool MaximizeILP;

  ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}

  /// Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
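
// Note that the ready queue below is kept as a max-heap ordered by this
// less-than predicate, so returning true means A is scheduled *after* B. For
// example, with MaximizeILP an SUnit with ILP 4 compares less than one with
// ILP 7, leaving the ILP-7 node on top of the heap to be picked first.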

/// Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    LLVM_DEBUG(dbgs() << "Pick node "
                      << "SU(" << SU->NodeNum << ") "
                      << " ILP: " << DAG->getDFSResult()->getILP(SU)
                      << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
                      << " @"
                      << DAG->getDFSResult()->getSubtreeLevel(
                             DAG->getDFSResult()->getSubtreeID(SU))
                      << '\n'
                      << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
}

static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
    "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
    "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
    TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return std::string(G->MF.getName());
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
           || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}

/// Sort predicate for the intervals stored in an instance of
/// ResourceSegments. Intervals are always disjoint (no intersection
/// for any pairs of intervals), therefore we can sort the totality of
/// the intervals by looking only at the left boundary.
static bool sortIntervals(const ResourceSegments::IntervalTy &A,
                          const ResourceSegments::IntervalTy &B) {
  return A.first < B.first;
}

unsigned ResourceSegments::getFirstAvailableAt(
    unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
    std::function<ResourceSegments::IntervalTy(unsigned, unsigned, unsigned)>
        IntervalBuilder) const {
  assert(std::is_sorted(std::begin(_Intervals), std::end(_Intervals),
                        sortIntervals) &&
         "Cannot execute on an un-sorted set of intervals.");
  unsigned RetCycle = CurrCycle;
  ResourceSegments::IntervalTy NewInterval =
      IntervalBuilder(RetCycle, AcquireAtCycle, ReleaseAtCycle);
  for (auto &Interval : _Intervals) {
    if (!intersects(NewInterval, Interval))
      continue;

    // Move the interval right next to the top of the one it
    // intersects.
    assert(Interval.second > NewInterval.first &&
           "Invalid intervals configuration.");
    RetCycle += (unsigned)Interval.second - (unsigned)NewInterval.first;
    NewInterval = IntervalBuilder(RetCycle, AcquireAtCycle, ReleaseAtCycle);
  }
  return RetCycle;
}
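
// For example, with _Intervals == {[2, 5)} and a builder that produces
// [C, C + 2), a query at CurrCycle == 4 first builds [4, 6), which intersects
// [2, 5); the cursor is bumped by 5 - 4 = 1 cycle, the rebuilt interval
// [5, 7) is disjoint, and 5 is returned as the first available cycle.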

void ResourceSegments::add(ResourceSegments::IntervalTy A,
                           const unsigned CutOff) {
  assert(A.first < A.second && "Cannot add empty resource usage");
  assert(CutOff > 0 && "0-size interval history has no use.");
  assert(all_of(_Intervals,
                [&A](const ResourceSegments::IntervalTy &Interval) -> bool {
                  return !intersects(A, Interval);
                }) &&
         "A resource is being overwritten");
  _Intervals.push_back(A);

  sortAndMerge();

  // Do not keep the full history of the intervals, just the
  // latest #CutOff.
  while (_Intervals.size() > CutOff)
    _Intervals.pop_front();
}
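
// For example, with CutOff == 2, adding [6, 8) to {[0, 2), [3, 5)} appends
// and re-sorts the list (nothing merges here), then drops the oldest interval
// [0, 2) so that only the two most recent segments are retained.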

bool ResourceSegments::intersects(ResourceSegments::IntervalTy A,
                                  ResourceSegments::IntervalTy B) {
  assert(A.first <= A.second && "Invalid interval");
  assert(B.first <= B.second && "Invalid interval");

  // Share one boundary.
  if ((A.first == B.first) || (A.second == B.second))
    return true;

  // full intersect:      [    ***     )  B
  //                          [***)       A
  if ((A.first > B.first) && (A.second < B.second))
    return true;

  // right intersect:     [     ***)      B
  //                           [***    )  A
  if ((A.first > B.first) && (A.first < B.second) && (A.second > B.second))
    return true;

  // left intersect:          [***    )   B
  //                      [     ***)      A
  if ((A.first < B.first) && (B.first < A.second) && (B.second > B.first))
    return true;

  return false;
}
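
// For example, [2, 5) and [4, 7) overlap through the left-intersect case
// above, while the back-to-back intervals [2, 5) and [5, 7) share no boundary
// and fail every test, so adjacent resource usage is not treated as a
// conflict.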

void ResourceSegments::sortAndMerge() {
  if (_Intervals.size() <= 1)
    return;

  // First sort the collection.
  _Intervals.sort(sortIntervals);

  // We can use next because there are at least 2 elements in the list.
  auto next = std::next(std::begin(_Intervals));
  auto E = std::end(_Intervals);
  for (; next != E; ++next) {
    if (std::prev(next)->second >= next->first) {
      next->first = std::prev(next)->first;
      _Intervals.erase(std::prev(next));