//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumClustered, "Number of load/store pairs clustered");

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
namespace MISchedPostRASched {
enum Direction {
  TopDown,
  BottomUp,
  Bidirectional,
};
} // end namespace MISchedPostRASched
cl::opt<MISchedPostRASched::Direction> PostRADirection(
    "misched-postra-direction", cl::Hidden,
    cl::desc("Post reg-alloc list scheduling direction"),
    // Default to top-down because it was implemented first and existing targets
    // expect that behavior by default.
    cl::init(MISchedPostRASched::TopDown),
    cl::values(
        clEnumValN(MISchedPostRASched::TopDown, "topdown",
                   "Force top-down post reg-alloc list scheduling"),
        clEnumValN(MISchedPostRASched::BottomUp, "bottomup",
                   "Force bottom-up post reg-alloc list scheduling"),
        clEnumValN(MISchedPostRASched::Bidirectional, "bidirectional",
                   "Force bidirectional post reg-alloc list scheduling")));
cl::opt<bool>
    DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                           cl::desc("Print critical path length to stdout"));
cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));
#ifndef NDEBUG
cl::opt<bool> ViewMISchedDAGs(
    "view-misched-dags", cl::Hidden,
    cl::desc("Pop up a window to show MISched dags after they are processed"));
cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
                        cl::desc("Print schedule DAGs"));
cl::opt<bool> MISchedDumpReservedCycles(
    "misched-dump-reserved-cycles", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
cl::opt<bool> MischedDetailResourceBooking(
    "misched-detail-resource-booking", cl::Hidden, cl::init(false),
    cl::desc("Show details of invoking getNextResourceCycle."));
#else
const bool ViewMISchedDAGs = false;
const bool PrintDAGs = false;
const bool MischedDetailResourceBooking = false;
#ifdef LLVM_ENABLE_DUMP
const bool MISchedDumpReservedCycles = false;
#endif // LLVM_ENABLE_DUMP
#endif // NDEBUG
} // end namespace llvm
/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph, provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
    cl::desc("Hide nodes with more predecessors/successors than cutoff"));
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
    cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
    cl::desc("Only schedule this function"));

static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
    cl::desc("Only schedule this MBB#"));
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
    cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
    cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
    cl::desc("Enable cyclic critical path analysis."), cl::init(true));
static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));
static cl::opt<bool>
    ForceFastCluster("force-fast-cluster", cl::Hidden,
                     cl::desc("Switch to fast cluster algorithm with the loss "
                              "of some fusion opportunities"),
                     cl::init(false));
static cl::opt<unsigned>
    FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
                         cl::desc("The threshold for fast cluster"),
                         cl::init(1000));
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> MISchedDumpScheduleTrace(
    "misched-dump-schedule-trace", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
static cl::opt<unsigned>
    HeaderColWidth("misched-dump-schedule-trace-col-header-width", cl::Hidden,
                   cl::desc("Set width of the columns with "
                            "the resources and schedule units"),
                   cl::init(19));
static cl::opt<unsigned>
    ColWidth("misched-dump-schedule-trace-col-width", cl::Hidden,
             cl::desc("Set width of the columns showing resource booking."),
             cl::init(5));
static cl::opt<bool> MISchedSortResourcesInTrace(
    "misched-sort-resources-in-trace", cl::Hidden, cl::init(true),
    cl::desc("Sort the resources printed in the dump trace"));
#endif

static cl::opt<unsigned>
    MIResourceCutOff("misched-resource-cutoff", cl::Hidden,
                     cl::desc("Number of intervals to track"), cl::init(10));
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID) : MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};
/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace
char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;
INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTreeWrapperPass>();
  AU.addRequired<MachineLoopInfoWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexesWrapperPass>();
  AU.addPreserved<SlotIndexesWrapperPass>();
  AU.addRequired<LiveIntervalsWrapperPass>();
  AU.addPreserved<LiveIntervalsWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
                      "PostRA Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
                    "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTreeWrapperPass>();
  AU.addRequired<MachineLoopInfoWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;
/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
    MachineSchedOpt("misched",
                    cl::init(&useDefaultMachineSched), cl::Hidden,
                    cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
    DefaultSchedRegistry("default",
                         "Use the target's default scheduler choice.",
                         useDefaultMachineSched);
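
// Hedged usage sketch (not part of this file; the names are illustrative): a
// target or plugin can register an alternative factory with
// MachineSchedRegistry and select it via -misched=<name>:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry MySchedRegistry("my-sched", "My scheduler.",
//                                               createMySched);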
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}
/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  ScheduleDAGMI::DumpDirection D;
  if (ForceTopDown)
    D = ScheduleDAGMI::DumpDirection::TopDown;
  else if (ForceBottomUp)
    D = ScheduleDAGMI::DumpDirection::BottomUp;
  else
    D = ScheduleDAGMI::DumpDirection::Bidirectional;
  Scheduler->setDumpDirection(D);
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}
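
// Note on the enabling logic above: an explicit -enable-misched(=true|false)
// on the command line always wins; otherwise the subtarget decides. A hedged
// sketch of the target-side hook ("MySubtarget" is illustrative):
//
//   bool MySubtarget::enableMachineScheduler() const { return true; }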
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  ScheduleDAGMI::DumpDirection D;
  if (PostRADirection == MISchedPostRASched::TopDown)
    D = ScheduleDAGMI::DumpDirection::TopDown;
  else if (PostRADirection == MISchedPostRASched::BottomUp)
    D = ScheduleDAGMI::DumpDirection::BottomUp;
  else
    D = ScheduleDAGMI::DumpDirection::Bidirectional;
  Scheduler->setDumpDirection(D);
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall check to
/// enforce the boundary, but there would be no benefit to postRA scheduling
/// across calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
namespace {

/// A region of an MBB for scheduling.
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};

} // end anonymous namespace
using MBBRegionsVector = SmallVector<SchedRegion, 16>;
static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugOrPseudoInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}
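
// Worked example (illustrative mnemonics only): for a block
//   ld1; add; CALL; sub; st1; terminator
// the call is a boundary per isSchedBoundary(), so two regions are recorded:
// one covering ld1/add and one covering sub/st1. The boundary instructions
// themselves never become DAG nodes.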
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed (MBB) top-down if initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (const SchedRegion &R : MBBRegions) {
      MachineBasicBlock::iterator I = R.RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R.RegionEnd;
      unsigned NumRegionInstrs = R.NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End\n";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}
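
// Numeric sketch of the ready-cycle update above (illustrative values): if SU
// issued at TopReadyCycle = 3 and the edge latency is 2, SuccSU cannot be
// ready before cycle 5, so its TopReadyCycle is raised to 5 unless a prior
// release already pushed it later.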
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}
/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}
void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}
/// enterRegion - Called back from PostMachineScheduler::runOnMachineFunction
/// after crossing a scheduling boundary. [begin, end) includes all instructions
/// in the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}
bool ScheduleDAGMI::checkSchedLimit() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS && !defined(NDEBUG)
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
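
// Usage note (hedged): -misched-cutoff=N is commonly paired with bisection to
// isolate a bad scheduling decision; once the cutoff is hit, the remaining
// instructions in the region are left in their current order.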
/// Per-region scheduling driver, called back from
/// PostMachineScheduler::runOnMachineFunction. This is a simplified driver
/// that does not consider liveness or register pressure. It is useful for
/// PostRA scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postProcessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postProcessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(std::next(OrigPrevMI), BB, DbgValue);
    if (RegionEnd != BB->end() && OrigPrevMI == &*RegionEnd)
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

static const char *scheduleTableLegend = "  i: issue\n  x: resource booked";
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceTopDown() const {
  // Bail off when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there is no or just one instruction.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (TopDown):\n";
  dbgs() << scheduleTableLegend << "\n";
  const unsigned FirstCycle = getSUnit(&*(std::begin(*this)))->TopReadyCycle;
  unsigned LastCycle = getSUnit(&*(std::prev(std::end(*this))))->TopReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if (SU->TopReadyCycle + PI->ReleaseAtCycle - 1 > LastCycle)
        LastCycle = SU->TopReadyCycle + PI->ReleaseAtCycle - 1;
    }
  }
  // Print the header with the cycles.
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (unsigned C = FirstCycle; C <= LastCycle; ++C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    unsigned C = FirstCycle;
    for (; C <= LastCycle; ++C) {
      if (C == SU->TopReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);

    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
                                 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
                                  LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C < SU->TopReadyCycle + PI.AcquireAtCycle; ++C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
           ++I, ++C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C++ <= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char.
      dbgs() << "| \n";
    }
  }
}
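
// Illustrative shape of the table printed above (values and resource names
// depend on the target's schedule model; column widths come from the
// -misched-dump-schedule-trace-col-* options):
//
//   Cycle               | 0    | 1    | 2    |
//   SU(0)               | i    |      |      |
//              MyUnit   | x    | x    |      |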
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceBottomUp() const {
  // Bail off when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there is no or just one instruction.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (BottomUp):\n";
  dbgs() << scheduleTableLegend << "\n";

  const int FirstCycle = getSUnit(&*(std::begin(*this)))->BotReadyCycle;
  int LastCycle = getSUnit(&*(std::prev(std::end(*this))))->BotReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if ((int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1 < LastCycle)
        LastCycle = (int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1;
    }
  }
  // Print the header with the cycles.
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (int C = FirstCycle; C >= LastCycle; --C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    int C = FirstCycle;
    for (; C >= LastCycle; --C) {
      if (C == (int)SU->BotReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);
    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
                                 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
                                  LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C > ((int)SU->BotReadyCycle - (int)PI.AcquireAtCycle); --C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
           ++I, --C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C-- >= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char.
      dbgs() << "| \n";
    }
  }
}

#endif
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  if (MISchedDumpScheduleTrace) {
    if (DumpDir == DumpDirection::TopDown)
      dumpScheduleTraceTopDown();
    else if (DumpDir == DumpDirection::BottomUp)
      dumpScheduleTraceBottomUp();
    else if (DumpDir == DumpDirection::Bidirectional) {
      dbgs() << "* Schedule table (Bidirectional): not implemented\n";
    } else {
      dbgs() << "* Schedule table: DumpDirection not set.\n";
    }
  }

  for (MachineInstr &MI : *this) {
    if (SUnit *SU = getSUnit(&MI))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}
void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.all_defs()) {
        if (MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}
// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
      RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
             << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}
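
// Concrete reading of RegionCriticalPSets (illustrative numbers): if this
// region's max pressure for some register set is 34 against a limit of 32,
// the set is recorded above, and the strategy will later bias picks that
// reduce its pressure.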
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    Register Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!Reg.isVirtual())
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
          nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}
void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  postProcessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    Register Reg = P.RegUnit;
    if (!Reg.isVirtual())
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
                       /*IgnoreDead=*/false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
                       /*IgnoreDead=*/false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    LocationSize Width;
    bool OffsetIsScalable;

    MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
              int64_t Offset, bool OffsetIsScalable, LocationSize Width)
        : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
          Width(Width), OffsetIsScalable(OffsetIsScalable) {}

    static bool Compare(const MachineOperand *const &A,
                        const MachineOperand *const &B) {
      if (A->getType() != B->getType())
        return A->getType() < B->getType();
      if (A->isReg())
        return A->getReg() < B->getReg();
      if (A->isFI()) {
        const MachineFunction &MF = *A->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        return StackGrowsDown ? A->getIndex() > B->getIndex()
                              : A->getIndex() < B->getIndex();
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "base operands");
    }

    bool operator<(const MemOpInfo &RHS) const {
      // FIXME: Don't compare everything twice. Maybe use C++20 three way
      // comparison instead when it's available.
      if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
                                       RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       Compare))
        return true;
      if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
                                       BaseOps.begin(), BaseOps.end(), Compare))
        return false;
      if (Offset != RHS.Offset)
        return Offset < RHS.Offset;
      return SU->NodeNum < RHS.SU->NodeNum;
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;
  bool ReorderWhileClustering;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad,
                           bool ReorderWhileClustering)
      : TII(tii), TRI(tri), IsLoad(IsLoad),
        ReorderWhileClustering(ReorderWhileClustering) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
                                ScheduleDAGInstrs *DAG);
  void collectMemOpRecords(std::vector<SUnit> &SUnits,
                           SmallVectorImpl<MemOpInfo> &MemOpRecords);
  bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
                   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri,
                       bool ReorderWhileClustering)
      : BaseMemOpClusterMutation(tii, tri, false, ReorderWhileClustering) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri,
                      bool ReorderWhileClustering)
      : BaseMemOpClusterMutation(tii, tri, true, ReorderWhileClustering) {}
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             bool ReorderWhileClustering) {
  return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(
                                  TII, TRI, ReorderWhileClustering)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI,
                              bool ReorderWhileClustering) {
  return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(
                                  TII, TRI, ReorderWhileClustering)
                            : nullptr;
}

} // end namespace llvm

// Sort all the loads/stores first, then, for each load/store, check the
// following load/store one by one until the first non-dependent one is
// reached, and call the target hook to see if they can be clustered.
// If FastCluster is enabled, we assume that all the loads/stores have been
// preprocessed and now have no dependencies on each other.
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
    ScheduleDAGInstrs *DAG) {
  // Keep track of the current cluster length and bytes for each SUnit.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;

  // At this point, the `MemOpRecords` array must hold at least two mem ops.
  // Try to cluster the mem ops collected within the `MemOpRecords` array.
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    // The decision to cluster mem ops is taken based on target-dependent
    // logic.
    auto MemOpa = MemOpRecords[Idx];

    // Seek the next load/store to do the cluster.
    unsigned NextIdx = Idx + 1;
    for (; NextIdx < End; ++NextIdx)
      // Skip if MemOpb has been clustered already or has a dependency with
      // MemOpa.
      if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
          (FastCluster ||
           (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
            !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
        break;
    if (NextIdx == End)
      continue;

    auto MemOpb = MemOpRecords[NextIdx];
    unsigned ClusterLength = 2;
    unsigned CurrentClusterBytes = MemOpa.Width.getValue().getKnownMinValue() +
                                   MemOpb.Width.getValue().getKnownMinValue();
    if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
      ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
      CurrentClusterBytes = SUnit2ClusterInfo[MemOpa.SU->NodeNum].second +
                            MemOpb.Width.getValue().getKnownMinValue();
    }

    if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpa.Offset,
                                  MemOpa.OffsetIsScalable, MemOpb.BaseOps,
                                  MemOpb.Offset, MemOpb.OffsetIsScalable,
                                  ClusterLength, CurrentClusterBytes))
      continue;

    SUnit *SUa = MemOpa.SU;
    SUnit *SUb = MemOpb.SU;
    if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);

    // FIXME: Is this check really required?
    if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
      continue;

    LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
                      << SUb->NodeNum << ")\n");
    ++NumClustered;

    if (IsLoad) {
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
    } else {
      // Copy predecessor edges from SUb to SUa to avoid the SUnits that
      // SUb depends on being scheduled in-between SUb and SUa. Successor
      // edges do not need to be copied from SUa to SUb since no one will
      // depend on stores.
      // Notice that we don't need to care about memory dependencies, as we
      // won't try to cluster mem ops that have any memory dependency.
      for (const SDep &Pred : SUb->Preds) {
        if (Pred.getSUnit() == SUa)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
      }
    }

    SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
                                             CurrentClusterBytes};

    LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
                      << ", Curr cluster bytes: " << CurrentClusterBytes
                      << "\n");
  }
}

void BaseMemOpClusterMutation::collectMemOpRecords(
    std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
  for (auto &SU : SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    const MachineInstr &MI = *SU.getInstr();
    SmallVector<const MachineOperand *, 4> BaseOps;
    int64_t Offset;
    bool OffsetIsScalable;
    LocationSize Width = 0;
    if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                           OffsetIsScalable, Width, TRI)) {
      MemOpRecords.push_back(
          MemOpInfo(&SU, BaseOps, Offset, OffsetIsScalable, Width));

      LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
                        << Offset << ", OffsetIsScalable: " << OffsetIsScalable
                        << ", Width: " << Width << "\n");
    }
#ifndef NDEBUG
    for (const auto *Op : BaseOps)
      assert(Op);
#endif
  }
}

bool BaseMemOpClusterMutation::groupMemOps(
    ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
    DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
  bool FastCluster =
      ForceFastCluster ||
      MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;

  for (const auto &MemOp : MemOps) {
    unsigned ChainPredID = DAG->SUnits.size();
    if (FastCluster) {
      for (const SDep &Pred : MemOp.SU->Preds) {
        // We only want to cluster the mem ops that have the same ctrl(non-data)
        // pred so that they don't have a ctrl dependency on each other. But for
        // store instrs, we can still cluster them if the pred is a load instr.
        if ((Pred.isCtrl() &&
             (IsLoad ||
              (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
            !Pred.isArtificial()) {
          ChainPredID = Pred.getSUnit()->NodeNum;
          break;
        }
      }
    } else
      ChainPredID = 0;

    Groups[ChainPredID].push_back(MemOp);
  }
  return FastCluster;
}
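
// Worked example of the FastCluster switch above (the threshold value is an
// assumption for the example, not the actual default): with 60 collected mem
// ops in a DAG of 1200 SUnits, 60 * 1200 / 1000 = 72. If FastClusterThreshold
// were 20, this region would use the cheap grouping by control predecessor
// instead of the exact per-pair IsReachable checks.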

/// Callback from DAG postProcessing to create cluster edges for loads/stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
  // Collect all the clusterable loads/stores
  SmallVector<MemOpInfo, 32> MemOpRecords;
  collectMemOpRecords(DAG->SUnits, MemOpRecords);

  if (MemOpRecords.size() < 2)
    return;

  // Put the loads/stores without dependencies into the same group with some
  // heuristic if the DAG is too complex, to avoid compile-time blowup.
  // Notice that some fusion pairs could be lost with this.
  DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
  bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);

  for (auto &Group : Groups) {
    // Sort the loads/stores so that we can stop the clustering as early as
    // possible.
    llvm::sort(Group.second);

    // Try to cluster all the neighboring loads/stores.
    clusterNeighboringMemOps(Group.second, FastCluster, DAG);
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return std::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  Register SrcReg = SrcOp.getReg();
  if (!SrcReg.isVirtual() || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  Register DstReg = DstOp.getReg();
  if (!DstReg.isVirtual() || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
      LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (const SDep &Pred : GlobalSU->Preds) {
    if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
      continue;
    if (Pred.getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
      return;
    GlobalUses.push_back(Pred.getSUnit());
  }
  LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SUnit *LU : LocalUses) {
    LLVM_DEBUG(dbgs() << "  Local use SU(" << LU->NodeNum << ") -> SU("
                      << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
  }
  for (SUnit *GU : GlobalUses) {
    LLVM_DEBUG(dbgs() << "  Global use SU(" << GU->NodeNum << ") -> SU("
                      << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
  }
}

/// Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
      *priorNonDebug(DAG->end(), DAG->begin()));

  for (SUnit &SU : DAG->SUnits) {
    if (!SU.getInstr()->isCopy())
      continue;

    constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

/// Given a Count of resource usage and a Latency value, return true if a
/// SchedBoundary becomes resource limited.
/// If we are checking after scheduling a node, we should return true when
/// we just reach the resource limit.
static bool checkResourceLimit(unsigned LFactor, unsigned Count,
                               unsigned Latency, bool AfterSchedNode) {
  int ResCntFactor = (int)(Count - (Latency * LFactor));
  if (AfterSchedNode)
    return ResCntFactor >= (int)LFactor;
  else
    return ResCntFactor > (int)LFactor;
}

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = std::numeric_limits<unsigned>::max();
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
  ReservedResourceSegments.clear();
  ReservedCyclesIndex.clear();
  ResourceGroupSubUnitMasks.clear();
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (SUnit &SU : DAG->SUnits) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
    RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      assert(PI->ReleaseAtCycle >= PI->AcquireAtCycle);
      RemainingCounts[PIdx] +=
          (Factor * (PI->ReleaseAtCycle - PI->AcquireAtCycle));
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
    ReservedCyclesIndex.resize(ResourceCount);
    ExecutedResCounts.resize(ResourceCount);
    ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
    unsigned NumUnits = 0;

    for (unsigned i = 0; i < ResourceCount; ++i) {
      ReservedCyclesIndex[i] = NumUnits;
      NumUnits += SchedModel->getProcResource(i)->NumUnits;
      if (isUnbufferedGroup(i)) {
        auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
        for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
             U != UE; ++U)
          ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
      }
    }

    ReservedCycles.resize(NumUnits, InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}

/// Compute the next cycle at which the given processor resource unit
/// can be scheduled.
unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
                                                       unsigned ReleaseAtCycle,
                                                       unsigned AcquireAtCycle) {
  if (SchedModel && SchedModel->enableIntervals()) {
    if (isTop())
      return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromTop(
          CurrCycle, AcquireAtCycle, ReleaseAtCycle);

    return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromBottom(
        CurrCycle, AcquireAtCycle, ReleaseAtCycle);
  }

  unsigned NextUnreserved = ReservedCycles[InstanceIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved = std::max(CurrCycle, NextUnreserved + ReleaseAtCycle);
  return NextUnreserved;
}
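
// Worked example for the non-interval path above (numbers invented): in a
// bottom-up zone with ReservedCycles[InstanceIdx] = 7, ReleaseAtCycle = 3 and
// CurrCycle = 8, the unit is next free at std::max(8, 7 + 3) = cycle 10; a
// top-down zone simply returns the recorded cycle 7.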

/// Compute the next cycle at which the given processor resource can be
/// scheduled. Returns the next cycle and the index of the processor resource
/// instance in the reserved cycles vector.
std::pair<unsigned, unsigned>
SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
                                    unsigned ReleaseAtCycle,
                                    unsigned AcquireAtCycle) {
  if (MischedDetailResourceBooking) {
    LLVM_DEBUG(dbgs() << "  Resource booking (@" << CurrCycle << "c): \n");
    LLVM_DEBUG(dumpReservedCycles());
    LLVM_DEBUG(dbgs() << "  getNextResourceCycle (@" << CurrCycle << "c): \n");
  }
  unsigned MinNextUnreserved = InvalidCycle;
  unsigned InstanceIdx = 0;
  unsigned StartIndex = ReservedCyclesIndex[PIdx];
  unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
  assert(NumberOfInstances > 0 &&
         "Cannot have zero instances of a ProcResource");

  if (isUnbufferedGroup(PIdx)) {
    // If any subunits are used by the instruction, report that the
    // subunits of the resource group are available at the first cycle
    // in which the unit is available, effectively removing the group
    // record from hazarding and basing the hazarding decisions on the
    // subunit records. Otherwise, choose the first available instance
    // from among the subunits. Specifications which assign cycles to
    // both the subunits and the group or which use an unbuffered
    // group with buffered subunits will appear to schedule
    // strangely. In the first case, the additional cycles for the
    // group will be ignored. In the second, the group will be
    // ignored entirely.
    for (const MCWriteProcResEntry &PE :
         make_range(SchedModel->getWriteProcResBegin(SC),
                    SchedModel->getWriteProcResEnd(SC)))
      if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
        return std::make_pair(getNextResourceCycleByInstance(
                                  StartIndex, ReleaseAtCycle, AcquireAtCycle),
                              StartIndex);

    auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
    for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
      unsigned NextUnreserved, NextInstanceIdx;
      std::tie(NextUnreserved, NextInstanceIdx) =
          getNextResourceCycle(SC, SubUnits[I], ReleaseAtCycle, AcquireAtCycle);
      if (MinNextUnreserved > NextUnreserved) {
        InstanceIdx = NextInstanceIdx;
        MinNextUnreserved = NextUnreserved;
      }
    }
    return std::make_pair(MinNextUnreserved, InstanceIdx);
  }

  for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
       ++I) {
    unsigned NextUnreserved =
        getNextResourceCycleByInstance(I, ReleaseAtCycle, AcquireAtCycle);
    if (MischedDetailResourceBooking)
      LLVM_DEBUG(dbgs() << "    Instance " << I - StartIndex << " available @"
                        << NextUnreserved << "c\n");
    if (MinNextUnreserved > NextUnreserved) {
      InstanceIdx = I;
      MinNextUnreserved = NextUnreserved;
    }
  }
  if (MischedDetailResourceBooking)
    LLVM_DEBUG(dbgs() << "    selecting " << SchedModel->getResourceName(PIdx)
                      << "[" << InstanceIdx - StartIndex << "]"
                      << " available @" << MinNextUnreserved << "c"
                      << "\n");
  return std::make_pair(MinNextUnreserved, InstanceIdx);
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
                      << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }

  if (CurrMOps > 0 &&
      ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
       (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
    LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
                      << (isTop() ? "begin" : "end") << " group\n");
    return true;
  }

  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (const MCWriteProcResEntry &PE :
          make_range(SchedModel->getWriteProcResBegin(SC),
                     SchedModel->getWriteProcResEnd(SC))) {
      unsigned ResIdx = PE.ProcResourceIdx;
      unsigned ReleaseAtCycle = PE.ReleaseAtCycle;
      unsigned AcquireAtCycle = PE.AcquireAtCycle;
      unsigned NRCycle, InstanceIdx;
      std::tie(NRCycle, InstanceIdx) =
          getNextResourceCycle(SC, ResIdx, ReleaseAtCycle, AcquireAtCycle);
      if (NRCycle > CurrCycle) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        MaxObservedStall = std::max(ReleaseAtCycle, MaxObservedStall);
#endif
        LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
                          << SchedModel->getResourceName(ResIdx)
                          << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
                          << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (SUnit *SU : ReadySUs) {
    unsigned L = getUnscheduledLatency(SU);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = SU;
    }
  }
  if (LateSU) {
    LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
                      << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
                    << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    LLVM_DEBUG(
        dbgs() << "  " << Available.getName() << " + Remain CritRes: "
               << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
               << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                                unsigned Idx) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // ReadyCycle had been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
                        checkHazard(SU) || (Available.size() >= ReadyListLimit);

  if (!HazardDetected) {
    Available.push(SU);

    if (InPQueue)
      Pending.remove(Pending.begin() + Idx);
    return;
  }

  if (!InPQueue)
    Pending.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
           "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  IsResourceLimited =
      checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                         getScheduledLatency(), true);

  LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
                    << '\n');
}
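
// Worked example (numbers invented): with an issue width of 4, advancing from
// cycle 10 to 12 gives DecMOps = 4 * 2 = 8, so CurrMOps = 5 saturates to 0,
// and a DependentLatency of 3 shrinks to 1 over the same two cycles.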

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param ReleaseAtCycle indicates the number of consecutive (non-pipelined)
/// cycles during which this resource is released.
///
/// \param AcquireAtCycle indicates the number of consecutive (non-pipelined)
/// cycles at which the resource is acquired after issue (assuming no stalls).
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                                      unsigned ReleaseAtCycle,
                                      unsigned NextCycle,
                                      unsigned AcquireAtCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * (ReleaseAtCycle - AcquireAtCycle);
  LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
                    << ReleaseAtCycle << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    LLVM_DEBUG(dbgs() << "  *** Critical resource "
                      << SchedModel->getResourceName(PIdx) << ": "
                      << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
                      << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable, InstanceIdx;
  std::tie(NextAvailable, InstanceIdx) =
      getNextResourceCycle(SC, PIdx, ReleaseAtCycle, AcquireAtCycle);
  if (NextAvailable > CurrCycle) {
    LLVM_DEBUG(dbgs() << "  Resource conflict: "
                      << SchedModel->getResourceName(PIdx)
                      << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
                      << " reserved until @" << NextAvailable << "\n");
  }
  return NextAvailable;
}

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
    // Scheduling an instruction may have made pending instructions available.
    CheckPending = true;
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(SC, PI->ProcResourceIdx, PI->ReleaseAtCycle, NextCycle,
                      PI->AcquireAtCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up, it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (SchedModel && SchedModel->enableIntervals()) {
            unsigned ReservedUntil, InstanceIdx;
            std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
                SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
            if (isTop()) {
              ReservedResourceSegments[InstanceIdx].add(
                  ResourceSegments::getResourceIntervalTop(
                      NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
                  MIResourceCutOff);
            } else {
              ReservedResourceSegments[InstanceIdx].add(
                  ResourceSegments::getResourceIntervalBottom(
                      NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
                  MIResourceCutOff);
            }
          } else {
            unsigned ReservedUntil, InstanceIdx;
            std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
                SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
            if (isTop()) {
              ReservedCycles[InstanceIdx] =
                  std::max(ReservedUntil, NextCycle + PI->ReleaseAtCycle);
            } else
              ReservedCycles[InstanceIdx] = NextCycle;
          }
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
                      << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
                      << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle)
    bumpCycle(NextCycle);
  else
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    IsResourceLimited =
        checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                           getScheduledLatency(), true);

  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than issue in
  // one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;

  // Bump the cycle count for issue group constraints.
  // This must be done after NextCycle has been adjusted for all other stalls.
  // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
  // CurrCycle to X.
  if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
      (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
    LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
                      << " group\n");
    bumpCycle(++NextCycle);
  }

  while (CurrMOps >= SchedModel->getIssueWidth()) {
    LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
                      << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  LLVM_DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = std::numeric_limits<unsigned>::max();

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
    SUnit *SU = *(Pending.begin() + I);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (Available.size() >= ReadyListLimit)
      break;

    releaseNode(SU, ReadyCycle, true, I);
    if (E != Pending.size()) {
      --I;
      --E;
    }
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Defer any ready instrs that now have a hazard.
  for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
    if (checkHazard(*I)) {
      Pending.push(*I);
      I = Available.remove(I);
      continue;
    }
    ++I;
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    // FIXME: Re-enable assert once PR20057 is resolved.
    // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
    //        "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  LLVM_DEBUG(Pending.dump());
  LLVM_DEBUG(Available.dump());

  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Dump the content of the \ref ReservedCycles vector for the
/// resources that are used in the basic block.
LLVM_DUMP_METHOD void SchedBoundary::dumpReservedCycles() const {
  if (!SchedModel->hasInstrSchedModel())
    return;

  unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
  unsigned StartIdx = 0;

  for (unsigned ResIdx = 0; ResIdx < ResourceCount; ++ResIdx) {
    const unsigned NumUnits = SchedModel->getProcResource(ResIdx)->NumUnits;
    std::string ResName = SchedModel->getResourceName(ResIdx);
    for (unsigned UnitIdx = 0; UnitIdx < NumUnits; ++UnitIdx) {
      dbgs() << ResName << "(" << UnitIdx << ") = ";
      if (SchedModel && SchedModel->enableIntervals()) {
        if (ReservedResourceSegments.count(StartIdx + UnitIdx))
          dbgs() << ReservedResourceSegments.at(StartIdx + UnitIdx);
        else
          dbgs() << "{ }\n";
      } else
        dbgs() << ReservedCycles[StartIdx + UnitIdx] << "\n";
    }
    StartIdx += NumUnits;
  }
}

// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * ResFactor;
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
  if (MISchedDumpReservedCycles)
    dumpReservedCycles();
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->ReleaseAtCycle;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->ReleaseAtCycle;
  }
}

/// Compute remaining latency. We need this both to determine whether the
/// overall schedule has become latency-limited and whether the instructions
/// outside this zone are resource or latency limited.
///
/// The "dependent" latency is updated incrementally during scheduling as the
/// max height/depth of scheduled nodes minus the cycles since it was
/// scheduled:
///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
///
/// The "independent" latency is the max ready queue depth:
///   ILat = max N.depth for N in Available|Pending
///
/// RemainingLatency is the greater of independent and dependent latency.
///
/// These computations are expensive, especially in DAGs with many edges, so
/// only do them if necessary.
static unsigned computeRemLatency(SchedBoundary &CurrZone) {
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));
  return RemLatency;
}
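
// Worked example for the two formulas above (numbers invented): a scheduled
// node of depth 9 that became ready at cycle 2 contributes 9 - (6 - 2) = 5 to
// the dependent latency at CurrCycle = 6; if the deepest node still in
// Available/Pending has depth 7, the independent latency wins and
// RemLatency = max(5, 7) = 7.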

/// Returns true if the current cycle plus remaining latency is greater than
/// the critical path in the scheduling region.
bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
                                               SchedBoundary &CurrZone,
                                               bool ComputeRemLatency,
                                               unsigned &RemLatency) const {
  // The current cycle is already greater than the critical path, so we are
  // already latency limited and don't need to compute the remaining latency.
  if (CurrZone.getCurrCycle() > Rem.CriticalPath)
    return true;

  // If we haven't scheduled anything yet, then we aren't latency limited.
  if (CurrZone.getCurrCycle() == 0)
    return false;

  if (ComputeRemLatency)
    RemLatency = computeRemLatency(CurrZone);

  return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
}
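
// Worked example (numbers invented): with Rem.CriticalPath = 20 and
// CurrCycle = 12, a remaining latency of 10 means 12 + 10 = 22 > 20, so the
// zone should start favoring latency reduction; at RemLatency = 7 it stays
// at 19 <= 20 and the policy is left alone.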

/// Set the CandPolicy given a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
      OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  unsigned RemLatency = 0;
  bool RemLatencyComputed = false;
  if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
    RemLatency = computeRemLatency(CurrZone);
    RemLatencyComputed = true;
    OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
                                         OtherCount, RemLatency, false);
  }

  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited &&
      (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
                                       RemLatency))) {
    Policy.ReduceLatency |= true;
    LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
                      << " RemainingLatency " << RemLatency << " + "
                      << CurrZone.getCurrCycle() << "c > CritPath "
                      << Rem.CriticalPath << "\n");
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
    dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
           << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
  } if (OtherResLimited) dbgs()
                 << "  RemainingLimit: "
                 << SchedModel->getResourceName(OtherCritIdx) << "\n";
             if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
             << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysReg:        return "PHYS-REG  ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  };
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

namespace llvm {
/// Return true if this heuristic determines order.
/// TODO: Consider refactoring the return type of these functions as an integer
/// or enum, as we may need to differentiate whether TryCand is better than
/// Cand.
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
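
// Usage sketch (illustrative; condensed from how the pre-RA strategy chains
// these helpers later in this file — the surrounding variables are assumed):
// callers take the first decisive answer, e.g. preferring the candidate with
// fewer expected stall cycles.
#if 0
if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
            Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand,
            GenericSchedulerBase::Stall))
  return TryCand.Reason != GenericSchedulerBase::NoCand;
#endif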

bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone) {
  if (Zone.isTop()) {
    // Prefer the candidate with the lesser depth, but only if one of them has
    // depth greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    // Prefer the candidate with the lesser height, but only if one of them has
    // height greater than the total latency scheduled so far, otherwise either
    // of them could be scheduled now with no stall.
    if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) >
        Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

} // end namespace llvm
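
// Editor's worked example (assumed numbers): at the top boundary with
// Zone.getScheduledLatency() == 4, candidate depths of 6 and 3 satisfy the
// "max depth exceeds scheduled latency" guard (6 > 4), so tryLess prefers the
// depth-3 node (TOP-DEPTH). If both depths were <= 4, neither pick could add
// a stall, the guard is skipped, and tryGreater instead prefers the node with
// the greater remaining height (TOP-PATH), i.e. the longer remaining path.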
static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
                    << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}
void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  if (RegionPolicy.ComputeDFSResult)
    DAG->computeDFSResult();

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}
/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getMF();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the allocatable integer register file
  // that is the largest legal integer register type.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i64; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
          TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
      break;
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and more
  // compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure) {
    RegionPolicy.ShouldTrackPressure = false;
    RegionPolicy.ShouldTrackLaneMasks = false;
  }

  // Check -misched-topdown/bottomup can force or unforce scheduling direction.
  // e.g. -misched-bottomup=false allows scheduling in both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
void GenericScheduler::dumpPolicy() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
#endif
}
/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
      std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
               Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
      (AcyclicCount * Rem.RemIssueCount + IterCount - 1) / IterCount;
  unsigned BufferLimit =
      SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  LLVM_DEBUG(
      dbgs() << "IssueCycles="
             << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
             << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
             << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
             << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
             << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
      if (Rem.IsAcyclicLatencyLimited) dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
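
// Editor's worked example (assumed numbers): with CyclicCritPath = 2,
// CriticalPath = 10, RemIssueCount = 8, LatencyFactor = 1, MicroOpFactor = 1,
// and MicroOpBufferSize = 4:
//   IterCount     = max(2 * 1, 8)        = 8
//   AcyclicCount  = 10 * 1               = 10
//   InFlightCount = (10 * 8 + 8 - 1) / 8 = 10
//   BufferLimit   = 4 * 1                = 4
// InFlightCount (10) exceeds BufferLimit (4), so the region is flagged
// acyclic latency limited and latency is scheduled for aggressively.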
void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : Bot.Available) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}
namespace llvm {

bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF) {
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // Do not compare the magnitude of pressure changes between top and bottom
  // boundary.
  if (Cand.AtTop != TryCand.AtTop)
    return false;

  // If both candidates affect the same set in the same boundary, go with the
  // smallest increase.
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }

  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                                 std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                                   std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
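
// Editor's worked example (assumed sets and values): if TryP is -1 on GR32
// and CandP is +2 on GR32, the signs differ and the first tryGreater call
// immediately prefers the decreasing candidate. If both are increases on the
// same set (+1 vs +2 on GR32), the same-set branch picks the smaller increase
// via tryLess. Only when different sets are affected do the
// getRegPressureSetScore ranks decide; an invalid change (UnitInc == 0) ranks
// as numeric_limits<int>::max(), so touching no set beats any increase.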
unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}
/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
int biasPhysReg(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();

  if (MI->isCopy()) {
    unsigned ScheduledOper = isTop ? 1 : 0;
    unsigned UnscheduledOper = isTop ? 0 : 1;
    // If we have already scheduled the physreg producer/consumer, immediately
    // schedule the copy.
    if (MI->getOperand(ScheduledOper).getReg().isPhysical())
      return 1;
    // If the physreg is at the boundary, defer it. Otherwise schedule it
    // immediately to free the dependent. We can hoist the copy later.
    bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
    if (MI->getOperand(UnscheduledOper).getReg().isPhysical())
      return AtBoundary ? -1 : 1;
  }

  if (MI->isMoveImmediate()) {
    // If we have a move immediate and all successors have been assigned, bias
    // towards scheduling this later. Make sure all register defs are to
    // physical registers.
    bool DoBias = true;
    for (const MachineOperand &Op : MI->defs()) {
      if (Op.isReg() && !Op.getReg().isPhysical()) {
        DoBias = false;
        break;
      }
    }

    if (DoBias)
      return isTop ? -1 : 1;
  }

  return 0;
}

} // end namespace llvm
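
// Editor's worked example (hypothetical operands): scheduling top-down
// (isTop == true), for `%v = COPY $x0` the scheduled-side operand (the
// source, operand 1) is physical, so biasPhysReg returns 1: the physreg def
// was already scheduled above, and placing the copy now keeps the $x0 live
// range short. For `$x0 = COPY %v` top-down, the unscheduled side is
// physical: with no successors left (AtBoundary) the copy is deferred (-1)
// so it lands next to the boundary; otherwise it is scheduled now (1) to
// free its dependent.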
void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
          Cand.SU->getInstr(),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
            Cand.SU->getInstr(),
            &DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
            Cand.SU->getInstr(),
            DAG->getPressureDiff(Cand.SU),
            Cand.RPDelta,
            DAG->getRegionCriticalPSets(),
            DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
             << "  Try  SU(" << Cand.SU->NodeNum << ") "
             << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
             << Cand.RPDelta.Excess.getUnitInc() << "\n");
}
/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending, or nullptr
///             if Cand is from a different zone than TryCand.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary *Zone) const {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Bias physreg defs and copies toward their uses and definitions,
  // respectively.
  if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
                 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
    return TryCand.Reason != NoCand;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  // We only compare a subset of features when comparing nodes between
  // Top and Bottom boundary. Some properties are simply incomparable, in many
  // other instances we should only override the other boundary if something
  // is a clear good pick on one boundary. Skip heuristics that are more
  // "tie-breaking" in nature.
  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // For loops that are acyclic path limited, aggressively schedule for
    // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
    // heuristics to take precedence.
    if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
        tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return TryCand.Reason != NoCand;
  }

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *CandNextClusterSU =
      Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  const SUnit *TryCandNextClusterSU =
      TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == TryCandNextClusterSU,
                 Cand.SU == CandNextClusterSU,
                 TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  if (SameBoundary) {
    // Weak edges are for clustering and other constraints.
    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop),
                TryCand, Cand, Weak))
      return TryCand.Reason != NoCand;
  }

  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return TryCand.Reason != NoCand;

  if (SameBoundary) {
    // Avoid critical resource consumption and balance the schedule.
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
                TryCand, Cand, ResourceReduce))
      return TryCand.Reason != NoCand;
    if (tryGreater(TryCand.ResDelta.DemandedResources,
                   Cand.ResDelta.DemandedResources,
                   TryCand, Cand, ResourceDemand))
      return TryCand.Reason != NoCand;

    // Avoid serializing long latency dependence chains.
    // For acyclic path limited loops, latency was already checked above.
    if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
        !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Fall through to original instruction order.
    if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
        || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
      TryCand.Reason = NodeOrder;
      return true;
    }
  }

  return false;
}
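
// Editor's summary of the ladder above, strongest to weakest (bracketed rungs
// apply only when comparing within one boundary, i.e. Zone != nullptr):
// PhysReg, RegExcess, RegCritical, [acyclic latency, Stall], Cluster, [Weak],
// RegMax, [ResourceReduce, ResourceDemand, latency, NodeOrder]. Each rung
// either decides the comparison or falls through, which is what makes a
// candidate trace such as REG-EXCESS or CLUSTER attributable to exactly one
// heuristic.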
/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    if (tryCandidate(Cand, TryCand, ZoneArg)) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}
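
// Editor's illustration: pickNodeFromQueue is a linear arg-max over the ready
// queue with tryCandidate as the comparison. The same shape in miniature
// (hypothetical, plain ints standing in for SUnits and their "goodness"):
#if 0
#include <vector>
static int pickBestExample(const std::vector<int> &Ready) {
  bool HaveBest = false;
  int Best = 0;
  for (int SU : Ready) {
    // tryCandidate(Cand, TryCand, ZoneArg) returns true when TryCand wins;
    // the first element always beats the initial, invalid candidate.
    if (!HaveBest || SU > Best) {
      Best = SU; // Cand.setBest(TryCand);
      HaveBest = true;
    }
  }
  return Best;
}
#endif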
/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  if (tryCandidate(Cand, TopCand, nullptr)) {
    Cand.setBest(TopCand);
    LLVM_DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}
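
// Editor's note on the caching above: BotCand and TopCand persist across
// picks, so a candidate is recomputed only when it has been invalidated (its
// SU was scheduled) or when the zone's CandPolicy changed; otherwise the
// cached winner is reused. Under -verify-misched (VerifyScheduling), the pick
// is redone from scratch and asserted to match the cached result, guarding
// against stale candidates.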
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  // If IsTopNode, then SU is in Top.Available and must be removed. Otherwise,
  // if isTopReady(), then SU is in either Top.Available or Top.Pending.
  // If !IsTopNode, then SU is in Bot.Available and must be removed. Otherwise,
  // if isBottomReady(), then SU is in either Bot.Available or Bot.Pending.
  //
  // It is coincidental when !IsTopNode && isTopReady or when IsTopNode &&
  // isBottomReady. That is, it didn't factor into the decision to choose SU
  // because it isTopReady or isBottomReady, respectively. In fact, if the
  // RegionPolicy is OnlyTopDown or OnlyBottomUp, then the Bot queues and Top
  // queues respectively contain the original roots and don't get updated when
  // picking a node. So if SU isTopReady on an OnlyBottomUp pick, then it was
  // because we scheduled everything but the top roots. Conversely, if SU
  // isBottomReady on OnlyTopDown, then it was because we scheduled everything
  // but the bottom roots. If it's in a queue even coincidentally, it should be
  // removed so it does not get re-picked in a subsequent pickNode call.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SDep &Dep : Deps) {
    if (Dep.getKind() != SDep::Data ||
        !Register::isPhysicalRegister(Dep.getReg()))
      continue;
    SUnit *DepSU = Dep.getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy() && !Copy->isMoveImmediate())
      continue;
    LLVM_DEBUG(dbgs() << "  Rescheduling physreg copy ";
               DAG->dumpNode(*Dep.getSUnit()));
    DAG->moveInstruction(Copy, InsertPos);
  }
}
/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysReg.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysReg(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysReg(SU, false);
  }
}
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));

  const TargetSubtargetInfo &STI = C->MF->getSubtarget();
  // Add MacroFusion mutation if fusions are not empty.
  const auto &MacroFusions = STI.getMacroFusions();
  if (!MacroFusions.empty())
    DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
  return DAG;
}

static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  return createGenericSchedLive(C);
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createConvergingSched);
//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
}
void PostGenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                      MachineBasicBlock::iterator End,
                                      unsigned NumRegionInstrs) {
  if (PostRADirection == MISchedPostRASched::TopDown) {
    RegionPolicy.OnlyTopDown = true;
    RegionPolicy.OnlyBottomUp = false;
  } else if (PostRADirection == MISchedPostRASched::BottomUp) {
    RegionPolicy.OnlyTopDown = false;
    RegionPolicy.OnlyBottomUp = true;
  } else if (PostRADirection == MISchedPostRASched::Bidirectional) {
    RegionPolicy.OnlyBottomUp = false;
    RegionPolicy.OnlyTopDown = false;
  }
}
void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (const SUnit *SU : Bot.Available) {
    if (SU->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = SU->getDepth();
  }
  LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return TryCand.Reason != NoCand;

  // Keep clustered nodes together.
  if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
                 Cand.SU == DAG->getNextClusterSucc(),
                 TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return TryCand.Reason != NoCand;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return TryCand.Reason != NoCand;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return TryCand.Reason != NoCand;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  return false;
}
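
// Editor's note: this is the pre-RA ladder with the register-pressure and
// copy-placement rungs (PhysReg, RegExcess, RegCritical, RegMax) and the
// weak-edge rung removed: after register allocation there are no virtual
// registers left to track, so only stalls, clustering, resource balance,
// latency, and source order remain.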
void PostGenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                             SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = Zone.isTop();
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryCandidate(Cand, TryCand)) {
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}
/// Pick the best candidate node from either the top or bottom queue.
SUnit *PostGenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // FIXME: This is similar to GenericScheduler::pickNodeBidirectional. Factor
  // out common parts.

  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/true, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/true, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  if (tryCandidate(Cand, TopCand)) {
    Cand.setBest(TopCand);
    LLVM_DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}
/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (SU) {
        tracePick(Only1, /*IsTop=*/false);
      } else {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        // Set the bottom-up policy based on the state of the current bottom
        // zone and the instructions outside the zone, including the top zone.
        setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr);
        pickNodeFromQueue(Bot, BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (SU) {
        tracePick(Only1, /*IsTop=*/true);
      } else {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        // Set the top-down policy based on the state of the current top zone
        // and the instructions outside the zone, including the bottom zone.
        setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
        pickNodeFromQueue(Top, TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
  }
}
ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
  ScheduleDAGMI *DAG =
      new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
                        /*RemoveKillFlags=*/true);
  const TargetSubtargetInfo &STI = C->MF->getSubtarget();
  // Add MacroFusion mutation if fusions are not empty.
  const auto &MacroFusions = STI.getMacroFusions();
  if (!MacroFusions.empty())
    DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
  return DAG;
}
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {

/// Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult = nullptr;
  const BitVector *ScheduledTrees = nullptr;
  bool MaximizeILP;

  ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}

  /// Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
               < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
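
// Editor's illustration: ILPOrder is a strict-weak ordering for the
// std::*_heap algorithms, so returning true means "A comes after B in the
// queue" and the maximum under the comparator sits at the heap front. A
// minimal standalone sketch of that contract (hypothetical values):
#if 0
#include <algorithm>
#include <vector>
static int popHighestPriority(std::vector<int> &ReadyQ) {
  auto ComesAfter = [](int A, int B) { return A < B; }; // like ILPOrder
  // ReadyQ is maintained as a heap w.r.t. ComesAfter, as ILPScheduler does.
  std::pop_heap(ReadyQ.begin(), ReadyQ.end(), ComesAfter);
  int Best = ReadyQ.back(); // former heap front: highest priority
  ReadyQ.pop_back();
  return Best;
}
#endif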
/// Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    LLVM_DEBUG(dbgs() << "Pick node "
                      << "SU(" << SU->NodeNum << ") "
                      << " ILP: " << DAG->getDFSResult()->getILP(SU)
                      << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
                      << " @"
                      << DAG->getDFSResult()->getSubtreeLevel(
                             DAG->getDFSResult()->getSubtreeID(SU))
                      << '\n'
                      << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
    "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
    "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
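
// Usage note (editor's addition): these registrations plug into the -misched=
// strategy selector, e.g. `llc -enable-misched -misched=ilpmax` schedules
// bottom-up maximizing the ILP metric, and `-misched=ilpmin` minimizes it.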
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
    TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }

  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace
static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return std::string(G->MF.getName());
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
           || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return Str;
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm
#endif // NDEBUG
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
/// Sort predicate for the intervals stored in an instance of
/// ResourceSegments. Intervals are always disjoint (no intersection
/// for any pairs of intervals), therefore we can sort the totality of
/// the intervals by looking only at the left boundary.
static bool sortIntervals(const ResourceSegments::IntervalTy &A,
                          const ResourceSegments::IntervalTy &B) {
  return A.first < B.first;
}
unsigned ResourceSegments::getFirstAvailableAt(
    unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
    std::function<ResourceSegments::IntervalTy(unsigned, unsigned, unsigned)>
        IntervalBuilder) const {
  assert(std::is_sorted(std::begin(_Intervals), std::end(_Intervals),
                        sortIntervals) &&
         "Cannot execute on an un-sorted set of intervals.");

  // Zero resource usage is allowed by TargetSchedule.td but we do not construct
  // a ResourceSegment interval for that situation.
  if (AcquireAtCycle == ReleaseAtCycle)
    return CurrCycle;

  unsigned RetCycle = CurrCycle;
  ResourceSegments::IntervalTy NewInterval =
      IntervalBuilder(RetCycle, AcquireAtCycle, ReleaseAtCycle);
  for (auto &Interval : _Intervals) {
    if (!intersects(NewInterval, Interval))
      continue;

    // Move the interval right next to the top of the one it intersects.
    assert(Interval.second > NewInterval.first &&
           "Invalid intervals configuration.");
    RetCycle += (unsigned)Interval.second - (unsigned)NewInterval.first;
    NewInterval = IntervalBuilder(RetCycle, AcquireAtCycle, ReleaseAtCycle);
  }
  return RetCycle;
}
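
// Editor's worked example (assumed state): with _Intervals = {[2, 5)} and an
// IntervalBuilder producing [C + AcquireAtCycle, C + ReleaseAtCycle), a query
// with CurrCycle = 1, AcquireAtCycle = 0, ReleaseAtCycle = 2 first builds
// [1, 3), which intersects [2, 5). RetCycle advances by 5 - 1 = 4 to cycle 5,
// the rebuilt interval [5, 7) no longer intersects anything, and 5 is
// returned as the first cycle at which the resource can be taken.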
void ResourceSegments::add(ResourceSegments::IntervalTy A,
                           const unsigned CutOff) {
  assert(A.first <= A.second && "Cannot add negative resource usage");
  assert(CutOff > 0 && "0-size interval history has no use.");
  // Zero resource usage is allowed by TargetSchedule.td, in the case that the
  // instruction needed the resource to be available but does not use it.
  // However, ResourceSegment represents an interval that is closed on the left
  // and open on the right. It is impossible to represent an empty interval when
  // the left is closed. Do not add it to Intervals.
  if (A.first == A.second)
    return;

  assert(all_of(_Intervals,
                [&A](const ResourceSegments::IntervalTy &Interval) -> bool {
                  return !intersects(A, Interval);
                }) &&
         "A resource is being overwritten");
  _Intervals.push_back(A);

  sortAndMerge();

  // Do not keep the full history of the intervals, just the
  // latest #CutOff.
  while (_Intervals.size() > CutOff)
    _Intervals.pop_front();
}
bool ResourceSegments::intersects(ResourceSegments::IntervalTy A,
                                  ResourceSegments::IntervalTy B) {
  assert(A.first <= A.second && "Invalid interval");
  assert(B.first <= B.second && "Invalid interval");

  // Share one boundary.
  if ((A.first == B.first) || (A.second == B.second))
    return true;

  // full intersect:   [    ***     )  B
  //                       [***)       A
  if ((A.first > B.first) && (A.second < B.second))
    return true;

  // right intersect:  [     ***)        B
  //                        [***      )  A
  if ((A.first > B.first) && (A.first < B.second) && (A.second > B.second))
    return true;

  // left intersect:        [***      )  B
  //                   [     ***)        A
  if ((A.first < B.first) && (B.first < A.second) && (B.second > B.first))
    return true;

  return false;
}
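
// Editor's quick checks of the arms above (half-open intervals): A = [1, 3)
// vs B = [2, 4) takes the left-intersect arm (1 < 2 and 2 < 3); A = [2, 3)
// vs B = [1, 4) takes the full-intersect arm; A = [1, 3) vs B = [3, 5)
// shares no boundary and matches no arm, so the intervals are reported
// disjoint, as expected for half-open ranges.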
void ResourceSegments::sortAndMerge() {
  if (_Intervals.size() <= 1)
    return;

  // First sort the collection.
  _Intervals.sort(sortIntervals);

  // We can use std::next because the list holds at least two elements.
  auto next = std::next(std::begin(_Intervals));
  auto E = std::end(_Intervals);
  for (; next != E; ++next) {
    if (std::prev(next)->second >= next->first) {
      next->first = std::prev(next)->first;
      _Intervals.erase(std::prev(next));