//===-- SIMachineScheduler.h - SI Scheduler Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIMACHINESCHEDULER_H
#define LLVM_LIB_TARGET_AMDGPU_SIMACHINESCHEDULER_H

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include <cassert>
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <vector>

namespace llvm {

class SIInstrInfo;
class SIRegisterInfo;
class SIScheduleDAGMI;
class SIScheduleBlockCreator;

enum SIScheduleCandReason {
  NoCand,
  RegUsage,
  Latency,
  Successor,
  Depth,
  NodeOrder
};

struct SISchedulerCandidate {
  // The reason for this candidate.
  SIScheduleCandReason Reason = NoCand;

  // Set of reasons that apply to multiple candidates.
  uint32_t RepeatReasonSet = 0;

  SISchedulerCandidate() = default;

  bool isRepeat(SIScheduleCandReason R) { return RepeatReasonSet & (1 << R); }
  void setRepeat(SIScheduleCandReason R) { RepeatReasonSet |= (1 << R); }
};
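
// Each SIScheduleCandReason owns one bit of RepeatReasonSet: setRepeat(R)
// records that reason R has already been seen, and isRepeat(R) reports it,
// so a reason shared by many candidates only needs to be handled once.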

enum SIScheduleBlockLinkKind {
  NoData,
  Data
};

class SIScheduleBlock {
  SIScheduleDAGMI *DAG;
  SIScheduleBlockCreator *BC;

  std::vector<SUnit*> SUnits;
  std::map<unsigned, unsigned> NodeNum2Index;
  std::vector<SUnit*> TopReadySUs;
  std::vector<SUnit*> ScheduledSUnits;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  // Pressure: number of registers of a given class needed to
  // store the live virtual and real registers.
  // We only care about SGPR32 and VGPR32, and only track virtual registers.
  // Pressure of additional registers required inside the block.
  std::vector<unsigned> InternalAdditionalPressure;
  // Pressure of input and output registers.
  std::vector<unsigned> LiveInPressure;
  std::vector<unsigned> LiveOutPressure;
  // Registers required by the block, and outputs.
  // We only track virtual registers.
  // Note that some registers are not 32 bits,
  // and thus the pressure is not equal
  // to the number of live registers.
  std::set<unsigned> LiveInRegs;
  std::set<unsigned> LiveOutRegs;

  bool Scheduled = false;
  bool HighLatencyBlock = false;

  std::vector<unsigned> HasLowLatencyNonWaitedParent;

  // Unique ID, the index of the Block in the SIScheduleDAGMI Blocks table.
  unsigned ID;

  std::vector<SIScheduleBlock*> Preds;  // All block predecessors.
  // All block successors, and the kind of link.
  std::vector<std::pair<SIScheduleBlock*, SIScheduleBlockLinkKind>> Succs;
  unsigned NumHighLatencySuccessors = 0;

public:
  SIScheduleBlock(SIScheduleDAGMI *DAG, SIScheduleBlockCreator *BC,
                  unsigned ID):
    DAG(DAG), BC(BC), TopRPTracker(TopPressure), ID(ID) {}

  ~SIScheduleBlock() = default;

  unsigned getID() const { return ID; }

  /// Functions for Block construction.
  void addUnit(SUnit *SU);

  // When all SUs have been added.
  void finalizeUnits();

  // Add block pred, which has instruction predecessor of SU.
  void addPred(SIScheduleBlock *Pred);
  void addSucc(SIScheduleBlock *Succ, SIScheduleBlockLinkKind Kind);

  const std::vector<SIScheduleBlock*>& getPreds() const { return Preds; }
  ArrayRef<std::pair<SIScheduleBlock*, SIScheduleBlockLinkKind>>
  getSuccs() const { return Succs; }

  unsigned Height = 0;  // Maximum topdown path length to a block without outputs.
  unsigned Depth = 0;   // Maximum bottomup path length to a block without inputs.

  unsigned getNumHighLatencySuccessors() const {
    return NumHighLatencySuccessors;
  }

  bool isHighLatencyBlock() { return HighLatencyBlock; }

  // This is an approximation.
  // Ideally it should take into account that some instructions (rcp, etc.)
  // are 4 times slower.
  int getCost() { return SUnits.size(); }

  // The block Predecessors and Successors must all be registered
  // before fastSchedule().
  // Fast schedule with no particular requirement.
  void fastSchedule();

  std::vector<SUnit*> getScheduledUnits() { return ScheduledSUnits; }

  // Complete schedule that will try to minimize reg pressure and
  // low latencies, and will fill liveins and liveouts.
  // Needs all MIs to be grouped between BeginBlock and EndBlock.
  // The MIs can be moved after the scheduling; the range is only used
  // to allow correct tracking of the live registers.
  void schedule(MachineBasicBlock::iterator BeginBlock,
                MachineBasicBlock::iterator EndBlock);

  bool isScheduled() { return Scheduled; }
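
  // Typical life cycle of a block, as suggested by the declarations and
  // comments above: addUnit() for each SUnit, then finalizeUnits(), then
  // addPred()/addSucc() to register the inter-block links, then
  // fastSchedule(), and finally schedule(BeginBlock, EndBlock) once the
  // block's MIs are grouped together.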

  // Needs the block to be scheduled inside.
  // TODO: find a way to compute it.
  std::vector<unsigned> &getInternalAdditionalRegUsage() {
    return InternalAdditionalPressure;
  }

  std::set<unsigned> &getInRegs() { return LiveInRegs; }
  std::set<unsigned> &getOutRegs() { return LiveOutRegs; }

  void printDebug(bool Full);

private:
  struct SISchedCandidate : SISchedulerCandidate {
    // The best SUnit candidate.
    SUnit *SU = nullptr;

    unsigned SGPRUsage;
    unsigned VGPRUsage;
    bool IsLowLatency;
    unsigned LowLatencyOffset;
    bool HasLowLatencyNonWaitedParent;

    SISchedCandidate() = default;

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SISchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      SGPRUsage = Best.SGPRUsage;
      VGPRUsage = Best.VGPRUsage;
      IsLowLatency = Best.IsLowLatency;
      LowLatencyOffset = Best.LowLatencyOffset;
      HasLowLatencyNonWaitedParent = Best.HasLowLatencyNonWaitedParent;
    }
  };

  void undoReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  // InOrOutBlock: restrict to links pointing inside the block (true),
  // or restrict to links pointing outside the block (false).
  void releaseSuccessors(SUnit *SU, bool InOrOutBlock);

  void nodeScheduled(SUnit *SU);
  void tryCandidateTopDown(SISchedCandidate &Cand, SISchedCandidate &TryCand);
  void tryCandidateBottomUp(SISchedCandidate &Cand, SISchedCandidate &TryCand);
  void traceCandidate(const SISchedCandidate &Cand);
  void initRegPressure(MachineBasicBlock::iterator BeginBlock,
                       MachineBasicBlock::iterator EndBlock);
};

struct SIScheduleBlocks {
  std::vector<SIScheduleBlock*> Blocks;
  std::vector<int> TopDownIndex2Block;
  std::vector<int> TopDownBlock2Index;
};
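
// In SIScheduleBlocks, TopDownIndex2Block[i] is intended to be the block at
// position i of the top-down topological order of the blocks, and
// TopDownBlock2Index the inverse mapping from a block to its position in
// that order.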

enum SISchedulerBlockCreatorVariant {
  LatenciesAlone,
  LatenciesGrouped,
  LatenciesAlonePlusConsecutive
};

class SIScheduleBlockCreator {
  SIScheduleDAGMI *DAG;
  // unique_ptr handles freeing memory for us.
  std::vector<std::unique_ptr<SIScheduleBlock>> BlockPtrs;
  std::map<SISchedulerBlockCreatorVariant,
           SIScheduleBlocks> Blocks;
  std::vector<SIScheduleBlock*> CurrentBlocks;
  std::vector<int> Node2CurrentBlock;

  // Maps topological index to the node number.
  std::vector<int> TopDownIndex2Block;
  std::vector<int> TopDownBlock2Index;
  std::vector<int> BottomUpIndex2Block;

  // 0 -> Color not given.
  // 1 to SUnits.size() -> Reserved group (you should only add elements to them).
  // Above -> Other groups.
  int NextNonReservedID;
  std::vector<int> CurrentColoring;
  std::vector<int> CurrentTopDownReservedDependencyColoring;
  std::vector<int> CurrentBottomUpReservedDependencyColoring;

public:
  SIScheduleBlockCreator(SIScheduleDAGMI *DAG);

  SIScheduleBlocks
  getBlocks(SISchedulerBlockCreatorVariant BlockVariant);

  bool isSUInBlock(SUnit *SU, unsigned ID);
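
  // getBlocks() builds the block decomposition for the requested variant on
  // first use and keeps the result in the Blocks map above, so later calls
  // with the same variant are expected to return the cached SIScheduleBlocks
  // rather than rebuild it.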

private:
  // Give a Reserved color to every high latency.
  void colorHighLatenciesAlone();

  // Create groups of high latencies with a Reserved color.
  void colorHighLatenciesGroups();

  // Compute coloring for topdown and bottom up traversals with
  // different colors depending on dependencies on Reserved colors.
  void colorComputeReservedDependencies();

  // Give a color to all non-colored SUs according to Reserved groups dependencies.
  void colorAccordingToReservedDependencies();

  // Divides Blocks having no bottom up or top down dependencies on Reserved groups.
  // The new colors are computed according to the dependencies on the other blocks
  // formed with colorAccordingToReservedDependencies.
  void colorEndsAccordingToDependencies();

  // Cut groups into groups with SUs in consecutive order (except for Reserved groups).
  void colorForceConsecutiveOrderInGroup();

  // Merge constant loads whose users are all in another group into that group.
  // (TODO: else if all their users depend on the same group, put them there)
  void colorMergeConstantLoadsNextGroup();

  // Merge SUs whose users are all in another group into that group.
  void colorMergeIfPossibleNextGroup();

  // Merge SUs whose users are all in another group into that group,
  // but only for Reserved groups.
  void colorMergeIfPossibleNextGroupOnlyForReserved();

  // Merge SUs whose users are all in another group into that group,
  // but only if the group is no more than a few SUs.
  void colorMergeIfPossibleSmallGroupsToNextGroup();

  // Divides Blocks that are too large.
  // Idea of implementation: attribute new colors depending on topdown and
  // bottom up links to other blocks.
  void cutHugeBlocks();

  // Put in one group all instructions with no users in this scheduling region
  // (we'd want these groups to be at the end).
  void regroupNoUserInstructions();

  // Give Reserved color to export instructions.
  void colorExports();

  void createBlocksForVariant(SISchedulerBlockCreatorVariant BlockVariant);

  void topologicalSort();

  void scheduleInsideBlocks();
};

enum SISchedulerBlockSchedulerVariant {
  BlockLatencyRegUsage,
  BlockRegUsageLatency,
  BlockRegUsage
};

class SIScheduleBlockScheduler {
  SIScheduleDAGMI *DAG;
  SISchedulerBlockSchedulerVariant Variant;
  std::vector<SIScheduleBlock*> Blocks;

  std::vector<std::map<unsigned, unsigned>> LiveOutRegsNumUsages;
  std::set<unsigned> LiveRegs;
  // Num of schedulable unscheduled blocks reading the register.
  std::map<unsigned, unsigned> LiveRegsConsumers;

  std::vector<unsigned> LastPosHighLatencyParentScheduled;
  int LastPosWaitedHighLatency;

  std::vector<SIScheduleBlock*> BlocksScheduled;
  unsigned NumBlockScheduled;
  std::vector<SIScheduleBlock*> ReadyBlocks;

  unsigned VregCurrentUsage;
  unsigned SregCurrentUsage;

  // Currently only an approximation.
  unsigned maxVregUsage;
  unsigned maxSregUsage;

  std::vector<unsigned> BlockNumPredsLeft;
  std::vector<unsigned> BlockNumSuccsLeft;

public:
  SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
                           SISchedulerBlockSchedulerVariant Variant,
                           SIScheduleBlocks BlocksStruct);
  ~SIScheduleBlockScheduler() = default;

  std::vector<SIScheduleBlock*> getBlocks() { return BlocksScheduled; }

  unsigned getVGPRUsage() { return maxVregUsage; }
  unsigned getSGPRUsage() { return maxSregUsage; }

private:
  struct SIBlockSchedCandidate : SISchedulerCandidate {
    // The best Block candidate.
    SIScheduleBlock *Block = nullptr;

    bool IsHighLatency;
    int VGPRUsageDiff;
    unsigned NumSuccessors;
    unsigned NumHighLatencySuccessors;
    unsigned LastPosHighLatParentScheduled;
    unsigned Height;

    SIBlockSchedCandidate() = default;

    bool isValid() const { return Block; }

    // Copy the status of another candidate without changing policy.
    void setBest(SIBlockSchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      Block = Best.Block;
      Reason = Best.Reason;
      IsHighLatency = Best.IsHighLatency;
      VGPRUsageDiff = Best.VGPRUsageDiff;
      NumSuccessors = Best.NumSuccessors;
      NumHighLatencySuccessors = Best.NumHighLatencySuccessors;
      LastPosHighLatParentScheduled = Best.LastPosHighLatParentScheduled;
      Height = Best.Height;
    }
  };

  bool tryCandidateLatency(SIBlockSchedCandidate &Cand,
                           SIBlockSchedCandidate &TryCand);
  bool tryCandidateRegUsage(SIBlockSchedCandidate &Cand,
                            SIBlockSchedCandidate &TryCand);
  SIScheduleBlock *pickBlock();

  void addLiveRegs(std::set<unsigned> &Regs);
  void decreaseLiveRegs(SIScheduleBlock *Block, std::set<unsigned> &Regs);
  void releaseBlockSuccs(SIScheduleBlock *Parent);
  void blockScheduled(SIScheduleBlock *Block);

  // Check register pressure change
  // by scheduling a block with these LiveIn and LiveOut.
  std::vector<int> checkRegUsageImpact(std::set<unsigned> &InRegs,
                                       std::set<unsigned> &OutRegs);
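
  // The returned vector is indexed by register pressure set: a negative
  // entry is read as a pressure decrease for that set if a block with these
  // live-ins and live-outs were scheduled, a positive entry as an increase
  // (interpretation of the declaration above; the exact convention is in
  // the implementation).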
};

struct SIScheduleBlockResult {
  std::vector<unsigned> SUs;
  unsigned MaxSGPRUsage;
  unsigned MaxVGPRUsage;
};

class SIScheduler {
  SIScheduleDAGMI *DAG;
  SIScheduleBlockCreator BlockCreator;

public:
  SIScheduler(SIScheduleDAGMI *DAG) : DAG(DAG), BlockCreator(DAG) {}

  ~SIScheduler() = default;

  struct SIScheduleBlockResult
  scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
                  SISchedulerBlockSchedulerVariant ScheduleVariant);
};
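
// Illustrative use of SIScheduler (a sketch; DAG here stands for a pointer
// to an existing SIScheduleDAGMI, and the enumerators are the ones declared
// above):
//   SIScheduler Scheduler(DAG);
//   SIScheduleBlockResult Res = Scheduler.scheduleVariant(
//       LatenciesAlonePlusConsecutive, BlockLatencyRegUsage);
//   // Res.SUs is the chosen SUnit order; Res.MaxVGPRUsage and
//   // Res.MaxSGPRUsage report the estimated register usage of that variant.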

class SIScheduleDAGMI final : public ScheduleDAGMILive {
  const SIInstrInfo *SITII;
  const SIRegisterInfo *SITRI;

  std::vector<SUnit> SUnitsLinksBackup;

  // For moveLowLatencies. After all Scheduling variants are tested.
  std::vector<unsigned> ScheduledSUnits;
  std::vector<unsigned> ScheduledSUnitsInv;

public:
  SIScheduleDAGMI(MachineSchedContext *C);

  ~SIScheduleDAGMI() override;

  // Entry point for the schedule.
  void schedule() override;

  // To init Block's RPTracker.
  void initRPTracker(RegPressureTracker &RPTracker) {
    RPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin, false, false);
  }
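
  // Each SIScheduleBlock is meant to call this to initialize its own
  // TopRPTracker over the region currently being scheduled; MF, RegClassInfo,
  // LIS, BB and RegionBegin are members inherited from the ScheduleDAG base
  // classes.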

  MachineBasicBlock *getBB() { return BB; }
  MachineBasicBlock::iterator getCurrentTop() { return CurrentTop; }
  MachineBasicBlock::iterator getCurrentBottom() { return CurrentBottom; }
  LiveIntervals *getLIS() { return LIS; }
  MachineRegisterInfo *getMRI() { return &MRI; }
  const TargetRegisterInfo *getTRI() { return TRI; }
  ScheduleDAGTopologicalSort *GetTopo() { return &Topo; }
  SUnit &getEntrySU() { return EntrySU; }
  SUnit &getExitSU() { return ExitSU; }

  void restoreSULinksLeft();

  template<typename _Iterator> void fillVgprSgprCost(_Iterator First,
                                                     _Iterator End,
                                                     unsigned &VgprUsage,
                                                     unsigned &SgprUsage);

  std::set<unsigned> getInRegs() {
    std::set<unsigned> InRegs;
    for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
      InRegs.insert(RegMaskPair.RegUnit);
    }
    return InRegs;
  }

  std::set<unsigned> getOutRegs() {
    std::set<unsigned> OutRegs;
    for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
      OutRegs.insert(RegMaskPair.RegUnit);
    }
    return OutRegs;
  }

  void topologicalSort();
  // After scheduling is done, improve low latency placements.
  void moveLowLatencies();

  // Some stats for scheduling inside blocks.
  std::vector<unsigned> IsLowLatencySU;
  std::vector<unsigned> LowLatencyOffset;
  std::vector<unsigned> IsHighLatencySU;

  // Maps topological index to the node number.
  std::vector<int> TopDownIndex2SU;
  std::vector<int> BottomUpIndex2SU;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_SIMACHINESCHEDULER_H