//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// R600 Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "R600MachineScheduler.h"
#include "MCTargetDesc/R600MCTargetDesc.h"
#include "R600Subtarget.h"

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive *>(dag);
  const R600Subtarget &ST = DAG->MF.getSubtarget<R600Subtarget>();
  TII = static_cast<const R600InstrInfo *>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo *>(DAG->TRI);
  VLIW5 = !ST.hasCaymanISA();
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
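  // Start with all five VLIW slots marked occupied (bits 0-3 = the X/Y/Z/W
  // channels, bit 4 = the Trans slot, 0b11111 == 31) so the first ALU pick
  // opens a fresh instruction group via PrepareNextSlot().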
  OccupiedSlotsMask = 31;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst) {
  llvm::append_range(QDst, QSrc);
  QSrc.clear();
}

static unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
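  // The constant 248 presumably approximates the per-SIMD GPR budget that
  // wavefronts share (an assumption, not stated in this file); e.g. a clause
  // needing 8 GPRs per wavefront allows 248 / 8 = 31 wavefronts in flight.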
  return 248 / GPRCount;
}

SUnit *R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = nullptr;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
                          (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
                            (!Available[IDFetch].empty() ||
                             !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approx. number of WF that allows TEX inst to hide ALU inst is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
    float ALUFetchRationEstimate =
        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    if (ALUFetchRationEstimate == 0) {
      AllowSwitchFromAlu = true;
    } else {
      unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
      LLVM_DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
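      // The magic 62.5f is the same formula folded: 500 TEX cycles divided by
      // 8 ALU cycles. For example, an ALU:fetch ratio of 4 would need about
      // 62.5 / 4 ~= 15 wavefronts to hide texture latency.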
      // We assume the local GPR requirements to be "dominated" by the
      // requirement of the TEX clause (which consumes 128-bit regs); ALU inst
      // before and after TEX are indeed likely to consume or generate values
      // from/for the TEX clause.
      // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause.
      // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need
      // one GPR) or TmXYZW = TnXYZW (need 2 GPR).
      // (TODO: use RegisterPressure)
      // If we are going to use too many GPR, we flush Fetch instruction to
      // lower register pressure on 128-bit regs.
      unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
      if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
        AllowSwitchFromAlu = true;
    }
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }

    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a FETCH instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Otherwise try to pick any other ready instruction.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  LLVM_DEBUG(if (SU) {
    dbgs() << " ** Pick node **\n";
    DAG->dumpNode(*SU);
  } else {
    dbgs() << "NO NODE \n";
    for (const SUnit &S : DAG->SUnits)
      if (!S.isScheduled)
        DAG->dumpNode(S);
  });

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    LLVM_DEBUG(dbgs() << "Instruction Type Switch\n");
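    // When switching to a non-ALU clause, mark every slot occupied so the
    // next ALU clause starts with a fresh instruction group.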
    if (NextInstKind != IDAlu)
      OccupiedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount++;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
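      // Inline literals consume clause slots too: count one extra emitted
      // instruction per ALU_LITERAL_X operand.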
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
           E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == R600::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  LLVM_DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}

static bool isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != R600::COPY)
    return false;

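  // Operand 1 is the COPY source; a physical source register marks this as a
  // physical-register copy, which releaseBottomNode() queues separately and
  // pickNode() emits only when no other ALU work is available.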
  return !MI->getOperand(1).getReg().isVirtual();
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  LLVM_DEBUG(dbgs() << "Top Releasing "; DAG->dumpNode(*SU));
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  LLVM_DEBUG(dbgs() << "Bottom Releasing "; DAG->dumpNode(*SU));
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // There is no export clause, so we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(Register Reg,
                                          const TargetRegisterClass *RC) const {
  if (!Reg.isVirtual())
    return RC->contains(Reg);
  return MRI->getRegClass(Reg) == RC;
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(*MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case R600::PRED_X:
    return AluPredX;
  case R600::INTERP_PAIR_XY:
  case R600::INTERP_PAIR_ZW:
  case R600::INTERP_VEC_LOAD:
  case R600::DOT_4:
    return AluT_XYZW;
  case R600::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
    break;
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == R600::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case R600::sub0:
    return AluT_X;
  case R600::sub1:
    return AluT_Y;
  case R600::sub2:
    return AluT_Z;
  case R600::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  Register DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &R600::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &R600::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &R600::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &R600::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &R600::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &R600::R600_Reg128RegClass))
    return AluT_XYZW;

  // LDS src registers cannot be used in the Trans slot.
  if (TII->readsLDSSrcReg(*MI))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit *SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case R600::PRED_X:
  case R600::COPY:
  case R600::CONST_COPY:
  case R600::INTERP_PAIR_XY:
  case R600::INTERP_PAIR_ZW:
  case R600::INTERP_VEC_LOAD:
  case R600::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
  if (Q.empty())
    return nullptr;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
       It != E; ++It) {
    SUnit *SU = *It;
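    // Tentatively add the candidate to the current instruction group and keep
    // it only if the group still fits the constant-read limits. Note that
    // (It + 1).base() is the forward iterator designating *It, which is what
    // erase() expects.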
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate) &&
        (!AnyALU || !TII->isVectorOnly(*SU->getInstr()))) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    }
    InstructionsGroupCandidate.pop_back();
  }
  return nullptr;
}

void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (SUnit *SU : QSrc) {
    AluKind AK = getAluKind(SU);
    AvailableAlus[AK].push_back(SU);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  LLVM_DEBUG(dbgs() << "New Slot\n");
  assert(OccupiedSlotsMask && "Slot wasn't filled");
  OccupiedSlotsMask = 0;
//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
//    OccupiedSlotsMask |= 16;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr *MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), R600::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  Register DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its regclass.
  for (const MachineOperand &MO : MI->all_uses())
    if (MO.getReg() == DestReg)
      return;
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_WRegClass);
    break;
  }
}
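
// Try to fill Slot with an instruction already pinned to the matching
// channel; failing that, pop a channel-agnostic (AluAny) instruction and
// constrain it to the slot via AssignSlot().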
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
         AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
         AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
         AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
         AvailableAlus[AluPredX].size();
}

SUnit *R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupiedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupiedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX], false);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupiedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded], false);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupiedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW], false);
      }
    }
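    // Bit 4 (mask 16) tracks the fifth (Trans) slot, which is only present
    // on VLIW5 hardware (VLIW5 == !ST.hasCaymanISA(), set in initialize()).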
    bool TransSlotOccupied = OccupiedSlotsMask & 16;
    if (!TransSlotOccupied && VLIW5) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupiedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans], false);
      }
      SUnit *SU = AttemptFillSlot(3, true);
      if (SU) {
        OccupiedSlotsMask |= 16;
        return SU;
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupiedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan, false);
        if (SU) {
          OccupiedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return nullptr;
}

SUnit *R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = nullptr;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}