//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//
#include "GCNHazardRecognizer.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <limits>

using namespace llvm;
//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//
GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
  CurrCycleInstr(nullptr),
  MF(MF),
  ST(MF.getSubtarget<GCNSubtarget>()),
  TII(*ST.getInstrInfo()),
  TRI(TII.getRegisterInfo()),
  ClauseUses(TRI.getNumRegUnits()),
  ClauseDefs(TRI.getNumRegUnits()) {
  MaxLookAhead = 5;
}
void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
  EmitInstruction(SU->getInstr());
}
void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
  CurrCycleInstr = MI;
}
static bool isDivFMas(unsigned Opcode) {
  return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64;
}

static bool isSGetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_GETREG_B32;
}

static bool isSSetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
}

static bool isRWLane(unsigned Opcode) {
  return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
}

static bool isRFE(unsigned Opcode) {
  return Opcode == AMDGPU::S_RFE_B64;
}
static bool isSMovRel(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_MOVRELS_B32:
  case AMDGPU::S_MOVRELS_B64:
  case AMDGPU::S_MOVRELD_B32:
  case AMDGPU::S_MOVRELD_B64:
    return true;
  default:
    return false;
  }
}
static bool isSendMsgTraceDataOrGDS(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT:
  case AMDGPU::S_TTRACEDATA:
    return true;
  default:
    // TODO: GDS
    return false;
  }
}
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
  const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                     AMDGPU::OpName::simm16);
  return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}
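
// Added note (not from the original file): the simm16 operand of
// s_getreg/s_setreg packs {id, offset, size}, e.g.
//   s_setreg_b32 hwreg(HW_REG_TRAPSTS, 0, 32), s0
// Masking the immediate with Hwreg::ID_MASK_ recovers just the register id,
// which is all the hazard checks below compare.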
ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
  MachineInstr *MI = SU->getInstr();

  if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
    return NoopHazard;

  // FIXME: Should flat be considered vmem?
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI))
      && checkVMEMHazards(MI) > 0)
    return NoopHazard;

  if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
    return NoopHazard;

  if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
    return NoopHazard;

  if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
    return NoopHazard;

  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
    return NoopHazard;

  if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
    return NoopHazard;

  if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
    return NoopHazard;

  if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
    return NoopHazard;

  if (ST.hasReadM0MovRelInterpHazard() &&
      (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
      checkReadM0Hazards(MI) > 0)
    return NoopHazard;

  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(*MI) &&
      checkReadM0Hazards(MI) > 0)
    return NoopHazard;

  if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
    return NoopHazard;

  if (checkAnyInstHazards(MI) > 0)
    return NoopHazard;

  return NoHazard;
}
unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
  return PreEmitNoops(SU->getInstr());
}

unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
  int WaitStates = std::max(0, checkAnyInstHazards(MI));

  if (SIInstrInfo::isSMRD(*MI))
    return std::max(WaitStates, checkSMRDHazards(MI));

  if (SIInstrInfo::isVALU(*MI))
    WaitStates = std::max(WaitStates, checkVALUHazards(MI));

  if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
    WaitStates = std::max(WaitStates, checkVMEMHazards(MI));

  if (SIInstrInfo::isDPP(*MI))
    WaitStates = std::max(WaitStates, checkDPPHazards(MI));

  if (isDivFMas(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));

  if (isRWLane(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));

  if (MI->isInlineAsm())
    return std::max(WaitStates, checkInlineAsmHazards(MI));

  if (isSGetReg(MI->getOpcode()))
    return std::max(WaitStates, checkGetRegHazards(MI));

  if (isSSetReg(MI->getOpcode()))
    return std::max(WaitStates, checkSetRegHazards(MI));

  if (isRFE(MI->getOpcode()))
    return std::max(WaitStates, checkRFEHazards(MI));

  if (ST.hasReadM0MovRelInterpHazard() && (TII.isVINTRP(*MI) ||
                                           isSMovRel(MI->getOpcode())))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(*MI))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  return WaitStates;
}
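
// Usage note (added commentary): getHazardType() above lets the scheduler
// stall or pick a different instruction while a hazard is pending, whereas
// PreEmitNoops() is queried when an instruction is finally emitted and
// returns how many wait states (s_nop cycles) must still be inserted
// before it.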
void GCNHazardRecognizer::EmitNoop() {
  EmittedInstrs.push_front(nullptr);
}
void GCNHazardRecognizer::AdvanceCycle() {
  // When the scheduler detects a stall, it will call AdvanceCycle() without
  // emitting any instructions.
  if (!CurrCycleInstr)
    return;

  // Do not track non-instructions which do not affect the wait states.
  // If included, these instructions can lead to buffer overflow such that
  // detectable hazards are missed.
  if (CurrCycleInstr->getOpcode() == AMDGPU::IMPLICIT_DEF)
    return;
  else if (CurrCycleInstr->isDebugInstr())
    return;

  unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);

  // Keep track of emitted instructions.
  EmittedInstrs.push_front(CurrCycleInstr);

  // Add a nullptr for each additional wait state after the first. Make sure
  // not to add more than getMaxLookAhead() items to the list, since we
  // truncate the list to that size right after this loop.
  for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
       i < e; ++i) {
    EmittedInstrs.push_front(nullptr);
  }

  // getMaxLookahead() is the largest number of wait states we will ever need
  // to insert, so there is no point in keeping track of more than that many
  // wait states.
  EmittedInstrs.resize(getMaxLookAhead());

  CurrCycleInstr = nullptr;
}
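
// Worked example (illustrative, not from the original file): for an
// instruction with getNumWaitStates() == 3 and getMaxLookAhead() >= 3, the
// deque becomes [nullptr, nullptr, MI, ...], i.e. the instruction plus one
// nullptr per additional wait state, which is exactly the distance that
// getWaitStatesSince() below counts.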
void GCNHazardRecognizer::RecedeCycle() {
  llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
int GCNHazardRecognizer::getWaitStatesSince(
    function_ref<bool(MachineInstr *)> IsHazard) {
  int WaitStates = 0;
  for (MachineInstr *MI : EmittedInstrs) {
    if (MI) {
      if (IsHazard(MI))
        return WaitStates;

      unsigned Opcode = MI->getOpcode();
      if (Opcode == AMDGPU::INLINEASM)
        continue;
    }
    ++WaitStates;
  }
  return std::numeric_limits<int>::max();
}
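
// Note (added commentary): the INT_MAX sentinel above makes every
// "WaitStatesNeeded - WaitStatesSince" computation in the checkers below go
// negative when no hazard is found, so no wait states are requested.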
int GCNHazardRecognizer::getWaitStatesSinceDef(
    unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [IsHazardDef, TRI, Reg] (MachineInstr *MI) {
    return IsHazardDef(MI) && MI->modifiesRegister(Reg, TRI);
  };

  return getWaitStatesSince(IsHazardFn);
}
int GCNHazardRecognizer::getWaitStatesSinceSetReg(
    function_ref<bool(MachineInstr *)> IsHazard) {
  auto IsHazardFn = [IsHazard] (MachineInstr *MI) {
    return isSSetReg(MI->getOpcode()) && IsHazard(MI);
  };

  return getWaitStatesSince(IsHazardFn);
}
//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//
static void addRegUnits(const SIRegisterInfo &TRI,
                        BitVector &BV, unsigned Reg) {
  for (MCRegUnitIterator RUI(Reg, &TRI); RUI.isValid(); ++RUI)
    BV.set(*RUI);
}
static void addRegsToSet(const SIRegisterInfo &TRI,
                         iterator_range<MachineInstr::const_mop_iterator> Ops,
                         BitVector &Set) {
  for (const MachineOperand &Op : Ops) {
    if (Op.isReg())
      addRegUnits(TRI, Set, Op.getReg());
  }
}
void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
  // XXX: Do we need to worry about implicit operands?
  addRegsToSet(TRI, MI.defs(), ClauseDefs);
  addRegsToSet(TRI, MI.uses(), ClauseUses);
}
int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
  // SMEM soft clauses are only present on VI+, and only matter if xnack is
  // enabled.
  if (!ST.isXNACKEnabled())
    return 0;

  bool IsSMRD = TII.isSMRD(*MEM);

  ClauseUses.reset();
  ClauseDefs.reset();

  // A soft-clause is any group of consecutive SMEM instructions. The
  // instructions in this group may return out of order and/or may be
  // replayed (i.e. the same instruction issued more than once).
  //
  // In order to handle these situations correctly we need to make sure that
  // when a clause has more than one instruction, no instruction in the clause
  // writes to a register that is read by another instruction in the clause
  // (including itself). If we encounter this situation, we need to break the
  // clause by inserting a non-SMEM instruction.

  for (MachineInstr *MI : EmittedInstrs) {
    // When we hit a non-SMEM instruction then we have passed the start of the
    // clause and we can stop.
    if (!MI)
      break;

    if (IsSMRD != SIInstrInfo::isSMRD(*MI))
      break;

    addClauseInst(*MI);
  }

  if (ClauseDefs.none())
    return 0;

  // We need to make sure not to put loads and stores in the same clause if
  // they use the same address. For now, just start a new clause whenever we
  // see a load or store.
  if (MEM->mayStore())
    return 1;

  addClauseInst(*MEM);

  // If the set of defs and uses intersect then we cannot add this instruction
  // to the clause, so we have a hazard.
  return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}
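
// Example clause hazard (illustrative, not from the original file):
//   s_load_dwordx2 s[0:1], s[4:5], 0x0
//   s_load_dword   s2, s[0:1], 0x0    ; reads SGPRs written by the previous
//                                     ; SMEM in the same soft clause
// With xnack enabled, reporting one wait state here forces a non-SMEM
// instruction between the two loads, breaking the clause.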
int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  int WaitStatesNeeded = 0;

  WaitStatesNeeded = checkSoftClauseHazards(SMRD);

  // This SMRD hazard only affects SI.
  if (ST.getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return WaitStatesNeeded;

  // A read of an SGPR by an SMRD instruction requires 4 wait states when the
  // SGPR was written by a VALU instruction.
  int SmrdSgprWaitStates = 4;
  auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
  auto IsBufferHazardDefFn = [this] (MachineInstr *MI) { return TII.isSALU(*MI); };

  bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);

  for (const MachineOperand &Use : SMRD->uses()) {
    if (!Use.isReg())
      continue;
    int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    // This fixes what appears to be undocumented hardware behavior in SI where
    // s_mov writing a descriptor and s_buffer_load_dword reading the
    // descriptor needs some number of nops in between. We don't know how many
    // we need, but let's use 4. This wasn't discovered before, probably
    // because the only case when this happens is when we expand a 64-bit
    // pointer into a full descriptor and use s_buffer_load_dword instead of
    // s_load_dword, which was probably never encountered in closed-source
    // code.
    if (IsBufferSMRD) {
      int WaitStatesNeededForUse =
          SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
                                                     IsBufferHazardDefFn);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    }
  }

  return WaitStatesNeeded;
}
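
// Example SI-only SMRD hazard (illustrative, not from the original file):
//   v_readfirstlane_b32 s0, v0     ; VALU writes an SGPR
//   s_load_dword s4, s[0:1], 0x0   ; SMRD reads it: 4 wait states required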
int GCNHazardRecognizer::checkVMEMHazards(MachineInstr *VMEM) {
  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 0;

  int WaitStatesNeeded = checkSoftClauseHazards(VMEM);

  // A read of an SGPR by a VMEM instruction requires 5 wait states when the
  // SGPR was written by a VALU instruction.
  const int VmemSgprWaitStates = 5;
  auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };

  for (const MachineOperand &Use : VMEM->uses()) {
    if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;

    int WaitStatesNeededForUse =
        VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}
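
// Example VMEM hazard (illustrative, not from the original file):
//   v_readfirstlane_b32 s4, v0              ; VALU writes an SGPR
//   buffer_load_dword v1, off, s[4:7], 0    ; VMEM reads it: 5 wait states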
int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Check for DPP VGPR read after VALU VGPR write and EXEC write.
  int DppVgprWaitStates = 2;
  int DppExecWaitStates = 5;
  int WaitStatesNeeded = 0;
  auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };

  for (const MachineOperand &Use : DPP->uses()) {
    if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    int WaitStatesNeededForUse =
        DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg());
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  WaitStatesNeeded = std::max(
      WaitStatesNeeded,
      DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn));

  return WaitStatesNeeded;
}
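
// Example DPP hazard (illustrative, not from the original file):
//   v_add_f32 v0, v1, v2            ; VALU writes v0
//   v_mov_b32_dpp v3, v0 row_shl:1  ; DPP reads v0: 2 wait states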
int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
  const SIInstrInfo *TII = ST.getInstrInfo();

  // v_div_fmas requires 4 wait states after a write to VCC from a VALU
  // instruction.
  const int DivFMasWaitStates = 4;
  auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
  int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn);

  return DivFMasWaitStates - WaitStatesNeeded;
}
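
// Example (illustrative, not from the original file):
//   v_cmp_eq_f32 vcc, v0, v1        ; VALU writes VCC
//   v_div_fmas_f32 v2, v3, v4, v5   ; implicit VCC read: 4 wait states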
int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);

  const int GetRegWaitStates = 2;
  auto IsHazardFn = [TII, GetRegHWReg] (MachineInstr *MI) {
    return GetRegHWReg == getHWReg(TII, *MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);

  return GetRegWaitStates - WaitStatesNeeded;
}
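
// Example (illustrative, not from the original file):
//   s_setreg_b32 hwreg(HW_REG_MODE, 0, 32), s0
//   s_getreg_b32 s1, hwreg(HW_REG_MODE, 0, 32)  ; same hwreg id: 2 wait states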
int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned HWReg = getHWReg(TII, *SetRegInstr);

  const int SetRegWaitStates =
      ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ? 1 : 2;
  auto IsHazardFn = [TII, HWReg] (MachineInstr *MI) {
    return HWReg == getHWReg(TII, *MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
  return SetRegWaitStates - WaitStatesNeeded;
}
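
// Added description: createsVALUHazard() below returns the operand index of
// the store-data operand when MI is a VMEM store with more than 64 bits of
// store data (which a following VALU write could clobber), or -1 when MI
// cannot create such a hazard. checkVALUHazardsHelper() compares that
// operand against a VALU def via regsOverlap().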
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
  if (!MI.mayStore())
    return -1;

  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
  int VDataRCID = -1;
  if (VDataIdx != -1)
    VDataRCID = Desc.OpInfo[VDataIdx].RegClass;

  if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
    // There is no hazard if the instruction does not use vector regs.
    if (VDataIdx == -1)
      return -1;

    // For MUBUF/MTBUF instructions this hazard only exists if the
    // instruction is not using a register in the soffset field.
    const MachineOperand *SOffset =
        TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
    // If we have no soffset operand, then assume this field has been
    // hardcoded to zero.
    if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
        (!SOffset || !SOffset->isReg()))
      return VDataIdx;
  }

  // MIMG instructions create a hazard if they don't use a 256-bit T# and
  // the store size is greater than 8 bytes and they have more than two bits
  // of their dmask set.
  // All our MIMG definitions use a 256-bit T#, so we can skip checking for
  // them.
  if (TII->isMIMG(MI)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
    assert(SRsrcIdx != -1 &&
           AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
    (void)SRsrcIdx;
  }

  if (TII->isFLAT(MI)) {
    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
    if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
      return DataIdx;
  }

  return -1;
}
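
// Example hazard sequence (illustrative, not from the original file):
//   buffer_store_dwordx4 v[0:3], off, s[8:11], 0  ; > 64 bits of store data
//   v_mov_b32 v1, v5                              ; VALU overwrites part of
//                                                 ; the data: 1 wait state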
int GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
                                                const MachineRegisterInfo &MRI) {
  // Helper to check for the hazard where VMEM instructions that store more
  // than 8 bytes can have their store data overwritten by the next
  // instruction.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const int VALUWaitStates = 1;
  int WaitStatesNeeded = 0;

  if (!TRI->isVGPR(MRI, Def.getReg()))
    return WaitStatesNeeded;
  unsigned Reg = Def.getReg();
  auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
    int DataIdx = createsVALUHazard(*MI);
    return DataIdx >= 0 &&
           TRI->regsOverlap(MI->getOperand(DataIdx).getReg(), Reg);
  };
  int WaitStatesNeededForDef =
      VALUWaitStates - getWaitStatesSince(IsHazardFn);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);

  return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
  // This checks for the hazard where VMEM instructions that store more than
  // 8 bytes can have their store data overwritten by the next instruction.
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Def : VALU->defs()) {
    WaitStatesNeeded =
        std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
  }

  return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
  // This checks for hazards associated with inline asm statements.
  // Since inline asms can contain just about anything, we use this
  // to call/leverage other check*Hazard routines. Note that
  // this function doesn't attempt to address all possible inline asm
  // hazards (good luck), but is a collection of what has been
  // problematic thus far.

  // See checkVALUHazards().
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (unsigned I = InlineAsm::MIOp_FirstOperand, E = IA->getNumOperands();
       I != E; ++I) {
    const MachineOperand &Op = IA->getOperand(I);
    if (Op.isReg() && Op.isDef()) {
      WaitStatesNeeded =
          std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
    }
  }

  return WaitStatesNeeded;
}
int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  const MachineOperand *LaneSelectOp =
      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);

  if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
    return 0;

  unsigned LaneSelectReg = LaneSelectOp->getReg();
  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return TII->isVALU(*MI);
  };

  const int RWLaneWaitStates = 4;
  int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn);
  return RWLaneWaitStates - WaitStatesSince;
}
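
// Example (illustrative, not from the original file):
//   v_readfirstlane_b32 s0, v0   ; VALU writes the lane-select SGPR
//   v_readlane_b32 s1, v2, s0    ; lane select in src1: 4 wait states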
int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();

  const int RFEWaitStates = 1;

  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return getHWReg(TII, *MI) == AMDGPU::Hwreg::ID_TRAPSTS;
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
  return RFEWaitStates - WaitStatesNeeded;
}
int GCNHazardRecognizer::checkAnyInstHazards(MachineInstr *MI) {
  if (MI->isDebugInstr())
    return 0;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (!ST.hasSMovFedHazard())
    return 0;

  // Check for any instruction reading an SGPR after a write from
  // s_mov_fed_b32.
  int MovFedWaitStates = 1;
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Use : MI->uses()) {
    if (!Use.isReg() || TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    auto IsHazardFn = [] (MachineInstr *MI) {
      return MI->getOpcode() == AMDGPU::S_MOV_FED_B32;
    };
    int WaitStatesNeededForUse =
        MovFedWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}
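
// Example (illustrative, not from the original file):
//   s_mov_fed_b32 s0, s1
//   s_add_u32 s2, s0, s3   ; SGPR read after s_mov_fed write: 1 wait state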
int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const int SMovRelWaitStates = 1;
  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return TII->isSALU(*MI);
  };
  return SMovRelWaitStates - getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn);
}
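
// Example (illustrative, not from the original file):
//   s_mov_b32 m0, s0                  ; SALU writes M0
//   v_interp_p1_f32 v2, v0, attr0.x   ; VINTRP reads M0: 1 wait state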