//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains its definition class to
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;
#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));
namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)
char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;
FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
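
// Returns true if \p MI has a virtual register operand that lives in a
// register class containing VGPRs.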
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}
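
// Returns the register classes of the source and destination operands of
// \p Copy. Virtual registers are looked up in \p MRI, physical registers in
// \p TRI.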
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
      TargetRegisterInfo::isVirtualRegister(SrcReg) ?
      MRI.getRegClass(SrcReg) :
      TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
      TargetRegisterInfo::isVirtualRegister(DstReg) ?
      MRI.getRegClass(DstReg) :
      TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVGPRs(SrcRC);
}
static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVGPRs(DstRC);
}
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}
// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
// SGPRx = ...
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
// VGPRz = COPY SGPRy
//
// ==>
//
// VGPRx = COPY SGPRx
// VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // ==>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
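
// Returns true if any incoming value of \p PHI lives in a register class
// containing VGPRs.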
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}
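
// Returns true if an incoming value of \p PHI is (possibly through a chain of
// other PHIs) defined by a SI_IF_BREAK instruction. \p Visited records the
// registers already examined so cycles in the PHI graph terminate.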
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_IF_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}
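
// Returns true if \p MBB has a terminator that writes EXEC, which indicates
// the block ends in a divergent branch.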
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}
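
// Checks whether \p Copy copies the immediate materialized by \p MoveImm and,
// if so, reports the equivalent scalar move opcode through \p SMovOp and the
// immediate value through \p Imm.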
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
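
// Walks the predecessor graph of \p MBB (stopping at \p CutOff, if given) and
// returns true if \p Predicate matches any visited block.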
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}
static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}
// Checks if there is a potential path from instruction From to instruction To.
// If CutOff is specified and sits on that path, we ignore the portion of the
// path above it and report the destination as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From block dominates To block or instructions are in the same
  // block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
           (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}
// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr *, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto intereferes = [&](MachineBasicBlock::iterator From,
                               MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr *&Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!intereferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!intereferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}
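
// Entry point: walk every instruction and rewrite illegal VGPR -> SGPR copies,
// as well as SGPR-defining PHI, REG_SEQUENCE, and INSERT_SUBREG instructions
// with VGPR inputs, into VALU equivalents; afterwards, optionally hoist and
// merge M0 initializations.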
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::WWM: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        bool HasVGPROperand = phiHasVGPROperands(MI, MRI, TRI, TII);
        if (MI.getNumExplicitOperands() == 5 && !HasVGPROperand) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            LLVM_DEBUG(dbgs()
                       << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will become:
        //
        // sgpr2 = def;
        // ...
        // sgpr3 = def;
        // ...
        // use sgpr3;
        //
        // The one exception to this rule is when one of the operands is
        // defined by a SI_IF_BREAK instruction. In this case, we know the
        // program will never enter the second block (the loop) without
        // entering the first block (where the condition is computed), so
        // there is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
          LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}