//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"
static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(true));
namespace {

class V2SCopyInfo {
public:
  // VGPR to SGPR copy being processed
  MachineInstr *Copy;
  // All SALU instructions reachable from this copy in the SSA graph
  SetVector<MachineInstr *> SChain;
  // Number of SGPR to VGPR copies that are used to put the SALU computation
  // results back to VALU.
  unsigned NumSVCopies;

  unsigned Score;
  // Actual count of v_readfirstlane_b32
  // which need to be inserted to keep SChain SALU
  unsigned NumReadfirstlanes;
  // Current score state. To speed up selection of V2SCopyInfos for processing
  bool NeedToBeConvertedToVALU = false;
  // Unique ID. Used as a key for mapping to keep permanent order.
  unsigned ID;

  // Count of other VGPR to SGPR copies that contribute to the
  // current copy's SChain
  unsigned SiblingPenalty = 0;
  SetVector<unsigned> Siblings;
  V2SCopyInfo() : Copy(nullptr), ID(0){};
  V2SCopyInfo(unsigned Id, MachineInstr *C, unsigned Width)
      : Copy(C), NumSVCopies(0), NumReadfirstlanes(Width / 32), ID(Id){};
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    dbgs() << ID << " : " << *Copy << "\n\tS:" << SChain.size()
           << "\n\tSV:" << NumSVCopies << "\n\tSP: " << SiblingPenalty
           << "\nScore: " << Score << "\n";
  }
#endif
};
class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;
  SmallVector<MachineInstr *, 4> SCCCopies;
  SmallVector<MachineInstr *, 4> RegSequences;
  SmallVector<MachineInstr *, 4> PHINodes;
  SmallVector<MachineInstr *, 4> S2VCopies;
  unsigned NextVGPRToSGPRCopyID = 0;
  MapVector<unsigned, V2SCopyInfo> V2SCopies;
  DenseMap<MachineInstr *, SetVector<unsigned>> SiblingPenalty;

public:
  static char ID;

  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;
  void fixSCCCopies(MachineFunction &MF);
  void prepareRegSequenceAndPHIs(MachineFunction &MF);
  unsigned getNextVGPRToSGPRCopyId() { return ++NextVGPRToSGPRCopyID; }
  bool needToBeConvertedToVALU(V2SCopyInfo *I);
  void analyzeVGPRToSGPRCopy(MachineInstr *MI);
  void lowerVGPR2SGPRCopies(MachineFunction &MF);
  // Handles copies whose source register is:
  //   1. A physical register
  //   2. An AGPR
  //   3. Defined by an instruction that merely moves an immediate
  bool lowerSpecialCase(MachineInstr &MI, MachineBasicBlock::iterator &I);

  void processPHINode(MachineInstr &MI);

  // Check if MO is an immediate materialized into a VGPR, and if so replace it
  // with an SGPR immediate. The VGPR immediate is also deleted if it does not
  // have any other uses.
  bool tryMoveVGPRConstToSGPR(MachineOperand &MO, Register NewDst,
                              MachineBasicBlock *BlockToInsertTo,
                              MachineBasicBlock::iterator PointToInsertTo);

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addPreserved<MachineDominatorTreeWrapperPass>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;
FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC = SrcReg.isVirtual()
                                         ? MRI.getRegClass(SrcReg)
                                         : TRI.getPhysRegBaseClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC = DstReg.isVirtual()
                                         ? MRI.getRegClass(DstReg)
                                         : TRI.getPhysRegBaseClass(DstReg);

  return std::pair(SrcRC, DstRC);
}
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVectorRegisters(SrcRC);
}
static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVectorRegisters(DstRC);
}
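
// If every user of the copy's destination (all within the same block) can
// legally take an SGPR operand directly, the destination can simply be
// retyped to an SGPR class, making the copy trivially coalescable.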
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual())
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
      return false;

    unsigned OpIdx = MO.getOperandNo();
    if (OpIdx >= UseMI->getDesc().getNumOperands() ||
        !TII->isOperandLegal(*UseMI, OpIdx, &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}
// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (CopyUse.getOperand(0).getReg().isPhysical())
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->isAGPRClass(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    const TargetRegisterClass *SrcRC =
        TRI->getRegClassForOperandReg(MRI, MI.getOperand(I));
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
        AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
              TmpAReg)
          .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(1).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64_IMM_PSEUDO;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->predecessors());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}
// Checks if there is a potential path from instruction From to instruction To.
// If CutOff is specified and sits on that path, we ignore the portion of the
// path above it and report To as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();

  // Do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -O0.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
         (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}
// Return the first non-prologue instruction in the block.
static MachineBasicBlock::iterator
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
  while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
    ++I;

  return I;
}
// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// registers are used.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo *TRI,
                                   MachineDominatorTree &MDT,
                                   const TargetInstrInfo *TII) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr *, 8> Clobbers;
  // List of instructions marked for deletion.
  SmallSet<MachineInstr *, 8> MergedInstrs;

  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      }
      if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {

          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MergedInstrs.insert(MI2);
            Changed = true;
            ++I2;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  // Remove initializations that were merged into another.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    auto I = Defs.begin();
    while (I != Defs.end()) {
      if (MergedInstrs.count(*I)) {
        (*I)->eraseFromParent();
        I = Defs.erase(I);
      } else
        ++I;
    }
  }

  // Try to schedule SGPR initializations as early as possible in the MBB.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    for (auto *MI : Defs) {
      auto MBB = MI->getParent();
      MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
      MachineBasicBlock::reverse_iterator B(BoundaryMI);
      // Check if B should actually be a boundary. If not set the previous
      // instruction as the boundary instead.
      if (!TII->isBasicBlockPrologue(*B))
        B++;

      auto R = std::next(MI->getReverseIterator());
      const unsigned Threshold = 50;
      // Search until B or Threshold for a place to insert the initialization.
      for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
        if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
            TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
          break;

      // Move to directly after R.
      if (&*--R != MI)
        MBB->splice(*R, MBB, MI);
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}
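
// Main driver: a single walk over every instruction collects and pre-lowers
// the interesting cases (copies, PHIs, REG_SEQUENCEs, V_WRITELANE_B32), and
// the gathered worklists are then post-processed after the walk.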
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  // Only need to run this in SelectionDAG path.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::STRICT_WQM:
      case AMDGPU::SOFT_WQM:
      case AMDGPU::STRICT_WWM: {
        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);

        if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          // Since VGPR to SGPR copies affect VGPR to SGPR copy
          // score and, hence the lowering decision, let's try to get rid of
          // them as early as possible
          if (tryChangeVGPRtoSGPRinCopy(MI, TRI, TII))
            continue;

          // Collect those not changed to try them after VGPR to SGPR copies
          // lowering as there will be more opportunities.
          S2VCopies.push_back(&MI);
        }
        if (!isVGPRToSGPRCopy(SrcRC, DstRC, *TRI))
          continue;
        if (lowerSpecialCase(MI, I))
          continue;

        analyzeVGPRToSGPRCopy(&MI);

        break;
      }
      case AMDGPU::INSERT_SUBREG:
      case AMDGPU::PHI:
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->isSGPRClass(TII->getOpRegClass(MI, 0))) {
          for (MachineOperand &MO : MI.operands()) {
            if (!MO.isReg() || !MO.getReg().isVirtual())
              continue;
            const TargetRegisterClass *SrcRC = MRI->getRegClass(MO.getReg());
            if (TRI->hasVectorRegisters(SrcRC)) {
              const TargetRegisterClass *DestRC =
                  TRI->getEquivalentSGPRClass(SrcRC);
              Register NewDst = MRI->createVirtualRegister(DestRC);
              MachineBasicBlock *BlockToInsertCopy =
                  MI.isPHI() ? MI.getOperand(MO.getOperandNo() + 1).getMBB()
                             : &MBB;
              MachineBasicBlock::iterator PointToInsertCopy =
                  MI.isPHI() ? BlockToInsertCopy->getFirstInstrTerminator() : I;

              if (!tryMoveVGPRConstToSGPR(MO, NewDst, BlockToInsertCopy,
                                          PointToInsertCopy)) {
                MachineInstr *NewCopy =
                    BuildMI(*BlockToInsertCopy, PointToInsertCopy,
                            PointToInsertCopy->getDebugLoc(),
                            TII->get(AMDGPU::COPY), NewDst)
                        .addReg(MO.getReg());
                MO.setReg(NewDst);
                analyzeVGPRToSGPRCopy(NewCopy);
              }
            }
          }
        }

        if (MI.isPHI())
          PHINodes.push_back(&MI);
        else if (MI.isRegSequence())
          RegSequences.push_back(&MI);

        break;
      }
      case AMDGPU::V_WRITELANE_B32: {
        // Some architectures allow more than one constant bus access without
        // SGPR restriction
        if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
          break;

        // Writelane is special in that it can use SGPR and M0 (which would
        // normally count as using the constant bus twice - but in this case it
        // is allowed since the lane selector doesn't count as a use of the
        // constant bus). However, it is still required to abide by the 1 SGPR
        // rule. Apply a fix here as we might have multiple SGPRs after
        // legalizing VGPRs to SGPRs
        int Src0Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
        int Src1Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
        MachineOperand &Src0 = MI.getOperand(Src0Idx);
        MachineOperand &Src1 = MI.getOperand(Src1Idx);

        // Check to see if the instruction violates the 1 SGPR rule
        if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
             Src0.getReg() != AMDGPU::M0) &&
            (Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
             Src1.getReg() != AMDGPU::M0)) {

          // Check for trivially easy constant prop into one of the operands
          // If this is the case then perform the operation now to resolve SGPR
          // issue. If we don't do that here we will always insert a mov to m0
          // that can't be resolved in later operand folding pass
          bool Resolved = false;
          for (MachineOperand *MO : {&Src0, &Src1}) {
            if (MO->getReg().isVirtual()) {
              MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
              if (DefMI && TII->isFoldableCopy(*DefMI)) {
                const MachineOperand &Def = DefMI->getOperand(0);
                if (Def.isReg() &&
                    MO->getReg() == Def.getReg() &&
                    MO->getSubReg() == Def.getSubReg()) {
                  const MachineOperand &Copied = DefMI->getOperand(1);
                  if (Copied.isImm() &&
                      TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
                    MO->ChangeToImmediate(Copied.getImm());
                    Resolved = true;
                    break;
                  }
                }
              }
            }
          }

          if (!Resolved) {
            // Haven't managed to resolve by replacing an SGPR with an immediate
            // Move src1 to be in M0
            BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::COPY), AMDGPU::M0)
                .add(Src1);
            Src1.ChangeToRegister(AMDGPU::M0, false);
          }
        }
        break;
      }
      }
    }
  }

  lowerVGPR2SGPRCopies(MF);
  // Postprocessing
  fixSCCCopies(MF);
  for (auto MI : S2VCopies) {
    // Check if it is still valid
    if (MI->isCopy()) {
      const TargetRegisterClass *SrcRC, *DstRC;
      std::tie(SrcRC, DstRC) = getCopyRegClasses(*MI, *TRI, *MRI);
      if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
        tryChangeVGPRtoSGPRinCopy(*MI, TRI, TII);
    }
  }
  for (auto MI : RegSequences) {
    // Check if it is still valid
    if (MI->isRegSequence())
      foldVGPRCopyIntoRegSequence(*MI, TRI, TII, *MRI);
  }
  for (auto MI : PHINodes) {
    processPHINode(*MI);
  }
  if (MF.getTarget().getOptLevel() > CodeGenOptLevel::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);

  SiblingPenalty.clear();
  V2SCopies.clear();
  PHINodes.clear();
  S2VCopies.clear();
  RegSequences.clear();

  return true;
}
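
// PHIs are handled after all copies have been lowered: a PHI whose transitive
// COPY/REG_SEQUENCE uses are all AGPRs is retyped to an AGPR class, and any
// PHI producing a vector register is legalized to the VALU.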
void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
  bool AllAGPRUses = true;
  SetVector<const MachineInstr *> worklist;
  SmallSet<const MachineInstr *, 4> Visited;
  SetVector<MachineInstr *> PHIOperands;
  worklist.insert(&MI);
  Visited.insert(&MI);
  // HACK to make MIR tests with no uses happy
  bool HasUses = false;
  while (!worklist.empty()) {
    const MachineInstr *Instr = worklist.pop_back_val();
    Register Reg = Instr->getOperand(0).getReg();
    for (const auto &Use : MRI->use_operands(Reg)) {
      HasUses = true;
      const MachineInstr *UseMI = Use.getParent();
      AllAGPRUses &= (UseMI->isCopy() &&
                      TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
                     TRI->isAGPR(*MRI, Use.getReg());
      if (UseMI->isCopy() || UseMI->isRegSequence()) {
        if (Visited.insert(UseMI).second)
          worklist.insert(UseMI);
      }
    }
  }

  Register PHIRes = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
  if (HasUses && AllAGPRUses && !TRI->isAGPRClass(RC0)) {
    LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
    MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
    for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
      MachineInstr *DefMI = MRI->getVRegDef(MI.getOperand(I).getReg());
      if (DefMI && DefMI->isPHI())
        PHIOperands.insert(DefMI);
    }
  }

  if (TRI->isVectorRegister(*MRI, PHIRes) ||
      RC0 == &AMDGPU::VReg_1RegClass) {
    LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
    TII->legalizeOperands(MI, MDT);
  }

  // Propagate register class back to PHI operands which are PHI themselves.
  while (!PHIOperands.empty()) {
    processPHINode(*PHIOperands.pop_back_val());
  }
}
bool SIFixSGPRCopies::tryMoveVGPRConstToSGPR(
    MachineOperand &MaybeVGPRConstMO, Register DstReg,
    MachineBasicBlock *BlockToInsertTo,
    MachineBasicBlock::iterator PointToInsertTo) {

  MachineInstr *DefMI = MRI->getVRegDef(MaybeVGPRConstMO.getReg());
  if (!DefMI || !DefMI->isMoveImmediate())
    return false;

  MachineOperand *SrcConst = TII->getNamedOperand(*DefMI, AMDGPU::OpName::src0);
  if (SrcConst->isReg())
    return false;

  const TargetRegisterClass *SrcRC =
      MRI->getRegClass(MaybeVGPRConstMO.getReg());
  unsigned MoveSize = TRI->getRegSizeInBits(*SrcRC);
  unsigned MoveOp = MoveSize == 64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
  BuildMI(*BlockToInsertTo, PointToInsertTo, PointToInsertTo->getDebugLoc(),
          TII->get(MoveOp), DstReg)
      .add(*SrcConst);
  if (MRI->hasOneUse(MaybeVGPRConstMO.getReg()))
    DefMI->eraseFromParent();
  MaybeVGPRConstMO.setReg(DstReg);
  return true;
}
bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI,
                                       MachineBasicBlock::iterator &I) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  if (!DstReg.isVirtual()) {
    // If the destination register is a physical register there isn't
    // really much we can do to fix this.
    // Some special instructions use M0 as an input. Some even only use
    // the first lane. Insert a readfirstlane and hope for the best.
    if (DstReg == AMDGPU::M0 &&
        TRI->hasVectorRegisters(MRI->getRegClass(SrcReg))) {
      Register TmpReg =
          MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
          .add(MI.getOperand(1));
      MI.getOperand(1).setReg(TmpReg);
    } else if (tryMoveVGPRConstToSGPR(MI.getOperand(1), DstReg, MI.getParent(),
                                      MI)) {
      I = std::next(I);
      MI.eraseFromParent();
    }
    return true;
  }
  if (!SrcReg.isVirtual() || TRI->isAGPR(*MRI, SrcReg)) {
    SIInstrWorklist worklist;
    worklist.insert(&MI);
    TII->moveToVALU(worklist, MDT);
    return true;
  }

  unsigned SMovOp;
  int64_t Imm;
  // If we are just copying an immediate, we can replace the copy with
  // s_mov_b32.
  if (isSafeToFoldImmIntoCopy(&MI, MRI->getVRegDef(SrcReg), TII, SMovOp, Imm)) {
    MI.getOperand(1).ChangeToImmediate(Imm);
    MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
    MI.setDesc(TII->get(SMovOp));
    return true;
  }
  return false;
}
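
// Starting from one V2S copy, follow its SALU users (including SCC readers
// after compares) to build the copy's SChain; copies that leak results back
// into VGPRs are counted in NumSVCopies instead of being chained.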
void SIFixSGPRCopies::analyzeVGPRToSGPRCopy(MachineInstr* MI) {
  Register DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);

  V2SCopyInfo Info(getNextVGPRToSGPRCopyId(), MI,
                   TRI->getRegSizeInBits(*DstRC));
  SmallVector<MachineInstr *, 8> AnalysisWorklist;
  // Needed because the SSA is not a tree but a graph and may have
  // forks and joins. We should not then go same way twice.
  DenseSet<MachineInstr *> Visited;
  AnalysisWorklist.push_back(Info.Copy);
  while (!AnalysisWorklist.empty()) {

    MachineInstr *Inst = AnalysisWorklist.pop_back_val();

    if (!Visited.insert(Inst).second)
      continue;

    // Copies and REG_SEQUENCE do not contribute to the final assembly
    // So, skip them but take care of the SGPR to VGPR copies bookkeeping.
    if (Inst->isCopy() || Inst->isRegSequence()) {
      if (TRI->isVGPR(*MRI, Inst->getOperand(0).getReg())) {
        if (!Inst->isCopy() ||
            !tryChangeVGPRtoSGPRinCopy(*Inst, TRI, TII)) {
          Info.NumSVCopies++;
          continue;
        }
      }
    }

    SiblingPenalty[Inst].insert(Info.ID);

    SmallVector<MachineInstr *, 4> Users;
    if ((TII->isSALU(*Inst) && Inst->isCompare()) ||
        (Inst->isCopy() && Inst->getOperand(0).getReg() == AMDGPU::SCC)) {
      auto I = Inst->getIterator();
      auto E = Inst->getParent()->end();
      while (++I != E &&
             !I->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr)) {
        if (I->readsRegister(AMDGPU::SCC, /*TRI=*/nullptr))
          Users.push_back(&*I);
      }
    } else if (Inst->getNumExplicitDefs() != 0) {
      Register Reg = Inst->getOperand(0).getReg();
      if (TRI->isSGPRReg(*MRI, Reg) && !TII->isVALU(*Inst))
        for (auto &U : MRI->use_instructions(Reg))
          Users.push_back(&U);
    }
    for (auto U : Users) {
      if (TII->isSALU(*U))
        Info.SChain.insert(U);
      AnalysisWorklist.push_back(U);
    }
  }
  V2SCopies[Info.ID] = Info;
}
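
// Scoring model: Profit is the number of SALU instructions that stay scalar
// if the copy is kept; Penalty counts the v_readfirstlane_b32s required (one
// per 32 bits copied), the SGPR to VGPR copy-backs, and the sibling V2S
// copies feeding the same chain. Scores below 3 send the copy to the VALU.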
// The main function that computes the VGPR to SGPR copy score
// and determines the copy's further lowering: v_readfirstlane_b32 or moveToVALU.
bool SIFixSGPRCopies::needToBeConvertedToVALU(V2SCopyInfo *Info) {
  if (Info->SChain.empty()) {
    Info->Score = 0;
    return true;
  }
  Info->Siblings = SiblingPenalty[*llvm::max_element(
      Info->SChain, [&](MachineInstr *A, MachineInstr *B) -> bool {
        return SiblingPenalty[A].size() < SiblingPenalty[B].size();
      })];
  Info->Siblings.remove_if([&](unsigned ID) { return ID == Info->ID; });
  // The loop below computes the number of other VGPR to SGPR V2SCopies
  // which contribute to the current copy SALU chain. We assume that all the
  // V2SCopies with the same source virtual register will be squashed to one
  // by regalloc. Also we take care of the V2SCopies of the different subregs
  // of the same register.
  SmallSet<std::pair<Register, unsigned>, 4> SrcRegs;
  for (auto J : Info->Siblings) {
    auto InfoIt = V2SCopies.find(J);
    if (InfoIt != V2SCopies.end()) {
      MachineInstr *SiblingCopy = InfoIt->second.Copy;
      if (SiblingCopy->isImplicitDef())
        // the COPY has already been MoveToVALUed
        continue;

      SrcRegs.insert(std::pair(SiblingCopy->getOperand(1).getReg(),
                               SiblingCopy->getOperand(1).getSubReg()));
    }
  }
  Info->SiblingPenalty = SrcRegs.size();

  unsigned Penalty =
      Info->NumSVCopies + Info->SiblingPenalty + Info->NumReadfirstlanes;
  unsigned Profit = Info->SChain.size();
  Info->Score = Penalty > Profit ? 0 : Profit - Penalty;
  Info->NeedToBeConvertedToVALU = Info->Score < 3;
  return Info->NeedToBeConvertedToVALU;
}
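
// Lowering iterates to a fixpoint: each copy sent to the VALU shrinks the
// SChain of its siblings, which can flip their own scoring decision, so
// affected siblings are re-scored and possibly re-queued.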
void SIFixSGPRCopies::lowerVGPR2SGPRCopies(MachineFunction &MF) {

  SmallVector<unsigned, 8> LoweringWorklist;
  for (auto &C : V2SCopies) {
    if (needToBeConvertedToVALU(&C.second))
      LoweringWorklist.push_back(C.second.ID);
  }

  // Store all the V2S copy instructions that need to be moved to VALU
  // in the Copies worklist.
  SIInstrWorklist Copies;

  while (!LoweringWorklist.empty()) {
    unsigned CurID = LoweringWorklist.pop_back_val();
    auto CurInfoIt = V2SCopies.find(CurID);
    if (CurInfoIt != V2SCopies.end()) {
      V2SCopyInfo C = CurInfoIt->second;
      LLVM_DEBUG(dbgs() << "Processing ...\n"; C.dump());
      for (auto S : C.Siblings) {
        auto SibInfoIt = V2SCopies.find(S);
        if (SibInfoIt != V2SCopies.end()) {
          V2SCopyInfo &SI = SibInfoIt->second;
          LLVM_DEBUG(dbgs() << "Sibling:\n"; SI.dump());
          if (!SI.NeedToBeConvertedToVALU) {
            SI.SChain.set_subtract(C.SChain);
            if (needToBeConvertedToVALU(&SI))
              LoweringWorklist.push_back(SI.ID);
          }
          SI.Siblings.remove_if([&](unsigned ID) { return ID == C.ID; });
        }
      }
      LLVM_DEBUG(dbgs() << "V2S copy " << *C.Copy
                        << " is being turned to VALU\n");
      // TODO: MapVector::erase is inefficient. Do bulk removal with remove_if
      // instead.
      V2SCopies.erase(C.ID);
      Copies.insert(C.Copy);
    }
  }

  TII->moveToVALU(Copies, MDT);

  // Now do actual lowering
  for (auto C : V2SCopies) {
    MachineInstr *MI = C.second.Copy;
    MachineBasicBlock *MBB = MI->getParent();
    // We decide to turn V2S copy to v_readfirstlane_b32
    // remove it from the V2SCopies and remove it from all its siblings
    LLVM_DEBUG(dbgs() << "V2S copy " << *MI
                      << " is being turned to v_readfirstlane_b32"
                      << " Score: " << C.second.Score << "\n");
    Register DstReg = MI->getOperand(0).getReg();
    Register SrcReg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    const TargetRegisterClass *SrcRC =
        TRI->getRegClassForOperandReg(*MRI, MI->getOperand(1));
    size_t SrcSize = TRI->getRegSizeInBits(*SrcRC);
    if (SrcSize == 16) {
      // HACK to handle possible 16bit VGPR source
      auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                         TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg);
      MIB.addReg(SrcReg, 0, AMDGPU::NoSubRegister);
    } else if (SrcSize == 32) {
      auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                         TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg);
      MIB.addReg(SrcReg, 0, SubReg);
    } else {
      auto Result = BuildMI(*MBB, MI, MI->getDebugLoc(),
                            TII->get(AMDGPU::REG_SEQUENCE), DstReg);
      int N = TRI->getRegSizeInBits(*SrcRC) / 32;
      for (int i = 0; i < N; i++) {
        Register PartialSrc = TII->buildExtractSubReg(
            Result, *MRI, MI->getOperand(1), SrcRC,
            TRI->getSubRegFromChannel(i), &AMDGPU::VGPR_32RegClass);
        Register PartialDst =
            MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
        BuildMI(*MBB, *Result, Result->getDebugLoc(),
                TII->get(AMDGPU::V_READFIRSTLANE_B32), PartialDst)
            .addReg(PartialSrc);
        Result.addReg(PartialDst).addImm(TRI->getSubRegFromChannel(i));
      }
    }
    MI->eraseFromParent();
  }
}
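
// SCC is not directly copyable, so plain COPYs of it must be expanded: a copy
// from SCC is rematerialized as an S_CSELECT into a bool register, and a copy
// to SCC becomes an S_AND with EXEC whose SCC def provides the value.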
void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
  bool IsWave32 = MF.getSubtarget<GCNSubtarget>().isWave32();
  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      MachineInstr &MI = *I;
      // May already have been lowered.
      if (!MI.isCopy())
        continue;
      Register SrcReg = MI.getOperand(1).getReg();
      Register DstReg = MI.getOperand(0).getReg();
      if (SrcReg == AMDGPU::SCC) {
        Register SCCCopy = MRI->createVirtualRegister(
            TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID));
        I = BuildMI(*MI.getParent(), std::next(MachineBasicBlock::iterator(MI)),
                    MI.getDebugLoc(),
                    TII->get(IsWave32 ? AMDGPU::S_CSELECT_B32
                                      : AMDGPU::S_CSELECT_B64),
                    SCCCopy)
                .addImm(-1)
                .addImm(0);
        I = BuildMI(*MI.getParent(), std::next(I), I->getDebugLoc(),
                    TII->get(AMDGPU::COPY), DstReg)
                .addReg(SCCCopy);
        MI.eraseFromParent();
        continue;
      }
      if (DstReg == AMDGPU::SCC) {
        unsigned Opcode = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
        Register Exec = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
        Register Tmp = MRI->createVirtualRegister(TRI->getBoolRC());
        I = BuildMI(*MI.getParent(), std::next(MachineBasicBlock::iterator(MI)),
                    MI.getDebugLoc(), TII->get(Opcode))
                .addReg(Tmp, getDefRegState(true))
                .addReg(SrcReg)
                .addReg(Exec);
        MI.eraseFromParent();
      }
    }
  }
}