//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(true));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineBasicBlock *processPHINode(MachineInstr &MI);

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
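
// Return true if any virtual-register operand of MI belongs to a register
// class that contains vector (VGPR or AGPR) registers.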
static bool hasVectorOperands(const MachineInstr &MI,
                              const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
      continue;

    if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}
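
// Return the source and destination register classes of a copy-like
// instruction, handling both virtual and physical registers.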
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC = SrcReg.isVirtual()
                                         ? MRI.getRegClass(SrcReg)
                                         : TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC = DstReg.isVirtual()
                                         ? MRI.getRegClass(DstReg)
                                         : TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVectorRegisters(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVectorRegisters(DstRC);
}
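
// Try to turn an SGPR-to-VGPR copy into an SGPR-to-SGPR copy: if every user
// of the destination is a target instruction in the same block that accepts
// an SGPR in that operand position, retype the destination register to the
// equivalent SGPR class so the copy can later be coalesced away.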
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual())
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
      return false;

    unsigned OpIdx = UseMI->getOperandNo(&MO);
    if (OpIdx >= UseMI->getDesc().getNumOperands() ||
        !TII->isOperandLegal(*UseMI, OpIdx, &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (CopyUse.getOperand(0).getReg().isPhysical())
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->hasAGPRs(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    Register SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
        AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
              TmpAReg)
        .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
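
// Walk the predecessors of MBB transitively, without searching past CutOff,
// and return true if Predicate holds for any visited block.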
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->predecessors());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

// Check whether there is a potential path from instruction From to
// instruction To. If CutOff is specified and lies on that path, the portion
// of the path above CutOff is ignored and the destination is reported as not
// reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();

  // Do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
                            (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Return the first non-prologue instruction in the block.
static MachineBasicBlock::iterator
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
  while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
    ++I;

  return I;
}
// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
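//
// For example (a sketch of the intended effect), two identical
// initializations
//   bb.1:  $m0 = S_MOV_B32 -1
//   bb.2:  $m0 = S_MOV_B32 -1
// whose nearest common dominator is bb.0 can be replaced by a single
//   bb.0:  $m0 = S_MOV_B32 -1
// provided no other definition of the register interferes on the paths
// between these blocks.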
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo *TRI,
                                   MachineDominatorTree &MDT,
                                   const TargetInstrInfo *TII) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  // List of instructions marked for deletion.
  SmallSet<MachineInstr*, 8> MergedInstrs;

  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {

          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and is located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MergedInstrs.insert(MI2);
            Changed = true;
            ++I2;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << " and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  // Remove initializations that were merged into another.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    auto I = Defs.begin();
    while (I != Defs.end()) {
      if (MergedInstrs.count(*I)) {
        (*I)->eraseFromParent();
        I = Defs.erase(I);
      } else
        ++I;
    }
  }

  // Try to schedule SGPR initializations as early as possible in the MBB.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    for (auto MI : Defs) {
      auto MBB = MI->getParent();
      MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
      MachineBasicBlock::reverse_iterator B(BoundaryMI);
      // Check if B should actually be a boundary. If not set the previous
      // instruction as the boundary instead.
      if (!TII->isBasicBlockPrologue(*B))
        B++;

      auto R = std::next(MI->getReverseIterator());
      const unsigned Threshold = 50;
      // Search until B or Threshold for a place to insert the initialization.
      for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
        if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
            TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
          break;

      // Move to directly after R.
      if (&*--R != MI)
        MBB->splice(*R, MBB, MI);
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  // Only need to run this in SelectionDAG path.
  if (MF.getProperties().hasProperty(
        MachineFunctionProperties::Property::Selected))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock *MBB = &*BI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
         ++I) {
      MachineInstr &MI = *I;
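
      // Note: TII->moveToVALU may create a new basic block (for instance when
      // legalizing operands requires new control flow). When that happens, the
      // cases below re-point MBB and the iterator bounds so the walk resumes
      // in the newly created block.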
      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::STRICT_WQM:
      case AMDGPU::SOFT_WQM:
      case AMDGPU::STRICT_WWM: {
        Register DstReg = MI.getOperand(0).getReg();

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);

        if (!DstReg.isVirtual()) {
          // If the destination register is a physical register there isn't
          // really much we can do to fix this.
          // Some special instructions use M0 as an input. Some even only use
          // the first lane. Insert a readfirstlane and hope for the best.
          if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
            Register TmpReg
              = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

            BuildMI(*MBB, MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
                .add(MI.getOperand(1));
            MI.getOperand(1).setReg(TmpReg);
          }

          continue;
        }

        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          Register SrcReg = MI.getOperand(1).getReg();
          if (!SrcReg.isVirtual()) {
            MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
            if (NewBB && NewBB != MBB) {
              MBB = NewBB;
              E = MBB->end();
              BI = MachineFunction::iterator(MBB);
              BE = MF.end();
            }
            assert((!NewBB || NewBB == I->getParent()) &&
                   "moveToVALU did not return the right basic block");
            break;
          }

          MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
          if (NewBB && NewBB != MBB) {
            MBB = NewBB;
            E = MBB->end();
            BI = MachineFunction::iterator(MBB);
            BE = MF.end();
          }
          assert((!NewBB || NewBB == I->getParent()) &&
                 "moveToVALU did not return the right basic block");
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        MachineBasicBlock *NewBB = processPHINode(MI);
        if (NewBB && NewBB != MBB) {
          MBB = NewBB;
          E = MBB->end();
          BI = MachineFunction::iterator(MBB);
          BE = MF.end();
        }
        assert((!NewBB || NewBB == I->getParent()) &&
               "moveToVALU did not return the right basic block");
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVectorRegisters(TII->getOpRegClass(MI, 0)) ||
            !hasVectorOperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, *MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
        if (NewBB && NewBB != MBB) {
          MBB = NewBB;
          E = MBB->end();
          BI = MachineFunction::iterator(MBB);
          BE = MF.end();
        }
        assert((!NewBB || NewBB == I->getParent()) &&
               "moveToVALU did not return the right basic block");
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI->getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI->getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI->getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVectorRegisters(Src0RC) ||
             TRI->hasVectorRegisters(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          MachineBasicBlock *NewBB = TII->moveToVALU(MI, MDT);
          if (NewBB && NewBB != MBB) {
            MBB = NewBB;
            E = MBB->end();
            BI = MachineFunction::iterator(MBB);
            BE = MF.end();
          }
          assert((!NewBB || NewBB == I->getParent()) &&
                 "moveToVALU did not return the right basic block");
        }
        break;
      }
      case AMDGPU::V_WRITELANE_B32: {
        // Some architectures allow more than one constant bus access without
        // SGPR restriction
        if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
          break;

        // Writelane is special in that it can use SGPR and M0 (which would
        // normally count as using the constant bus twice - but in this case it
        // is allowed since the lane selector doesn't count as a use of the
        // constant bus). However, it is still required to abide by the 1 SGPR
        // rule. Apply a fix here as we might have multiple SGPRs after
        // legalizing VGPRs to SGPRs
        int Src0Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
        int Src1Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
        MachineOperand &Src0 = MI.getOperand(Src0Idx);
        MachineOperand &Src1 = MI.getOperand(Src1Idx);

        // Check to see if the instruction violates the 1 SGPR rule
        if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
             Src0.getReg() != AMDGPU::M0) &&
            (Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
             Src1.getReg() != AMDGPU::M0)) {

          // Check for trivially easy constant prop into one of the operands
          // If this is the case then perform the operation now to resolve SGPR
          // issue. If we don't do that here we will always insert a mov to m0
          // that can't be resolved in later operand folding pass
          bool Resolved = false;
          for (MachineOperand *MO : {&Src0, &Src1}) {
            if (MO->getReg().isVirtual()) {
              MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
              if (DefMI && TII->isFoldableCopy(*DefMI)) {
                const MachineOperand &Def = DefMI->getOperand(0);
                if (Def.isReg() &&
                    MO->getReg() == Def.getReg() &&
                    MO->getSubReg() == Def.getSubReg()) {
                  const MachineOperand &Copied = DefMI->getOperand(1);
                  if (Copied.isImm() &&
                      TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
                    MO->ChangeToImmediate(Copied.getImm());
                    Resolved = true;
                    break;
                  }
                }
              }
            }
          }

          if (!Resolved) {
            // Haven't managed to resolve by replacing an SGPR with an immediate
            // Move src1 to be in M0
            BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::COPY), AMDGPU::M0)
                .add(Src1);
            Src1.ChangeToRegister(AMDGPU::M0, false);
          }
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);

  return true;
}
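
// Decide how to legalize a PHI: count the VGPR uses reachable through copies
// and reg_sequences, move the PHI's result to an AGPR class or to the VALU
// when its uses or inputs require vector registers, and otherwise legalize
// the PHI's operands in place. Returns the basic block created by moveToVALU,
// if any.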
MachineBasicBlock *SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
  unsigned numVGPRUses = 0;
  bool AllAGPRUses = true;
  SetVector<const MachineInstr *> worklist;
  SmallSet<const MachineInstr *, 4> Visited;
  SetVector<MachineInstr *> PHIOperands;
  MachineBasicBlock *CreatedBB = nullptr;
  worklist.insert(&MI);
  Visited.insert(&MI);
  while (!worklist.empty()) {
    const MachineInstr *Instr = worklist.pop_back_val();
    Register Reg = Instr->getOperand(0).getReg();
    for (const auto &Use : MRI->use_operands(Reg)) {
      const MachineInstr *UseMI = Use.getParent();
      AllAGPRUses &= (UseMI->isCopy() &&
                      TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
                     TRI->isAGPR(*MRI, Use.getReg());
      if (UseMI->isCopy() || UseMI->isRegSequence()) {
        if (UseMI->isCopy() &&
            UseMI->getOperand(0).getReg().isPhysical() &&
            !TRI->isSGPRReg(*MRI, UseMI->getOperand(0).getReg())) {
          numVGPRUses++;
        }
        if (Visited.insert(UseMI).second)
          worklist.insert(UseMI);

        continue;
      }

      if (UseMI->isPHI()) {
        const TargetRegisterClass *UseRC = MRI->getRegClass(Use.getReg());
        if (!TRI->isSGPRReg(*MRI, Use.getReg()) &&
            UseRC != &AMDGPU::VReg_1RegClass)
          numVGPRUses++;
        continue;
      }

      const TargetRegisterClass *OpRC =
          TII->getOpRegClass(*UseMI, UseMI->getOperandNo(&Use));
      if (!TRI->isSGPRClass(OpRC) && OpRC != &AMDGPU::VS_32RegClass &&
          OpRC != &AMDGPU::VS_64RegClass) {
        numVGPRUses++;
      }
    }
  }

  Register PHIRes = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
  if (AllAGPRUses && numVGPRUses && !TRI->hasAGPRs(RC0)) {
    LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
    MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
    for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
      MachineInstr *DefMI = MRI->getVRegDef(MI.getOperand(I).getReg());
      if (DefMI && DefMI->isPHI())
        PHIOperands.insert(DefMI);
    }
  }

  bool hasVGPRInput = false;
  for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
    Register InputReg = MI.getOperand(i).getReg();
    MachineInstr *Def = MRI->getVRegDef(InputReg);
    if (TRI->isVectorRegister(*MRI, InputReg)) {
      if (Def->isCopy()) {
        Register SrcReg = Def->getOperand(1).getReg();
        const TargetRegisterClass *RC =
            TRI->getRegClassForReg(*MRI, SrcReg);
        if (TRI->isSGPRClass(RC))
          continue;
      }
      hasVGPRInput = true;
      break;
    }
    else if (Def->isCopy() &&
             TRI->isVectorRegister(*MRI, Def->getOperand(1).getReg())) {
      Register SrcReg = Def->getOperand(1).getReg();
      MachineInstr *SrcDef = MRI->getVRegDef(SrcReg);
      unsigned SMovOp;
      int64_t Imm;
      if (!isSafeToFoldImmIntoCopy(Def, SrcDef, TII, SMovOp, Imm)) {
        hasVGPRInput = true;
        break;
      } else {
        // Formally, if we did not do this right away
        // it would be done on the next iteration of the
        // runOnMachineFunction main loop. But why not if we can?
        MachineFunction *MF = MI.getParent()->getParent();
        Def->getOperand(1).ChangeToImmediate(Imm);
        Def->addImplicitDefUseOperands(*MF);
        Def->setDesc(TII->get(SMovOp));
      }
    }
  }

  if ((!TRI->isVectorRegister(*MRI, PHIRes) &&
       RC0 != &AMDGPU::VReg_1RegClass) &&
      (hasVGPRInput || numVGPRUses > 1)) {
    LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
    CreatedBB = TII->moveToVALU(MI);
  }
  else {
    LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
    TII->legalizeOperands(MI, MDT);
  }

  // Propagate register class back to PHI operands which are PHI themselves.
  while (!PHIOperands.empty()) {
    processPHINode(*PHIOperands.pop_back_val());
  }

  return CreatedBB;
}