//===-- SIFixupVectorISel.cpp - Fixup post ISel vector issues ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// SIFixupVectorISel pass cleans up post-ISel vector issues.
/// Currently this will convert GLOBAL_{LOAD|STORE}_*
/// and GLOBAL_Atomic_* instructions into their _SADDR variants,
/// feeding the sreg into the saddr field of the new instruction.
/// We currently handle a REG_SEQUENCE feeding the vaddr
/// and decompose it into a base and index.
///
/// Transform:
/// %17:vgpr_32, %19:sreg_64_xexec = V_ADD_I32_e64 %21:sgpr_32, %22:vgpr_32
/// %18:vgpr_32, %20:sreg_64_xexec = V_ADDC_U32_e64 %25:vgpr_32,
///                                    %24:vgpr_32, %19:sreg_64_xexec
/// %16:vreg_64 = REG_SEQUENCE %17:vgpr_32, %sub0, %18:vgpr_32, %sub1
/// %11:vreg_64 = COPY %16:vreg_64
/// %10:vgpr_32 = GLOBAL_LOAD_DWORD killed %11:vreg_64, 16, 0, 0
/// Into:
/// %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1:sgpr_64, 36, 0
/// %14:vreg_64 = REG_SEQUENCE %6:vgpr_32, %sub0, %15:vgpr_32, %sub1
/// %10:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %14:vreg_64, %4:sreg_64_xexec, 16...
///
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fixup-vector-isel"

using namespace llvm;
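
// Command-line gate for the GLOBAL_* -> GLOBAL_*_SADDR rewrite below; off by
// default, typically enabled for testing via
// llc -amdgpu-enable-global-sgpr-addr.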
static cl::opt<bool> EnableGlobalSGPRAddr(
    "amdgpu-enable-global-sgpr-addr",
    cl::desc("Enable use of SGPR regs for GLOBAL LOAD/STORE instructions"),
    cl::init(false));
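
// NumSGPRGlobalOccurs counts every global load/store/atomic that has a _SADDR
// form; NumSGPRGlobalSaddrs counts the ones actually converted.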
STATISTIC(NumSGPRGlobalOccurs, "Number of global ld/st opportunities");
STATISTIC(NumSGPRGlobalSaddrs, "Number of global sgpr instructions converted");

namespace {

class SIFixupVectorISel : public MachineFunctionPass {
public:
  static char ID;

  SIFixupVectorISel() : MachineFunctionPass(ID) {
    initializeSIFixupVectorISelPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFixupVectorISel, DEBUG_TYPE,
                "SI Fixup Vector ISel", false, false)

char SIFixupVectorISel::ID = 0;

char &llvm::SIFixupVectorISelID = SIFixupVectorISel::ID;

FunctionPass *llvm::createSIFixupVectorISelPass() {
  return new SIFixupVectorISel();
}
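
// Try to prove that the address feeding a global memory op is
// "SGPR base + VGPR index": chase the vaddr value through COPYs and a
// REG_SEQUENCE of exactly two sub-registers down to the V_ADD_I32_e64
// produced when an ADD_64_PSEUDO was lowered, then chase both addends back
// to 64-bit registers. On success BaseReg/IndexReg are set, their kill flags
// are cleared (the new instruction will reuse them), and true is returned.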
static bool findSRegBaseAndIndex(MachineOperand *Op,
                                 unsigned &BaseReg,
                                 unsigned &IndexReg,
                                 MachineRegisterInfo &MRI,
                                 const SIRegisterInfo *TRI) {
  SmallVector<MachineOperand *, 8> Worklist;
  Worklist.push_back(Op);
  while (!Worklist.empty()) {
    MachineOperand *WOp = Worklist.pop_back_val();
    if (!WOp->isReg() || !Register::isVirtualRegister(WOp->getReg()))
      continue;
    MachineInstr *DefInst = MRI.getUniqueVRegDef(WOp->getReg());
    switch (DefInst->getOpcode()) {
    default:
      continue;
    case AMDGPU::COPY:
      Worklist.push_back(&DefInst->getOperand(1));
      break;
    case AMDGPU::REG_SEQUENCE:
      if (DefInst->getNumOperands() != 5)
        continue;
      Worklist.push_back(&DefInst->getOperand(1));
      Worklist.push_back(&DefInst->getOperand(3));
      break;
    case AMDGPU::V_ADD_I32_e64:
      // The V_ADD_* and its analogous V_ADDC_* are generated by
      // a previous pass which lowered from an ADD_64_PSEUDO,
      // which generates subregs to break up the 64 bit args.
      if (DefInst->getOperand(2).getSubReg() != AMDGPU::NoSubRegister)
        continue;
      BaseReg = DefInst->getOperand(2).getReg();
      if (DefInst->getOperand(3).getSubReg() != AMDGPU::NoSubRegister)
        continue;
      IndexReg = DefInst->getOperand(3).getReg();
      // Chase the IndexReg.
      MachineInstr *MI = MRI.getUniqueVRegDef(IndexReg);
      if (!MI || !MI->isCopy())
        continue;
      // Make sure the reg class is 64 bit for Index.
      // If the Index register is a subreg, we want it to reference
      // a 64 bit register which we will use as the Index reg.
      const TargetRegisterClass *IdxRC, *BaseRC;
      IdxRC = MRI.getRegClass(MI->getOperand(1).getReg());
      if (AMDGPU::getRegBitWidth(IdxRC->getID()) != 64)
        continue;
      IndexReg = MI->getOperand(1).getReg();
      // Chase the BaseReg.
      MI = MRI.getUniqueVRegDef(BaseReg);
      if (!MI || !MI->isCopy())
        continue;
      // Make sure the register class is 64 bit for Base.
      BaseReg = MI->getOperand(1).getReg();
      BaseRC = MRI.getRegClass(BaseReg);
      if (AMDGPU::getRegBitWidth(BaseRC->getID()) != 64)
        continue;
      // Make sure Base is SReg and Index is VReg.
      if (!TRI->isSGPRReg(MRI, BaseReg))
        return false;
      if (!TRI->hasVGPRs(MRI.getRegClass(IndexReg)))
        return false;
      // Clear any killed flags on Index and Base regs, used later.
      MRI.clearKillFlags(IndexReg);
      MRI.clearKillFlags(BaseReg);
      return true;
    }
  }
  return false;
}

// Identify Global LOAD|STORE/ATOMIC and try to convert to _SADDR.
static bool fixupGlobalSaddr(MachineBasicBlock &MBB,
                             MachineFunction &MF,
                             MachineRegisterInfo &MRI,
                             const GCNSubtarget &ST,
                             const SIInstrInfo *TII,
                             const SIRegisterInfo *TRI) {
  if (!EnableGlobalSGPRAddr)
    return false;
  bool FuncModified = false;
  MachineBasicBlock::iterator I, Next;
  for (I = MBB.begin(); I != MBB.end(); I = Next) {
    Next = std::next(I);
    MachineInstr &MI = *I;

    int NewOpcd = AMDGPU::getGlobalSaddrOp(MI.getOpcode());
    if (NewOpcd < 0)
      continue;
    // Update our statistics on opportunities seen.
    ++NumSGPRGlobalOccurs;
    LLVM_DEBUG(dbgs() << "Global Mem opp " << MI << '\n');
    // Need a Base and Index or we can't transform to _SADDR.
    unsigned BaseReg = 0;
    unsigned IndexReg = 0;
    MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (!findSRegBaseAndIndex(Op, BaseReg, IndexReg, MRI, TRI))
      continue;
    ++NumSGPRGlobalSaddrs;
    FuncModified = true;
    // Create the new _SADDR Memory instruction.
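    // Operand order on the _SADDR form: vdst (loads), vaddr = IndexReg, vdata
    // (stores/atomics), saddr = BaseReg, offset, then glc/dlc when the old
    // instruction has them, slc, and vdst_in for _D16 variants.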
    bool HasVdst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst) != nullptr;
    MachineOperand *VData = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
    MachineInstr *NewGlob =
        BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcd));
    if (HasVdst)
      NewGlob->addOperand(MF, MI.getOperand(0));
    NewGlob->addOperand(MF, MachineOperand::CreateReg(IndexReg, false));
    if (VData)
      NewGlob->addOperand(MF, *VData);
    NewGlob->addOperand(MF, MachineOperand::CreateReg(BaseReg, false));
    NewGlob->addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::offset));

    MachineOperand *Glc = TII->getNamedOperand(MI, AMDGPU::OpName::glc);
    // Atomics don't have a GLC, so omit the field if not there.
    if (Glc)
      NewGlob->addOperand(MF, *Glc);

    MachineOperand *DLC = TII->getNamedOperand(MI, AMDGPU::OpName::dlc);
    if (DLC)
      NewGlob->addOperand(MF, *DLC);

    NewGlob->addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::slc));
    // _D16 variants have a vdst_in operand, copy it in.
    MachineOperand *VDstInOp = TII->getNamedOperand(MI,
                                      AMDGPU::OpName::vdst_in);
    if (VDstInOp)
      NewGlob->addOperand(MF, *VDstInOp);
    NewGlob->copyImplicitOps(MF, MI);
    NewGlob->cloneMemRefs(MF, MI);
    // Remove the old Global Memop instruction.
    MI.eraseFromParent();
    LLVM_DEBUG(dbgs() << "New Global Mem " << *NewGlob << '\n');
  }
  return FuncModified;
}
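
// Pass entry point: walk every block of the function and rewrite the global
// memory operations whose address can be split into an SGPR base plus a VGPR
// index.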
bool SIFixupVectorISel::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  bool FuncModified = false;
  for (MachineBasicBlock &MBB : MF) {
    // Cleanup missed Saddr opportunities from ISel.
    FuncModified |= fixupGlobalSaddr(MBB, MF, MRI, ST, TII, TRI);
  }
  return FuncModified;
}