//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

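// Select a plain COPY by constraining its virtual destination register to
// GPR32. Physical destination registers are left untouched; at this point
// every generic value handled by this selector lives in a 32-bit GPR.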
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;

  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

/// Returning Opc indicates that we failed to select a MIPS instruction opcode.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes) {
  if (Opc == TargetOpcode::G_STORE)
    switch (MemSizeInBytes) {
    case 4:
      return Mips::SW;
    case 2:
      return Mips::SH;
    case 1:
      return Mips::SB;
    default:
      return Opc;
    }
  else
    // An unspecified extending load is selected into a zero-extending load.
    switch (MemSizeInBytes) {
    case 4:
      return Mips::LW;
    case 2:
      return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
    case 1:
      return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
    default:
      return Opc;
    }
}

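// Manual fallback selection for generic opcodes that the TableGen-erated
// selectImpl() does not handle. Each case either mutates the instruction in
// place or builds replacement MIPS instructions and erases the generic one.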
bool MipsInstructionSelector::select(MachineInstr &I,
                                     CodeGenCoverage &CoverageInfo) const {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (selectImpl(I, CoverageInfo)) {
    return true;
  }

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
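  // G_GEP: pointer arithmetic on MIPS32 is a plain 32-bit add, so the pointer
  // and the offset feed directly into ADDu.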
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
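  // G_FRAME_INDEX: compute the address of a stack slot as ADDiu <fi>, 0. The
  // frame-index operand is rewritten into a concrete stack offset later,
  // during frame index elimination.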
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
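  // G_BRCOND: branch when the condition register holds a non-zero value,
  // i.e. BNE $cond, $zero, <target>.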
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
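  // G_PHI: only 32-bit values on the GPR bank are supported. The generic PHI
  // becomes a target-independent PHI with its result constrained to GPR32.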
  case G_PHI: {
    const unsigned DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    if (DestRegBank != Mips::GPRBRegBankID || OpSize != 32)
      return false;

    const TargetRegisterClass *DefRC = &Mips::GPR32RegClass;
    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
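  // Loads and stores: the memory size picks the instruction (4 -> LW/SW,
  // 2 -> LH/LHu/SH, 1 -> LB/LBu/SB). The pointer operand is used as the base
  // register with a zero immediate offset.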
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const unsigned DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();

    if (DestRegBank != Mips::GPRBRegBankID || OpSize != 32)
      return false;

    const unsigned NewOpc =
        selectLoadStoreOpCode(I.getOpcode(), OpMemSizeInBytes);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
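  // Division and remainder go through the HI/LO accumulator: PseudoSDIV or
  // PseudoUDIV writes both halves into an ACC64 register, then PseudoMFLO
  // (quotient) or PseudoMFHI (remainder) moves the requested half into the
  // result register.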
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
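  // G_SELECT: MOVN_I_I starts from the false value and conditionally
  // overwrites the destination with the true value when the condition
  // register is non-zero.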
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
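  // G_CONSTANT: materialize a 32-bit immediate as LUi (upper 16 bits)
  // followed by ORi (lower 16 bits). For example, 0x00120034 becomes
  // LUi $tmp, 0x0012 and then ORi $dst, $tmp, 0x0034.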
  case G_CONSTANT: {
    int Imm = I.getOperand(1).getCImm()->getValue().getLimitedValue();
    unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LUi, *ORi;

    LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(LUiReg)
              .addImm(Imm >> 16);

    ORi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ORi))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addImm(Imm & 0xFFFF);

    if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
      return false;
    if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
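  // G_GLOBAL_VALUE: only the non-PIC case is handled. The absolute address is
  // built as LUi %hi(sym) followed by ADDiu %lo(sym), using the MO_ABS_HI and
  // MO_ABS_LO target flags on the global-address operands.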
  case G_GLOBAL_VALUE: {
    if (MF.getTarget().isPositionIndependent())
      return false;

    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LUi, *ADDiu;

    LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(LUiReg)
              .addGlobalAddress(GVal);
    LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);

    ADDiu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addUse(LUiReg)
                .addGlobalAddress(GVal);
    ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);

    if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
      return false;
    if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
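  // G_ICMP: MIPS has no compare-into-register instruction for every
  // predicate, so each comparison is expanded into one or two set-on-less-than
  // style instructions (SLT, SLTu, SLTiu) plus XOR/XORi, leaving 0 or 1 in the
  // destination. For example, LHS == RHS becomes
  // XOR $tmp, $lhs, $rhs; SLTiu $dst, $tmp, 1.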
  case G_ICMP: {
    struct Instr {
      unsigned Opcode, Def, LHS, RHS;
      Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS) {}

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    unsigned ICMPReg = I.getOperand(0).getReg();
    unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    unsigned LHS = I.getOperand(2).getReg();
    unsigned RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm