//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
                           const RegisterBankInfo &RBI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

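// For a COPY whose destination is a virtual register, pick a register class
// from the destination's register bank and size and constrain the vreg to it.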
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    unsigned OpSize, const RegisterBank &RB,
    const RegisterBankInfo &RBI) const {
  if (RB.getID() == Mips::GPRBRegBankID)
    return &Mips::GPR32RegClass;

  if (RB.getID() == Mips::FPRBRegBankID)
    return OpSize == 32
               ? &Mips::FGR32RegClass
               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
                                                      : &Mips::AFGR64RegClass;

  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
  return nullptr;
}

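// Put the 32-bit immediate Imm into DestReg with the shortest sequence the
// value allows: a single ORi, LUi, or ADDiu when one suffices, otherwise a
// LUi of the high half followed by an ORi of the low half.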
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero extends its immediate. Used for values with zeros in the high 16
  // bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends its immediate. Used for values with ones in the high 17
  // bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning Opc indicates that we failed to select a MIPS instruction opcode.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
                                      unsigned RegBank, bool isFP64) {
  bool isStore = Opc == TargetOpcode::G_STORE;
  if (RegBank == Mips::GPRBRegBankID) {
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An unspecified extending load is selected into a zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (RegBank == Mips::FPRBRegBankID) {
    switch (MemSizeInBytes) {
    case 4:
      return isStore ? Mips::SWC1 : Mips::LWC1;
    case 8:
      if (isFP64)
        return isStore ? Mips::SDC164 : Mips::LDC164;
      else
        return isStore ? Mips::SDC1 : Mips::LDC1;
    default:
      return Opc;
    }
  }

  return Opc;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

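  // G_MUL is handled by hand rather than by selectImpl so that the implicit
  // def operands added to MUL (operands 3 and 4) can be marked dead below.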
  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
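    // Multiply into the ACC64 (HI/LO) accumulator with PseudoMULTu, then copy
    // the high half of the product out with PseudoMFHI.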
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_BRJT: {
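    // Lower the jump-table branch: scale the index by the entry size, add the
    // jump-table address, load the destination address (adding the GOT base
    // register under PIC), and branch to it indirectly.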
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(OpSize,
                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();

    if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
      return false;

    if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
      return false;

    const unsigned NewOpc = selectLoadStoreOpCode(
        I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_GEP + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_GEP) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
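    // PseudoSDIV/PseudoUDIV write both quotient and remainder into the ACC64
    // (HI/LO) accumulator; PseudoMFLO or PseudoMFHI then reads back the half
    // this opcode asks for.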
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .add(I.getOperand(0));

    // Set the class based on the register bank; there can be both FPR and GPR
    // implicit defs.
    MRI.setRegClass(MI->getOperand(0).getReg(),
                    getRegClassForTypeOnBank(
                        MRI.getType(I.getOperand(0).getReg()).getSizeInBits(),
                        *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI),
                        RBI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
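    // Materialize the constant's bit pattern in GPRs first, then move it to
    // the FPU: a single MTC1 for 32-bit values, or two GPRs combined with
    // BuildPairF64(_64) for 64-bit values.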
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
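    // Convert in the FPU with TRUNC.W.fmt into a 32-bit FPR, then move the
    // integer result to a GPR with MFC1.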
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
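    // Under PIC the address comes from a GOT load (followed by an ADDiu of the
    // low offset for symbols with local linkage); otherwise it is built from
    // the absolute high/low halves with LUi/ADDiu.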
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
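    // Under PIC the jump-table address is loaded from the GOT; otherwise only
    // the high half is produced here with LUi, and the G_BRJT lowering above
    // folds the low half (MO_ABS_LO) into its LW.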
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
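    // Expand the integer compare into one or two GPR instructions, collected
    // in Instructions and emitted below; SLTiu and XORi take their right-hand
    // operand as an immediate, the rest take a register.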
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
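    // Map the IR predicate onto a MIPS FP condition code. Each unordered and
    // ordered pair of predicates shares one condition code and differs only in
    // whether the compare result has to be logically negated.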
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The compare result in the GPR defaults to `true`. When the FP compare
    // comes out false, MOVF_I moves `false` (Mips::ZERO) into the GPR result.
    // When the original predicate (Cond) is the logical negation of
    // MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used instead.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
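    // Take the address of the variable-argument save-area frame index with
    // LEA_ADDiu and store it through the va_list pointer operand.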
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm