//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// MIPS.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
                           const RegisterBankInfo &RBI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace
#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}
const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    unsigned OpSize, const RegisterBank &RB,
    const RegisterBankInfo &RBI) const {
  if (RB.getID() == Mips::GPRBRegBankID)
    return &Mips::GPR32RegClass;

  if (RB.getID() == Mips::FPRBRegBankID)
    return OpSize == 32
               ? &Mips::FGR32RegClass
               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
                                                      : &Mips::AFGR64RegClass;

  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
}
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}
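// Example (derived from the cases above): 0x00005678 is emitted as a single
// "ORi $dst, $zero, 0x5678", 0xabcd0000 as a single "LUi $dst, 0xabcd", and a
// value whose top 17 bits are all ones as a single ADDiu from $zero (the
// 16-bit immediate is sign extended). Anything else, e.g. 0x12345678, falls
// through to the two-instruction sequence "LUi $tmp, 0x1234" +
// "ORi $dst, $tmp, 0x5678".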
/// Returning Opc indicates that we failed to select a MIPS instruction opcode.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  STI.getRegisterInfo();
  const Register DestReg = I.getOperand(0).getReg();
  const unsigned RegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;
  if (RegBank == Mips::GPRBRegBankID) {
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (RegBank == Mips::FPRBRegBankID) {
    switch (MemSizeInBytes) {
    case 4:
      return isStore ? Mips::SWC1 : Mips::LWC1;
    case 8:
      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      else
        return isStore ? Mips::SDC1 : Mips::LDC1;
    case 16: {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      const unsigned VectorElementSizeInBytes =
          MRI.getType(DestReg).getElementType().getSizeInBytes();
      if (VectorElementSizeInBytes == 1)
        return isStore ? Mips::ST_B : Mips::LD_B;
      if (VectorElementSizeInBytes == 2)
        return isStore ? Mips::ST_H : Mips::LD_H;
      if (VectorElementSizeInBytes == 4)
        return isStore ? Mips::ST_W : Mips::LD_W;
      if (VectorElementSizeInBytes == 8)
        return isStore ? Mips::ST_D : Mips::LD_D;
      return Opc;
    }
    default:
      return Opc;
    }
  }
  return Opc;
}
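// For example, a 2-byte G_ZEXTLOAD (or plain G_LOAD) on the GPR bank becomes
// LHu while a G_SEXTLOAD becomes LH, and a 16-byte MSA load with 4-byte
// elements becomes LD_W; anything unhandled returns I.getOpcode() so the
// caller can detect the failure.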
bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }
  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }
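  // Operands 3 and 4 of the selected MUL are the implicit register operands
  // appended from its instruction description; nothing here reads them, so
  // they are marked dead above.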
  if (selectImpl(I, *CoverageInfo))
    return true;
  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_BRJT: {
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
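  // The sequence built above: SLL scales the index by the jump-table entry
  // size, ADDu adds it to the table address, LW loads the branch target
  // (with the %lo part of the jump-table symbol folded into its offset), an
  // extra ADDu adds the global base register for PIC, and
  // PseudoIndirectBranch jumps to the loaded address.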
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(OpSize,
                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_GEP + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_GEP) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
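  // All four opcodes expand to a divide into the HI/LO accumulator followed
  // by a move out of it: e.g. G_SREM becomes PseudoSDIV + PseudoMFHI (the
  // remainder lives in HI), while G_UDIV becomes PseudoUDIV + PseudoMFLO.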
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .add(I.getOperand(0));

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(MI->getOperand(0).getReg(),
                    getRegClassForTypeOnBank(
                        MRI.getType(I.getOperand(0).getReg()).getSizeInBits(),
                        *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI),
                        RBI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
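  // A 32-bit FP constant is materialized in a GPR and moved with MTC1; a
  // 64-bit one is split into two 32-bit halves and combined with
  // BuildPairF64(_64), e.g. double 1.0 uses high word 0x3ff00000 and low
  // word 0x0.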
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
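  // e.g. an f32 source becomes TRUNC.W.S into a temporary FGR32 register
  // followed by MFC1 into the integer result; f64 uses TRUNC.W.D32/D64
  // depending on the FP register mode.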
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
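  // In PIC the address is loaded from the GOT; for globals with local
  // linkage an additional ADDiu with the %lo part is emitted. In non-PIC the
  // address is composed directly with LUi %hi + ADDiu %lo.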
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
               .addDef(I.getOperand(0).getReg())
               .addJumpTableIndex(I.getOperand(1).getIndex(),
                                  MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
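  // Worked example: for ICMP_SGE the loop above emits
  //   SLT  %temp, %lhs, %rhs
  //   XORi %dst, %temp, 1
  // i.e. the result of the inverse compare is flipped with an immediate XOR.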
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }
    // Default compare result in gpr register will be `true`.
    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
    // using MOVF_I. When the original predicate (Cond) is the logically
    // negated MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);
    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
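  // Example: fcmp ugt maps to FCOND_OLE with logical negation, so the result
  // register is preloaded with 1 and MOVT_I overwrites it with $zero when
  // FCC0 (set by the OLE compare) is true; without negation MOVF_I is used
  // instead.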
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }
  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}
namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm