1 //===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
/// This file implements the targeting of the InstructionSelector class for
/// MIPS.
/// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
14 #include "MCTargetDesc/MipsInstPrinter.h"
15 #include "MipsMachineFunction.h"
16 #include "MipsRegisterBankInfo.h"
17 #include "MipsTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21 #define DEBUG_TYPE "mips-isel"
27 #define GET_GLOBALISEL_PREDICATE_BITSET
28 #include "MipsGenGlobalISel.inc"
29 #undef GET_GLOBALISEL_PREDICATE_BITSET
31 class MipsInstructionSelector
: public InstructionSelector
{
33 MipsInstructionSelector(const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
34 const MipsRegisterBankInfo
&RBI
);
36 bool select(MachineInstr
&I
, CodeGenCoverage
&CoverageInfo
) const override
;
37 static const char *getName() { return DEBUG_TYPE
; }
40 bool selectImpl(MachineInstr
&I
, CodeGenCoverage
&CoverageInfo
) const;
41 bool materialize32BitImm(Register DestReg
, APInt Imm
,
42 MachineIRBuilder
&B
) const;
43 bool selectCopy(MachineInstr
&I
, MachineRegisterInfo
&MRI
) const;
44 const TargetRegisterClass
*
45 getRegClassForTypeOnBank(unsigned OpSize
, const RegisterBank
&RB
,
46 const RegisterBankInfo
&RBI
) const;
48 const MipsTargetMachine
&TM
;
49 const MipsSubtarget
&STI
;
50 const MipsInstrInfo
&TII
;
51 const MipsRegisterInfo
&TRI
;
52 const MipsRegisterBankInfo
&RBI
;
54 #define GET_GLOBALISEL_PREDICATES_DECL
55 #include "MipsGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_PREDICATES_DECL
58 #define GET_GLOBALISEL_TEMPORARIES_DECL
59 #include "MipsGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_DECL
63 } // end anonymous namespace
65 #define GET_GLOBALISEL_IMPL
66 #include "MipsGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_IMPL
69 MipsInstructionSelector::MipsInstructionSelector(
70 const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
71 const MipsRegisterBankInfo
&RBI
)
72 : InstructionSelector(), TM(TM
), STI(STI
), TII(*STI
.getInstrInfo()),
73 TRI(*STI
.getRegisterInfo()), RBI(RBI
),
75 #define GET_GLOBALISEL_PREDICATES_INIT
76 #include "MipsGenGlobalISel.inc"
77 #undef GET_GLOBALISEL_PREDICATES_INIT
78 #define GET_GLOBALISEL_TEMPORARIES_INIT
79 #include "MipsGenGlobalISel.inc"
80 #undef GET_GLOBALISEL_TEMPORARIES_INIT
84 bool MipsInstructionSelector::selectCopy(MachineInstr
&I
,
85 MachineRegisterInfo
&MRI
) const {
86 Register DstReg
= I
.getOperand(0).getReg();
87 if (TargetRegisterInfo::isPhysicalRegister(DstReg
))
90 const RegisterBank
*RegBank
= RBI
.getRegBank(DstReg
, MRI
, TRI
);
91 const unsigned DstSize
= MRI
.getType(DstReg
).getSizeInBits();
93 const TargetRegisterClass
*RC
= &Mips::GPR32RegClass
;
94 if (RegBank
->getID() == Mips::FPRBRegBankID
) {
96 RC
= &Mips::FGR32RegClass
;
97 else if (DstSize
== 64)
98 RC
= STI
.isFP64bit() ? &Mips::FGR64RegClass
: &Mips::AFGR64RegClass
;
100 llvm_unreachable("Unsupported destination size");
102 if (!RBI
.constrainGenericRegister(DstReg
, *RC
, MRI
)) {
103 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII
.getName(I
.getOpcode())
110 const TargetRegisterClass
*MipsInstructionSelector::getRegClassForTypeOnBank(
111 unsigned OpSize
, const RegisterBank
&RB
,
112 const RegisterBankInfo
&RBI
) const {
113 if (RB
.getID() == Mips::GPRBRegBankID
)
114 return &Mips::GPR32RegClass
;
116 if (RB
.getID() == Mips::FPRBRegBankID
)
118 ? &Mips::FGR32RegClass
119 : STI
.hasMips32r6() || STI
.isFP64bit() ? &Mips::FGR64RegClass
120 : &Mips::AFGR64RegClass
;
122 llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
126 bool MipsInstructionSelector::materialize32BitImm(Register DestReg
, APInt Imm
,
127 MachineIRBuilder
&B
) const {
128 assert(Imm
.getBitWidth() == 32 && "Unsupported immediate size.");
129 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
130 if (Imm
.getHiBits(16).isNullValue()) {
131 MachineInstr
*Inst
= B
.buildInstr(Mips::ORi
, {DestReg
}, {Register(Mips::ZERO
)})
132 .addImm(Imm
.getLoBits(16).getLimitedValue());
133 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
135 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
136 if (Imm
.getLoBits(16).isNullValue()) {
137 MachineInstr
*Inst
= B
.buildInstr(Mips::LUi
, {DestReg
}, {})
138 .addImm(Imm
.getHiBits(16).getLimitedValue());
139 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
141 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
142 if (Imm
.isSignedIntN(16)) {
143 MachineInstr
*Inst
= B
.buildInstr(Mips::ADDiu
, {DestReg
}, {Register(Mips::ZERO
)})
144 .addImm(Imm
.getLoBits(16).getLimitedValue());
145 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
147 // Values that cannot be materialized with single immediate instruction.
148 Register LUiReg
= B
.getMRI()->createVirtualRegister(&Mips::GPR32RegClass
);
149 MachineInstr
*LUi
= B
.buildInstr(Mips::LUi
, {LUiReg
}, {})
150 .addImm(Imm
.getHiBits(16).getLimitedValue());
151 MachineInstr
*ORi
= B
.buildInstr(Mips::ORi
, {DestReg
}, {LUiReg
})
152 .addImm(Imm
.getLoBits(16).getLimitedValue());
153 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
155 if (!constrainSelectedInstRegOperands(*ORi
, TII
, TRI
, RBI
))
160 /// Returning Opc indicates that we failed to select MIPS instruction opcode.
161 static unsigned selectLoadStoreOpCode(unsigned Opc
, unsigned MemSizeInBytes
,
162 unsigned RegBank
, bool isFP64
) {
163 bool isStore
= Opc
== TargetOpcode::G_STORE
;
164 if (RegBank
== Mips::GPRBRegBankID
) {
166 switch (MemSizeInBytes
) {
177 // Unspecified extending load is selected into zeroExtending load.
178 switch (MemSizeInBytes
) {
182 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LH
: Mips::LHu
;
184 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LB
: Mips::LBu
;
190 if (RegBank
== Mips::FPRBRegBankID
) {
191 switch (MemSizeInBytes
) {
193 return isStore
? Mips::SWC1
: Mips::LWC1
;
196 return isStore
? Mips::SDC164
: Mips::LDC164
;
198 return isStore
? Mips::SDC1
: Mips::LDC1
;
206 bool MipsInstructionSelector::select(MachineInstr
&I
,
207 CodeGenCoverage
&CoverageInfo
) const {
209 MachineBasicBlock
&MBB
= *I
.getParent();
210 MachineFunction
&MF
= *MBB
.getParent();
211 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
213 if (!isPreISelGenericOpcode(I
.getOpcode())) {
215 return selectCopy(I
, MRI
);
220 if (I
.getOpcode() == Mips::G_MUL
) {
221 MachineInstr
*Mul
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MUL
))
222 .add(I
.getOperand(0))
223 .add(I
.getOperand(1))
224 .add(I
.getOperand(2));
225 if (!constrainSelectedInstRegOperands(*Mul
, TII
, TRI
, RBI
))
227 Mul
->getOperand(3).setIsDead(true);
228 Mul
->getOperand(4).setIsDead(true);
234 if (selectImpl(I
, CoverageInfo
))
237 MachineInstr
*MI
= nullptr;
238 using namespace TargetOpcode
;
240 switch (I
.getOpcode()) {
242 Register PseudoMULTuReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
243 MachineInstr
*PseudoMULTu
, *PseudoMove
;
245 PseudoMULTu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMULTu
))
246 .addDef(PseudoMULTuReg
)
247 .add(I
.getOperand(1))
248 .add(I
.getOperand(2));
249 if (!constrainSelectedInstRegOperands(*PseudoMULTu
, TII
, TRI
, RBI
))
252 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMFHI
))
253 .addDef(I
.getOperand(0).getReg())
254 .addUse(PseudoMULTuReg
);
255 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
262 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
263 .add(I
.getOperand(0))
264 .add(I
.getOperand(1))
265 .add(I
.getOperand(2));
270 I
.setDesc(TII
.get(COPY
));
271 return selectCopy(I
, MRI
);
273 case G_FRAME_INDEX
: {
274 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
275 .add(I
.getOperand(0))
276 .add(I
.getOperand(1))
281 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::BNE
))
282 .add(I
.getOperand(0))
284 .add(I
.getOperand(1));
288 const Register DestReg
= I
.getOperand(0).getReg();
289 const unsigned OpSize
= MRI
.getType(DestReg
).getSizeInBits();
291 const TargetRegisterClass
*DefRC
= nullptr;
292 if (TargetRegisterInfo::isPhysicalRegister(DestReg
))
293 DefRC
= TRI
.getRegClass(DestReg
);
295 DefRC
= getRegClassForTypeOnBank(OpSize
,
296 *RBI
.getRegBank(DestReg
, MRI
, TRI
), RBI
);
298 I
.setDesc(TII
.get(TargetOpcode::PHI
));
299 return RBI
.constrainGenericRegister(DestReg
, *DefRC
, MRI
);
305 const Register DestReg
= I
.getOperand(0).getReg();
306 const unsigned DestRegBank
= RBI
.getRegBank(DestReg
, MRI
, TRI
)->getID();
307 const unsigned OpSize
= MRI
.getType(DestReg
).getSizeInBits();
308 const unsigned OpMemSizeInBytes
= (*I
.memoperands_begin())->getSize();
310 if (DestRegBank
== Mips::GPRBRegBankID
&& OpSize
!= 32)
313 if (DestRegBank
== Mips::FPRBRegBankID
&& OpSize
!= 32 && OpSize
!= 64)
316 const unsigned NewOpc
= selectLoadStoreOpCode(
317 I
.getOpcode(), OpMemSizeInBytes
, DestRegBank
, STI
.isFP64bit());
318 if (NewOpc
== I
.getOpcode())
321 MachineOperand BaseAddr
= I
.getOperand(1);
322 int64_t SignedOffset
= 0;
323 // Try to fold load/store + G_GEP + G_CONSTANT
324 // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
325 // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
326 // %LoadResult/%StoreSrc = load/store %Addr(p0)
328 // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
330 MachineInstr
*Addr
= MRI
.getVRegDef(I
.getOperand(1).getReg());
331 if (Addr
->getOpcode() == G_GEP
) {
332 MachineInstr
*Offset
= MRI
.getVRegDef(Addr
->getOperand(2).getReg());
333 if (Offset
->getOpcode() == G_CONSTANT
) {
334 APInt OffsetValue
= Offset
->getOperand(1).getCImm()->getValue();
335 if (OffsetValue
.isSignedIntN(16)) {
336 BaseAddr
= Addr
->getOperand(1);
337 SignedOffset
= OffsetValue
.getSExtValue();
342 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(NewOpc
))
343 .add(I
.getOperand(0))
345 .addImm(SignedOffset
)
346 .addMemOperand(*I
.memoperands_begin());
353 Register HILOReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
354 bool IsSigned
= I
.getOpcode() == G_SREM
|| I
.getOpcode() == G_SDIV
;
355 bool IsDiv
= I
.getOpcode() == G_UDIV
|| I
.getOpcode() == G_SDIV
;
357 MachineInstr
*PseudoDIV
, *PseudoMove
;
358 PseudoDIV
= BuildMI(MBB
, I
, I
.getDebugLoc(),
359 TII
.get(IsSigned
? Mips::PseudoSDIV
: Mips::PseudoUDIV
))
361 .add(I
.getOperand(1))
362 .add(I
.getOperand(2));
363 if (!constrainSelectedInstRegOperands(*PseudoDIV
, TII
, TRI
, RBI
))
366 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(),
367 TII
.get(IsDiv
? Mips::PseudoMFLO
: Mips::PseudoMFHI
))
368 .addDef(I
.getOperand(0).getReg())
370 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
377 // Handle operands with pointer type.
378 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MOVN_I_I
))
379 .add(I
.getOperand(0))
380 .add(I
.getOperand(2))
381 .add(I
.getOperand(1))
382 .add(I
.getOperand(3));
386 MachineIRBuilder
B(I
);
387 if (!materialize32BitImm(I
.getOperand(0).getReg(),
388 I
.getOperand(1).getCImm()->getValue(), B
))
395 const APFloat
&FPimm
= I
.getOperand(1).getFPImm()->getValueAPF();
396 APInt APImm
= FPimm
.bitcastToAPInt();
397 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
400 Register GPRReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
401 MachineIRBuilder
B(I
);
402 if (!materialize32BitImm(GPRReg
, APImm
, B
))
405 MachineInstrBuilder MTC1
=
406 B
.buildInstr(Mips::MTC1
, {I
.getOperand(0).getReg()}, {GPRReg
});
407 if (!MTC1
.constrainAllUses(TII
, TRI
, RBI
))
411 Register GPRRegHigh
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
412 Register GPRRegLow
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
413 MachineIRBuilder
B(I
);
414 if (!materialize32BitImm(GPRRegHigh
, APImm
.getHiBits(32).trunc(32), B
))
416 if (!materialize32BitImm(GPRRegLow
, APImm
.getLoBits(32).trunc(32), B
))
419 MachineInstrBuilder PairF64
= B
.buildInstr(
420 STI
.isFP64bit() ? Mips::BuildPairF64_64
: Mips::BuildPairF64
,
421 {I
.getOperand(0).getReg()}, {GPRRegLow
, GPRRegHigh
});
422 if (!PairF64
.constrainAllUses(TII
, TRI
, RBI
))
430 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
431 unsigned FABSOpcode
=
432 Size
== 32 ? Mips::FABS_S
433 : STI
.isFP64bit() ? Mips::FABS_D64
: Mips::FABS_D32
;
434 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FABSOpcode
))
435 .add(I
.getOperand(0))
436 .add(I
.getOperand(1));
440 unsigned FromSize
= MRI
.getType(I
.getOperand(1).getReg()).getSizeInBits();
441 unsigned ToSize
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
443 assert((ToSize
== 32) && "Unsupported integer size for G_FPTOSI");
444 assert((FromSize
== 32 || FromSize
== 64) &&
445 "Unsupported floating point size for G_FPTOSI");
449 Opcode
= Mips::TRUNC_W_S
;
451 Opcode
= STI
.isFP64bit() ? Mips::TRUNC_W_D64
: Mips::TRUNC_W_D32
;
452 unsigned ResultInFPR
= MRI
.createVirtualRegister(&Mips::FGR32RegClass
);
453 MachineInstr
*Trunc
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
455 .addUse(I
.getOperand(1).getReg());
456 if (!constrainSelectedInstRegOperands(*Trunc
, TII
, TRI
, RBI
))
459 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MFC1
))
460 .addDef(I
.getOperand(0).getReg())
461 .addUse(ResultInFPR
);
462 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
468 case G_GLOBAL_VALUE
: {
469 const llvm::GlobalValue
*GVal
= I
.getOperand(1).getGlobal();
470 if (MF
.getTarget().isPositionIndependent()) {
471 MachineInstr
*LWGOT
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
472 .addDef(I
.getOperand(0).getReg())
473 .addReg(MF
.getInfo
<MipsFunctionInfo
>()
474 ->getGlobalBaseRegForGlobalISel())
475 .addGlobalAddress(GVal
);
476 // Global Values that don't have local linkage are handled differently
477 // when they are part of call sequence. MipsCallLowering::lowerCall
478 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
479 // MO_GOT_CALL flag when Callee doesn't have local linkage.
480 if (I
.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL
)
481 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL
);
483 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT
);
484 LWGOT
->addMemOperand(
485 MF
, MF
.getMachineMemOperand(MachinePointerInfo::getGOT(MF
),
486 MachineMemOperand::MOLoad
, 4, 4));
487 if (!constrainSelectedInstRegOperands(*LWGOT
, TII
, TRI
, RBI
))
490 if (GVal
->hasLocalLinkage()) {
491 Register LWGOTDef
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
492 LWGOT
->getOperand(0).setReg(LWGOTDef
);
494 MachineInstr
*ADDiu
=
495 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
496 .addDef(I
.getOperand(0).getReg())
498 .addGlobalAddress(GVal
);
499 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
500 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
504 Register LUiReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
506 MachineInstr
*LUi
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LUi
))
508 .addGlobalAddress(GVal
);
509 LUi
->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI
);
510 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
513 MachineInstr
*ADDiu
=
514 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
515 .addDef(I
.getOperand(0).getReg())
517 .addGlobalAddress(GVal
);
518 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
519 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
528 Register Def
, LHS
, RHS
;
529 Instr(unsigned Opcode
, Register Def
, Register LHS
, Register RHS
)
530 : Opcode(Opcode
), Def(Def
), LHS(LHS
), RHS(RHS
){};
532 bool hasImm() const {
533 if (Opcode
== Mips::SLTiu
|| Opcode
== Mips::XORi
)
539 SmallVector
<struct Instr
, 2> Instructions
;
540 Register ICMPReg
= I
.getOperand(0).getReg();
541 Register Temp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
542 Register LHS
= I
.getOperand(2).getReg();
543 Register RHS
= I
.getOperand(3).getReg();
544 CmpInst::Predicate Cond
=
545 static_cast<CmpInst::Predicate
>(I
.getOperand(1).getPredicate());
548 case CmpInst::ICMP_EQ
: // LHS == RHS -> (LHS ^ RHS) < 1
549 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
550 Instructions
.emplace_back(Mips::SLTiu
, ICMPReg
, Temp
, 1);
552 case CmpInst::ICMP_NE
: // LHS != RHS -> 0 < (LHS ^ RHS)
553 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
554 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, Mips::ZERO
, Temp
);
556 case CmpInst::ICMP_UGT
: // LHS > RHS -> RHS < LHS
557 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, RHS
, LHS
);
559 case CmpInst::ICMP_UGE
: // LHS >= RHS -> !(LHS < RHS)
560 Instructions
.emplace_back(Mips::SLTu
, Temp
, LHS
, RHS
);
561 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
563 case CmpInst::ICMP_ULT
: // LHS < RHS -> LHS < RHS
564 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, LHS
, RHS
);
566 case CmpInst::ICMP_ULE
: // LHS <= RHS -> !(RHS < LHS)
567 Instructions
.emplace_back(Mips::SLTu
, Temp
, RHS
, LHS
);
568 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
570 case CmpInst::ICMP_SGT
: // LHS > RHS -> RHS < LHS
571 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, RHS
, LHS
);
573 case CmpInst::ICMP_SGE
: // LHS >= RHS -> !(LHS < RHS)
574 Instructions
.emplace_back(Mips::SLT
, Temp
, LHS
, RHS
);
575 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
577 case CmpInst::ICMP_SLT
: // LHS < RHS -> LHS < RHS
578 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, LHS
, RHS
);
580 case CmpInst::ICMP_SLE
: // LHS <= RHS -> !(RHS < LHS)
581 Instructions
.emplace_back(Mips::SLT
, Temp
, RHS
, LHS
);
582 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
588 MachineIRBuilder
B(I
);
589 for (const struct Instr
&Instruction
: Instructions
) {
590 MachineInstrBuilder MIB
= B
.buildInstr(
591 Instruction
.Opcode
, {Instruction
.Def
}, {Instruction
.LHS
});
593 if (Instruction
.hasImm())
594 MIB
.addImm(Instruction
.RHS
);
596 MIB
.addUse(Instruction
.RHS
);
598 if (!MIB
.constrainAllUses(TII
, TRI
, RBI
))
606 unsigned MipsFCMPCondCode
;
607 bool isLogicallyNegated
;
608 switch (CmpInst::Predicate Cond
= static_cast<CmpInst::Predicate
>(
609 I
.getOperand(1).getPredicate())) {
610 case CmpInst::FCMP_UNO
: // Unordered
611 case CmpInst::FCMP_ORD
: // Ordered (OR)
612 MipsFCMPCondCode
= Mips::FCOND_UN
;
613 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UNO
;
615 case CmpInst::FCMP_OEQ
: // Equal
616 case CmpInst::FCMP_UNE
: // Not Equal (NEQ)
617 MipsFCMPCondCode
= Mips::FCOND_OEQ
;
618 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OEQ
;
620 case CmpInst::FCMP_UEQ
: // Unordered or Equal
621 case CmpInst::FCMP_ONE
: // Ordered or Greater Than or Less Than (OGL)
622 MipsFCMPCondCode
= Mips::FCOND_UEQ
;
623 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UEQ
;
625 case CmpInst::FCMP_OLT
: // Ordered or Less Than
626 case CmpInst::FCMP_UGE
: // Unordered or Greater Than or Equal (UGE)
627 MipsFCMPCondCode
= Mips::FCOND_OLT
;
628 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLT
;
630 case CmpInst::FCMP_ULT
: // Unordered or Less Than
631 case CmpInst::FCMP_OGE
: // Ordered or Greater Than or Equal (OGE)
632 MipsFCMPCondCode
= Mips::FCOND_ULT
;
633 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULT
;
635 case CmpInst::FCMP_OLE
: // Ordered or Less Than or Equal
636 case CmpInst::FCMP_UGT
: // Unordered or Greater Than (UGT)
637 MipsFCMPCondCode
= Mips::FCOND_OLE
;
638 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLE
;
640 case CmpInst::FCMP_ULE
: // Unordered or Less Than or Equal
641 case CmpInst::FCMP_OGT
: // Ordered or Greater Than (OGT)
642 MipsFCMPCondCode
= Mips::FCOND_ULE
;
643 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULE
;
649 // Default compare result in gpr register will be `true`.
650 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
651 // using MOVF_I. When orignal predicate (Cond) is logically negated
652 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
653 unsigned MoveOpcode
= isLogicallyNegated
? Mips::MOVT_I
: Mips::MOVF_I
;
655 unsigned TrueInReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
656 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
661 unsigned Size
= MRI
.getType(I
.getOperand(2).getReg()).getSizeInBits();
662 unsigned FCMPOpcode
=
663 Size
== 32 ? Mips::FCMP_S32
664 : STI
.isFP64bit() ? Mips::FCMP_D64
: Mips::FCMP_D32
;
665 MachineInstr
*FCMP
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FCMPOpcode
))
666 .addUse(I
.getOperand(2).getReg())
667 .addUse(I
.getOperand(3).getReg())
668 .addImm(MipsFCMPCondCode
);
669 if (!constrainSelectedInstRegOperands(*FCMP
, TII
, TRI
, RBI
))
672 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(MoveOpcode
))
673 .addDef(I
.getOperand(0).getReg())
677 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
688 return constrainSelectedInstRegOperands(*MI
, TII
, TRI
, RBI
);
692 InstructionSelector
*createMipsInstructionSelector(const MipsTargetMachine
&TM
,
693 MipsSubtarget
&Subtarget
,
694 MipsRegisterBankInfo
&RBI
) {
695 return new MipsInstructionSelector(TM
, Subtarget
, RBI
);
697 } // end namespace llvm