//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// MIPS.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
                           const RegisterBankInfo &RBI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

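// For a COPY into a generic virtual register, pick a concrete register class
// from the destination's register bank and size and constrain the register to
// it. Copies into physical registers need no further work here.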
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    unsigned OpSize, const RegisterBank &RB,
    const RegisterBankInfo &RBI) const {
  if (RB.getID() == Mips::GPRBRegBankID)
    return &Mips::GPR32RegClass;

  if (RB.getID() == Mips::FPRBRegBankID)
    return OpSize == 32
               ? &Mips::FGR32RegClass
               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
                                                      : &Mips::AFGR64RegClass;

  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
  return nullptr;
}

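// Materialize a 32-bit immediate into DestReg using at most two instructions:
// a single ORi (zero extension), LUi (low half zero) or ADDiu (sign extension)
// when the value allows it, and a LUi + ORi pair otherwise.
// For example, 0x0001ffff needs the two-instruction form:
//   LUi  $tmp, 0x0001
//   ORi  $dst, $tmp, 0xffff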
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
                             .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
                             .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

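/// Map a G_LOAD/G_STORE/G_ZEXTLOAD/G_SEXTLOAD opcode to the MIPS load/store
/// instruction for the given access size and destination register bank.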
/// Returning Opc indicates that we failed to select MIPS instruction opcode.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
                                      unsigned RegBank, bool isFP64) {
  bool isStore = Opc == TargetOpcode::G_STORE;
  if (RegBank == Mips::GPRBRegBankID) {
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (RegBank == Mips::FPRBRegBankID) {
    switch (MemSizeInBytes) {
    case 4:
      return isStore ? Mips::SWC1 : Mips::LWC1;
    case 8:
      if (isFP64)
        return isStore ? Mips::SDC164 : Mips::LDC164;
      else
        return isStore ? Mips::SDC1 : Mips::LDC1;
    default:
      return Opc;
    }
  }
  return Opc;
}

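// select() first handles COPY and Mips::G_MUL specially, then tries the
// TableGen'erated selectImpl, and finally falls back to the hand-written
// selection code in the switch below for the remaining generic opcodes.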
bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
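  // Jump-table branch: scale the index by the entry size (SLL), add the table
  // address (ADDu), load the target entry (LW), add the global base register
  // under PIC, and branch to it with PseudoIndirectBranch.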
  case G_BRJT: {
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(OpSize,
                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
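  // Loads and stores: pick the MIPS opcode from the access size and register
  // bank, and fold a G_GEP with a 16-bit signed constant offset into the
  // instruction's immediate offset when possible.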
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();

    if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
      return false;

    if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
      return false;

    const unsigned NewOpc = selectLoadStoreOpCode(
        I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_GEP + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_GEP) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
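  // Integer division and remainder go through the HI/LO accumulator: emit
  // PseudoSDIV/PseudoUDIV and read the quotient from LO (PseudoMFLO) or the
  // remainder from HI (PseudoMFHI).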
  case G_UDIV:
  case G_SDIV:
  case G_UREM:
  case G_SREM: {
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .add(I.getOperand(0));

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(MI->getOperand(0).getReg(),
                    getRegClassForTypeOnBank(
                        MRI.getType(I.getOperand(0).getReg()).getSizeInBits(),
                        *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI),
                        RBI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
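  // FP constants are materialized in GPRs first and then transferred to the
  // FPU: MTC1 for 32-bit values, BuildPairF64(_64) from two 32-bit halves for
  // 64-bit values.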
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
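  // fp-to-signed-int: truncate inside an FPU register with TRUNC.W, then move
  // the 32-bit result to the GPR destination with MFC1.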
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
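  // Global addresses: under PIC, load the address from the GOT (plus a %lo
  // ADDiu for globals with local linkage); otherwise materialize it with the
  // classic LUi %hi / ADDiu %lo pair.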
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
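  // Materialize the address of a jump table: load it from the GOT under PIC;
  // otherwise only the %hi half is produced here with LUi, since the LW
  // emitted for G_BRJT applies the %lo half as its offset.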
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
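  // Integer compares are expanded into one or two of SLT/SLTu/SLTiu/XOR/XORi
  // that leave 0 or 1 in the result register; the switch below records the
  // sequence for each predicate, e.g. ICMP_EQ a, b becomes
  //   XOR   temp, a, b
  //   SLTiu dst, temp, 1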
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
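  // Floating point compares: map the IR predicate onto a native FCMP
  // condition code, tracking whether the MIPS condition is the logical
  // negation of the requested predicate.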
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // Default compare result in gpr register will be `true`.
    // We will move `false` (Mips::ZERO) to gpr result when fcmp gives false
    // using MOVF_I. When original predicate (Cond) is logically negated
    // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
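  // va_start: compute the address of the varargs save area with LEA_ADDiu on
  // the varargs frame index and store it through the va_list pointer operand.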
  case G_VASTART: {
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

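// Factory function used by the rest of the MIPS backend to create this
// GlobalISel instruction selector; ownership passes to the caller.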
namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm