//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"
23 #define DEBUG_TYPE "mips-isel"
29 #define GET_GLOBALISEL_PREDICATE_BITSET
30 #include "MipsGenGlobalISel.inc"
31 #undef GET_GLOBALISEL_PREDICATE_BITSET
33 class MipsInstructionSelector
: public InstructionSelector
{
35 MipsInstructionSelector(const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
36 const MipsRegisterBankInfo
&RBI
);
38 bool select(MachineInstr
&I
) override
;
39 static const char *getName() { return DEBUG_TYPE
; }
42 bool selectImpl(MachineInstr
&I
, CodeGenCoverage
&CoverageInfo
) const;
43 bool isRegInGprb(Register Reg
, MachineRegisterInfo
&MRI
) const;
44 bool isRegInFprb(Register Reg
, MachineRegisterInfo
&MRI
) const;
45 bool materialize32BitImm(Register DestReg
, APInt Imm
,
46 MachineIRBuilder
&B
) const;
47 bool selectCopy(MachineInstr
&I
, MachineRegisterInfo
&MRI
) const;
48 const TargetRegisterClass
*
49 getRegClassForTypeOnBank(Register Reg
, MachineRegisterInfo
&MRI
) const;
50 unsigned selectLoadStoreOpCode(MachineInstr
&I
,
51 MachineRegisterInfo
&MRI
) const;
52 bool buildUnalignedStore(MachineInstr
&I
, unsigned Opc
,
53 MachineOperand
&BaseAddr
, unsigned Offset
,
54 MachineMemOperand
*MMO
) const;
55 bool buildUnalignedLoad(MachineInstr
&I
, unsigned Opc
, Register Dest
,
56 MachineOperand
&BaseAddr
, unsigned Offset
,
57 Register TiedDest
, MachineMemOperand
*MMO
) const;
59 const MipsTargetMachine
&TM
;
60 const MipsSubtarget
&STI
;
61 const MipsInstrInfo
&TII
;
62 const MipsRegisterInfo
&TRI
;
63 const MipsRegisterBankInfo
&RBI
;
65 #define GET_GLOBALISEL_PREDICATES_DECL
66 #include "MipsGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_PREDICATES_DECL
69 #define GET_GLOBALISEL_TEMPORARIES_DECL
70 #include "MipsGenGlobalISel.inc"
71 #undef GET_GLOBALISEL_TEMPORARIES_DECL
74 } // end anonymous namespace
76 #define GET_GLOBALISEL_IMPL
77 #include "MipsGenGlobalISel.inc"
78 #undef GET_GLOBALISEL_IMPL
80 MipsInstructionSelector::MipsInstructionSelector(
81 const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
82 const MipsRegisterBankInfo
&RBI
)
83 : TM(TM
), STI(STI
), TII(*STI
.getInstrInfo()), TRI(*STI
.getRegisterInfo()),
86 #define GET_GLOBALISEL_PREDICATES_INIT
87 #include "MipsGenGlobalISel.inc"
88 #undef GET_GLOBALISEL_PREDICATES_INIT
89 #define GET_GLOBALISEL_TEMPORARIES_INIT
90 #include "MipsGenGlobalISel.inc"
91 #undef GET_GLOBALISEL_TEMPORARIES_INIT
95 bool MipsInstructionSelector::isRegInGprb(Register Reg
,
96 MachineRegisterInfo
&MRI
) const {
97 return RBI
.getRegBank(Reg
, MRI
, TRI
)->getID() == Mips::GPRBRegBankID
;
100 bool MipsInstructionSelector::isRegInFprb(Register Reg
,
101 MachineRegisterInfo
&MRI
) const {
102 return RBI
.getRegBank(Reg
, MRI
, TRI
)->getID() == Mips::FPRBRegBankID
;
105 bool MipsInstructionSelector::selectCopy(MachineInstr
&I
,
106 MachineRegisterInfo
&MRI
) const {
107 Register DstReg
= I
.getOperand(0).getReg();
108 if (DstReg
.isPhysical())
111 const TargetRegisterClass
*RC
= getRegClassForTypeOnBank(DstReg
, MRI
);
112 if (!RBI
.constrainGenericRegister(DstReg
, *RC
, MRI
)) {
113 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII
.getName(I
.getOpcode())
120 const TargetRegisterClass
*MipsInstructionSelector::getRegClassForTypeOnBank(
121 Register Reg
, MachineRegisterInfo
&MRI
) const {
122 const LLT Ty
= MRI
.getType(Reg
);
123 const unsigned TySize
= Ty
.getSizeInBits();
125 if (isRegInGprb(Reg
, MRI
)) {
126 assert((Ty
.isScalar() || Ty
.isPointer()) && TySize
== 32 &&
127 "Register class not available for LLT, register bank combination");
128 return &Mips::GPR32RegClass
;
131 if (isRegInFprb(Reg
, MRI
)) {
133 assert((TySize
== 32 || TySize
== 64) &&
134 "Register class not available for LLT, register bank combination");
136 return &Mips::FGR32RegClass
;
137 return STI
.isFP64bit() ? &Mips::FGR64RegClass
: &Mips::AFGR64RegClass
;
141 llvm_unreachable("Unsupported register bank.");
144 bool MipsInstructionSelector::materialize32BitImm(Register DestReg
, APInt Imm
,
145 MachineIRBuilder
&B
) const {
146 assert(Imm
.getBitWidth() == 32 && "Unsupported immediate size.");
147 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
148 if (Imm
.getHiBits(16).isZero()) {
150 B
.buildInstr(Mips::ORi
, {DestReg
}, {Register(Mips::ZERO
)})
151 .addImm(Imm
.getLoBits(16).getLimitedValue());
152 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
154 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
155 if (Imm
.getLoBits(16).isZero()) {
156 MachineInstr
*Inst
= B
.buildInstr(Mips::LUi
, {DestReg
}, {})
157 .addImm(Imm
.getHiBits(16).getLimitedValue());
158 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
160 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
161 if (Imm
.isSignedIntN(16)) {
163 B
.buildInstr(Mips::ADDiu
, {DestReg
}, {Register(Mips::ZERO
)})
164 .addImm(Imm
.getLoBits(16).getLimitedValue());
165 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
167 // Values that cannot be materialized with single immediate instruction.
168 Register LUiReg
= B
.getMRI()->createVirtualRegister(&Mips::GPR32RegClass
);
169 MachineInstr
*LUi
= B
.buildInstr(Mips::LUi
, {LUiReg
}, {})
170 .addImm(Imm
.getHiBits(16).getLimitedValue());
171 MachineInstr
*ORi
= B
.buildInstr(Mips::ORi
, {DestReg
}, {LUiReg
})
172 .addImm(Imm
.getLoBits(16).getLimitedValue());
173 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
175 if (!constrainSelectedInstRegOperands(*ORi
, TII
, TRI
, RBI
))
180 /// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
182 MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr
&I
,
183 MachineRegisterInfo
&MRI
) const {
184 const Register ValueReg
= I
.getOperand(0).getReg();
185 const LLT Ty
= MRI
.getType(ValueReg
);
186 const unsigned TySize
= Ty
.getSizeInBits();
187 const unsigned MemSizeInBytes
=
188 (*I
.memoperands_begin())->getSize().getValue();
189 unsigned Opc
= I
.getOpcode();
190 const bool isStore
= Opc
== TargetOpcode::G_STORE
;
192 if (isRegInGprb(ValueReg
, MRI
)) {
193 assert(((Ty
.isScalar() && TySize
== 32) ||
194 (Ty
.isPointer() && TySize
== 32 && MemSizeInBytes
== 4)) &&
195 "Unsupported register bank, LLT, MemSizeInBytes combination");
198 switch (MemSizeInBytes
) {
209 // Unspecified extending load is selected into zeroExtending load.
210 switch (MemSizeInBytes
) {
214 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LH
: Mips::LHu
;
216 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LB
: Mips::LBu
;
222 if (isRegInFprb(ValueReg
, MRI
)) {
224 assert(((TySize
== 32 && MemSizeInBytes
== 4) ||
225 (TySize
== 64 && MemSizeInBytes
== 8)) &&
226 "Unsupported register bank, LLT, MemSizeInBytes combination");
228 if (MemSizeInBytes
== 4)
229 return isStore
? Mips::SWC1
: Mips::LWC1
;
232 return isStore
? Mips::SDC164
: Mips::LDC164
;
233 return isStore
? Mips::SDC1
: Mips::LDC1
;
237 assert(STI
.hasMSA() && "Vector instructions require target with MSA.");
238 assert((TySize
== 128 && MemSizeInBytes
== 16) &&
239 "Unsupported register bank, LLT, MemSizeInBytes combination");
240 switch (Ty
.getElementType().getSizeInBits()) {
242 return isStore
? Mips::ST_B
: Mips::LD_B
;
244 return isStore
? Mips::ST_H
: Mips::LD_H
;
246 return isStore
? Mips::ST_W
: Mips::LD_W
;
248 return isStore
? Mips::ST_D
: Mips::LD_D
;
258 bool MipsInstructionSelector::buildUnalignedStore(
259 MachineInstr
&I
, unsigned Opc
, MachineOperand
&BaseAddr
, unsigned Offset
,
260 MachineMemOperand
*MMO
) const {
261 MachineInstr
*NewInst
=
262 BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(Opc
))
263 .add(I
.getOperand(0))
267 if (!constrainSelectedInstRegOperands(*NewInst
, TII
, TRI
, RBI
))
272 bool MipsInstructionSelector::buildUnalignedLoad(
273 MachineInstr
&I
, unsigned Opc
, Register Dest
, MachineOperand
&BaseAddr
,
274 unsigned Offset
, Register TiedDest
, MachineMemOperand
*MMO
) const {
275 MachineInstr
*NewInst
=
276 BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(Opc
))
281 .addMemOperand(*I
.memoperands_begin());
282 if (!constrainSelectedInstRegOperands(*NewInst
, TII
, TRI
, RBI
))
287 bool MipsInstructionSelector::select(MachineInstr
&I
) {
289 MachineBasicBlock
&MBB
= *I
.getParent();
290 MachineFunction
&MF
= *MBB
.getParent();
291 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
293 if (!isPreISelGenericOpcode(I
.getOpcode())) {
295 return selectCopy(I
, MRI
);
300 if (I
.getOpcode() == Mips::G_MUL
&&
301 isRegInGprb(I
.getOperand(0).getReg(), MRI
)) {
302 MachineInstr
*Mul
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MUL
))
303 .add(I
.getOperand(0))
304 .add(I
.getOperand(1))
305 .add(I
.getOperand(2));
306 if (!constrainSelectedInstRegOperands(*Mul
, TII
, TRI
, RBI
))
308 Mul
->getOperand(3).setIsDead(true);
309 Mul
->getOperand(4).setIsDead(true);
315 if (selectImpl(I
, *CoverageInfo
))
318 MachineInstr
*MI
= nullptr;
319 using namespace TargetOpcode
;
321 switch (I
.getOpcode()) {
323 Register PseudoMULTuReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
324 MachineInstr
*PseudoMULTu
, *PseudoMove
;
326 PseudoMULTu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMULTu
))
327 .addDef(PseudoMULTuReg
)
328 .add(I
.getOperand(1))
329 .add(I
.getOperand(2));
330 if (!constrainSelectedInstRegOperands(*PseudoMULTu
, TII
, TRI
, RBI
))
333 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMFHI
))
334 .addDef(I
.getOperand(0).getReg())
335 .addUse(PseudoMULTuReg
);
336 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
343 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
344 .add(I
.getOperand(0))
345 .add(I
.getOperand(1))
346 .add(I
.getOperand(2));
351 I
.setDesc(TII
.get(COPY
));
352 return selectCopy(I
, MRI
);
354 case G_FRAME_INDEX
: {
355 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
356 .add(I
.getOperand(0))
357 .add(I
.getOperand(1))
363 MF
.getJumpTableInfo()->getEntrySize(MF
.getDataLayout());
364 assert(isPowerOf2_32(EntrySize
) &&
365 "Non-power-of-two jump-table entry size not supported.");
367 Register JTIndex
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
368 MachineInstr
*SLL
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SLL
))
370 .addUse(I
.getOperand(2).getReg())
371 .addImm(Log2_32(EntrySize
));
372 if (!constrainSelectedInstRegOperands(*SLL
, TII
, TRI
, RBI
))
375 Register DestAddress
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
376 MachineInstr
*ADDu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
378 .addUse(I
.getOperand(0).getReg())
380 if (!constrainSelectedInstRegOperands(*ADDu
, TII
, TRI
, RBI
))
383 Register Dest
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
385 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
388 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_ABS_LO
)
389 .addMemOperand(MF
.getMachineMemOperand(
390 MachinePointerInfo(), MachineMemOperand::MOLoad
, 4, Align(4)));
391 if (!constrainSelectedInstRegOperands(*LW
, TII
, TRI
, RBI
))
394 if (MF
.getTarget().isPositionIndependent()) {
395 Register DestTmp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
396 LW
->getOperand(0).setReg(DestTmp
);
397 MachineInstr
*ADDu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
400 .addUse(MF
.getInfo
<MipsFunctionInfo
>()
401 ->getGlobalBaseRegForGlobalISel(MF
));
402 if (!constrainSelectedInstRegOperands(*ADDu
, TII
, TRI
, RBI
))
406 MachineInstr
*Branch
=
407 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoIndirectBranch
))
409 if (!constrainSelectedInstRegOperands(*Branch
, TII
, TRI
, RBI
))
416 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoIndirectBranch
))
417 .add(I
.getOperand(0));
421 const Register DestReg
= I
.getOperand(0).getReg();
423 const TargetRegisterClass
*DefRC
= nullptr;
424 if (DestReg
.isPhysical())
425 DefRC
= TRI
.getRegClass(DestReg
);
427 DefRC
= getRegClassForTypeOnBank(DestReg
, MRI
);
429 I
.setDesc(TII
.get(TargetOpcode::PHI
));
430 return RBI
.constrainGenericRegister(DestReg
, *DefRC
, MRI
);
436 auto MMO
= *I
.memoperands_begin();
437 MachineOperand BaseAddr
= I
.getOperand(1);
438 int64_t SignedOffset
= 0;
439 // Try to fold load/store + G_PTR_ADD + G_CONSTANT
440 // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
441 // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
442 // %LoadResult/%StoreSrc = load/store %Addr(p0)
444 // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
446 MachineInstr
*Addr
= MRI
.getVRegDef(I
.getOperand(1).getReg());
447 if (Addr
->getOpcode() == G_PTR_ADD
) {
448 MachineInstr
*Offset
= MRI
.getVRegDef(Addr
->getOperand(2).getReg());
449 if (Offset
->getOpcode() == G_CONSTANT
) {
450 APInt OffsetValue
= Offset
->getOperand(1).getCImm()->getValue();
451 if (OffsetValue
.isSignedIntN(16)) {
452 BaseAddr
= Addr
->getOperand(1);
453 SignedOffset
= OffsetValue
.getSExtValue();
458 // Unaligned memory access
459 if ((!MMO
->getSize().hasValue() ||
460 MMO
->getAlign() < MMO
->getSize().getValue()) &&
461 !STI
.systemSupportsUnalignedAccess()) {
462 if (MMO
->getSize() != 4 || !isRegInGprb(I
.getOperand(0).getReg(), MRI
))
465 if (I
.getOpcode() == G_STORE
) {
466 if (!buildUnalignedStore(I
, Mips::SWL
, BaseAddr
, SignedOffset
+ 3, MMO
))
468 if (!buildUnalignedStore(I
, Mips::SWR
, BaseAddr
, SignedOffset
, MMO
))
474 if (I
.getOpcode() == G_LOAD
) {
475 Register ImplDef
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
476 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::IMPLICIT_DEF
))
478 Register Tmp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
479 if (!buildUnalignedLoad(I
, Mips::LWL
, Tmp
, BaseAddr
, SignedOffset
+ 3,
482 if (!buildUnalignedLoad(I
, Mips::LWR
, I
.getOperand(0).getReg(),
483 BaseAddr
, SignedOffset
, Tmp
, MMO
))
492 const unsigned NewOpc
= selectLoadStoreOpCode(I
, MRI
);
493 if (NewOpc
== I
.getOpcode())
496 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(NewOpc
))
497 .add(I
.getOperand(0))
499 .addImm(SignedOffset
)
507 Register HILOReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
508 bool IsSigned
= I
.getOpcode() == G_SREM
|| I
.getOpcode() == G_SDIV
;
509 bool IsDiv
= I
.getOpcode() == G_UDIV
|| I
.getOpcode() == G_SDIV
;
511 MachineInstr
*PseudoDIV
, *PseudoMove
;
512 PseudoDIV
= BuildMI(MBB
, I
, I
.getDebugLoc(),
513 TII
.get(IsSigned
? Mips::PseudoSDIV
: Mips::PseudoUDIV
))
515 .add(I
.getOperand(1))
516 .add(I
.getOperand(2));
517 if (!constrainSelectedInstRegOperands(*PseudoDIV
, TII
, TRI
, RBI
))
520 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(),
521 TII
.get(IsDiv
? Mips::PseudoMFLO
: Mips::PseudoMFHI
))
522 .addDef(I
.getOperand(0).getReg())
524 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
531 // Handle operands with pointer type.
532 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MOVN_I_I
))
533 .add(I
.getOperand(0))
534 .add(I
.getOperand(2))
535 .add(I
.getOperand(1))
536 .add(I
.getOperand(3));
539 case G_UNMERGE_VALUES
: {
540 if (I
.getNumOperands() != 3)
542 Register Src
= I
.getOperand(2).getReg();
543 Register Lo
= I
.getOperand(0).getReg();
544 Register Hi
= I
.getOperand(1).getReg();
545 if (!isRegInFprb(Src
, MRI
) ||
546 !(isRegInGprb(Lo
, MRI
) && isRegInGprb(Hi
, MRI
)))
550 STI
.isFP64bit() ? Mips::ExtractElementF64_64
: Mips::ExtractElementF64
;
552 MachineInstr
*ExtractLo
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
556 if (!constrainSelectedInstRegOperands(*ExtractLo
, TII
, TRI
, RBI
))
559 MachineInstr
*ExtractHi
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
563 if (!constrainSelectedInstRegOperands(*ExtractHi
, TII
, TRI
, RBI
))
569 case G_IMPLICIT_DEF
: {
570 Register Dst
= I
.getOperand(0).getReg();
571 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::IMPLICIT_DEF
))
574 // Set class based on register bank, there can be fpr and gpr implicit def.
575 MRI
.setRegClass(Dst
, getRegClassForTypeOnBank(Dst
, MRI
));
579 MachineIRBuilder
B(I
);
580 if (!materialize32BitImm(I
.getOperand(0).getReg(),
581 I
.getOperand(1).getCImm()->getValue(), B
))
588 const APFloat
&FPimm
= I
.getOperand(1).getFPImm()->getValueAPF();
589 APInt APImm
= FPimm
.bitcastToAPInt();
590 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
593 Register GPRReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
594 MachineIRBuilder
B(I
);
595 if (!materialize32BitImm(GPRReg
, APImm
, B
))
598 MachineInstrBuilder MTC1
=
599 B
.buildInstr(Mips::MTC1
, {I
.getOperand(0).getReg()}, {GPRReg
});
600 if (!MTC1
.constrainAllUses(TII
, TRI
, RBI
))
604 Register GPRRegHigh
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
605 Register GPRRegLow
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
606 MachineIRBuilder
B(I
);
607 if (!materialize32BitImm(GPRRegHigh
, APImm
.getHiBits(32).trunc(32), B
))
609 if (!materialize32BitImm(GPRRegLow
, APImm
.getLoBits(32).trunc(32), B
))
612 MachineInstrBuilder PairF64
= B
.buildInstr(
613 STI
.isFP64bit() ? Mips::BuildPairF64_64
: Mips::BuildPairF64
,
614 {I
.getOperand(0).getReg()}, {GPRRegLow
, GPRRegHigh
});
615 if (!PairF64
.constrainAllUses(TII
, TRI
, RBI
))
623 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
624 unsigned FABSOpcode
=
625 Size
== 32 ? Mips::FABS_S
626 : STI
.isFP64bit() ? Mips::FABS_D64
: Mips::FABS_D32
;
627 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FABSOpcode
))
628 .add(I
.getOperand(0))
629 .add(I
.getOperand(1));
633 unsigned FromSize
= MRI
.getType(I
.getOperand(1).getReg()).getSizeInBits();
634 unsigned ToSize
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
636 assert((ToSize
== 32) && "Unsupported integer size for G_FPTOSI");
637 assert((FromSize
== 32 || FromSize
== 64) &&
638 "Unsupported floating point size for G_FPTOSI");
642 Opcode
= Mips::TRUNC_W_S
;
644 Opcode
= STI
.isFP64bit() ? Mips::TRUNC_W_D64
: Mips::TRUNC_W_D32
;
645 Register ResultInFPR
= MRI
.createVirtualRegister(&Mips::FGR32RegClass
);
646 MachineInstr
*Trunc
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
648 .addUse(I
.getOperand(1).getReg());
649 if (!constrainSelectedInstRegOperands(*Trunc
, TII
, TRI
, RBI
))
652 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MFC1
))
653 .addDef(I
.getOperand(0).getReg())
654 .addUse(ResultInFPR
);
655 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
661 case G_GLOBAL_VALUE
: {
662 const llvm::GlobalValue
*GVal
= I
.getOperand(1).getGlobal();
663 if (MF
.getTarget().isPositionIndependent()) {
664 MachineInstr
*LWGOT
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
665 .addDef(I
.getOperand(0).getReg())
666 .addReg(MF
.getInfo
<MipsFunctionInfo
>()
667 ->getGlobalBaseRegForGlobalISel(MF
))
668 .addGlobalAddress(GVal
);
669 // Global Values that don't have local linkage are handled differently
670 // when they are part of call sequence. MipsCallLowering::lowerCall
671 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
672 // MO_GOT_CALL flag when Callee doesn't have local linkage.
673 if (I
.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL
)
674 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL
);
676 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT
);
677 LWGOT
->addMemOperand(
678 MF
, MF
.getMachineMemOperand(MachinePointerInfo::getGOT(MF
),
679 MachineMemOperand::MOLoad
, 4, Align(4)));
680 if (!constrainSelectedInstRegOperands(*LWGOT
, TII
, TRI
, RBI
))
683 if (GVal
->hasLocalLinkage()) {
684 Register LWGOTDef
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
685 LWGOT
->getOperand(0).setReg(LWGOTDef
);
687 MachineInstr
*ADDiu
=
688 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
689 .addDef(I
.getOperand(0).getReg())
691 .addGlobalAddress(GVal
);
692 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
693 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
697 Register LUiReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
699 MachineInstr
*LUi
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LUi
))
701 .addGlobalAddress(GVal
);
702 LUi
->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI
);
703 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
706 MachineInstr
*ADDiu
=
707 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
708 .addDef(I
.getOperand(0).getReg())
710 .addGlobalAddress(GVal
);
711 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
712 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
719 if (MF
.getTarget().isPositionIndependent()) {
720 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
721 .addDef(I
.getOperand(0).getReg())
722 .addReg(MF
.getInfo
<MipsFunctionInfo
>()
723 ->getGlobalBaseRegForGlobalISel(MF
))
724 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_GOT
)
725 .addMemOperand(MF
.getMachineMemOperand(
726 MachinePointerInfo::getGOT(MF
), MachineMemOperand::MOLoad
, 4,
730 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LUi
))
731 .addDef(I
.getOperand(0).getReg())
732 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_ABS_HI
);
739 Register Def
, LHS
, RHS
;
740 Instr(unsigned Opcode
, Register Def
, Register LHS
, Register RHS
)
741 : Opcode(Opcode
), Def(Def
), LHS(LHS
), RHS(RHS
){};
743 bool hasImm() const {
744 if (Opcode
== Mips::SLTiu
|| Opcode
== Mips::XORi
)
750 SmallVector
<struct Instr
, 2> Instructions
;
751 Register ICMPReg
= I
.getOperand(0).getReg();
752 Register Temp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
753 Register LHS
= I
.getOperand(2).getReg();
754 Register RHS
= I
.getOperand(3).getReg();
755 CmpInst::Predicate Cond
=
756 static_cast<CmpInst::Predicate
>(I
.getOperand(1).getPredicate());
759 case CmpInst::ICMP_EQ
: // LHS == RHS -> (LHS ^ RHS) < 1
760 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
761 Instructions
.emplace_back(Mips::SLTiu
, ICMPReg
, Temp
, 1);
763 case CmpInst::ICMP_NE
: // LHS != RHS -> 0 < (LHS ^ RHS)
764 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
765 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, Mips::ZERO
, Temp
);
767 case CmpInst::ICMP_UGT
: // LHS > RHS -> RHS < LHS
768 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, RHS
, LHS
);
770 case CmpInst::ICMP_UGE
: // LHS >= RHS -> !(LHS < RHS)
771 Instructions
.emplace_back(Mips::SLTu
, Temp
, LHS
, RHS
);
772 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
774 case CmpInst::ICMP_ULT
: // LHS < RHS -> LHS < RHS
775 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, LHS
, RHS
);
777 case CmpInst::ICMP_ULE
: // LHS <= RHS -> !(RHS < LHS)
778 Instructions
.emplace_back(Mips::SLTu
, Temp
, RHS
, LHS
);
779 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
781 case CmpInst::ICMP_SGT
: // LHS > RHS -> RHS < LHS
782 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, RHS
, LHS
);
784 case CmpInst::ICMP_SGE
: // LHS >= RHS -> !(LHS < RHS)
785 Instructions
.emplace_back(Mips::SLT
, Temp
, LHS
, RHS
);
786 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
788 case CmpInst::ICMP_SLT
: // LHS < RHS -> LHS < RHS
789 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, LHS
, RHS
);
791 case CmpInst::ICMP_SLE
: // LHS <= RHS -> !(RHS < LHS)
792 Instructions
.emplace_back(Mips::SLT
, Temp
, RHS
, LHS
);
793 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
799 MachineIRBuilder
B(I
);
800 for (const struct Instr
&Instruction
: Instructions
) {
801 MachineInstrBuilder MIB
= B
.buildInstr(
802 Instruction
.Opcode
, {Instruction
.Def
}, {Instruction
.LHS
});
804 if (Instruction
.hasImm())
805 MIB
.addImm(Instruction
.RHS
);
807 MIB
.addUse(Instruction
.RHS
);
809 if (!MIB
.constrainAllUses(TII
, TRI
, RBI
))
817 unsigned MipsFCMPCondCode
;
818 bool isLogicallyNegated
;
819 switch (CmpInst::Predicate Cond
= static_cast<CmpInst::Predicate
>(
820 I
.getOperand(1).getPredicate())) {
821 case CmpInst::FCMP_UNO
: // Unordered
822 case CmpInst::FCMP_ORD
: // Ordered (OR)
823 MipsFCMPCondCode
= Mips::FCOND_UN
;
824 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UNO
;
826 case CmpInst::FCMP_OEQ
: // Equal
827 case CmpInst::FCMP_UNE
: // Not Equal (NEQ)
828 MipsFCMPCondCode
= Mips::FCOND_OEQ
;
829 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OEQ
;
831 case CmpInst::FCMP_UEQ
: // Unordered or Equal
832 case CmpInst::FCMP_ONE
: // Ordered or Greater Than or Less Than (OGL)
833 MipsFCMPCondCode
= Mips::FCOND_UEQ
;
834 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UEQ
;
836 case CmpInst::FCMP_OLT
: // Ordered or Less Than
837 case CmpInst::FCMP_UGE
: // Unordered or Greater Than or Equal (UGE)
838 MipsFCMPCondCode
= Mips::FCOND_OLT
;
839 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLT
;
841 case CmpInst::FCMP_ULT
: // Unordered or Less Than
842 case CmpInst::FCMP_OGE
: // Ordered or Greater Than or Equal (OGE)
843 MipsFCMPCondCode
= Mips::FCOND_ULT
;
844 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULT
;
846 case CmpInst::FCMP_OLE
: // Ordered or Less Than or Equal
847 case CmpInst::FCMP_UGT
: // Unordered or Greater Than (UGT)
848 MipsFCMPCondCode
= Mips::FCOND_OLE
;
849 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLE
;
851 case CmpInst::FCMP_ULE
: // Unordered or Less Than or Equal
852 case CmpInst::FCMP_OGT
: // Ordered or Greater Than (OGT)
853 MipsFCMPCondCode
= Mips::FCOND_ULE
;
854 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULE
;
860 // Default compare result in gpr register will be `true`.
861 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
862 // using MOVF_I. When orignal predicate (Cond) is logically negated
863 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
864 unsigned MoveOpcode
= isLogicallyNegated
? Mips::MOVT_I
: Mips::MOVF_I
;
866 Register TrueInReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
867 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
872 unsigned Size
= MRI
.getType(I
.getOperand(2).getReg()).getSizeInBits();
873 unsigned FCMPOpcode
=
874 Size
== 32 ? Mips::FCMP_S32
875 : STI
.isFP64bit() ? Mips::FCMP_D64
: Mips::FCMP_D32
;
876 MachineInstr
*FCMP
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FCMPOpcode
))
877 .addUse(I
.getOperand(2).getReg())
878 .addUse(I
.getOperand(3).getReg())
879 .addImm(MipsFCMPCondCode
);
880 if (!constrainSelectedInstRegOperands(*FCMP
, TII
, TRI
, RBI
))
883 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(MoveOpcode
))
884 .addDef(I
.getOperand(0).getReg())
888 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
895 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SYNC
)).addImm(0);
899 MipsFunctionInfo
*FuncInfo
= MF
.getInfo
<MipsFunctionInfo
>();
900 int FI
= FuncInfo
->getVarArgsFrameIndex();
902 Register LeaReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
903 MachineInstr
*LEA_ADDiu
=
904 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LEA_ADDiu
))
908 if (!constrainSelectedInstRegOperands(*LEA_ADDiu
, TII
, TRI
, RBI
))
911 MachineInstr
*Store
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SW
))
913 .addUse(I
.getOperand(0).getReg())
915 if (!constrainSelectedInstRegOperands(*Store
, TII
, TRI
, RBI
))
926 return constrainSelectedInstRegOperands(*MI
, TII
, TRI
, RBI
);
930 InstructionSelector
*
931 createMipsInstructionSelector(const MipsTargetMachine
&TM
,
932 const MipsSubtarget
&Subtarget
,
933 const MipsRegisterBankInfo
&RBI
) {
934 return new MipsInstructionSelector(TM
, Subtarget
, RBI
);
936 } // end namespace llvm