1 //===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
/// This file implements the targeting of the InstructionSelector class for
/// MIPS.
/// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
14 #include "MCTargetDesc/MipsInstPrinter.h"
15 #include "MipsMachineFunction.h"
16 #include "MipsRegisterBankInfo.h"
17 #include "MipsTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/IR/IntrinsicsMips.h"
23 #define DEBUG_TYPE "mips-isel"
29 #define GET_GLOBALISEL_PREDICATE_BITSET
30 #include "MipsGenGlobalISel.inc"
31 #undef GET_GLOBALISEL_PREDICATE_BITSET
33 class MipsInstructionSelector
: public InstructionSelector
{
35 MipsInstructionSelector(const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
36 const MipsRegisterBankInfo
&RBI
);
38 bool select(MachineInstr
&I
) override
;
39 static const char *getName() { return DEBUG_TYPE
; }
42 bool selectImpl(MachineInstr
&I
, CodeGenCoverage
&CoverageInfo
) const;
43 bool isRegInGprb(Register Reg
, MachineRegisterInfo
&MRI
) const;
44 bool isRegInFprb(Register Reg
, MachineRegisterInfo
&MRI
) const;
45 bool materialize32BitImm(Register DestReg
, APInt Imm
,
46 MachineIRBuilder
&B
) const;
47 bool selectCopy(MachineInstr
&I
, MachineRegisterInfo
&MRI
) const;
48 const TargetRegisterClass
*
49 getRegClassForTypeOnBank(Register Reg
, MachineRegisterInfo
&MRI
) const;
50 unsigned selectLoadStoreOpCode(MachineInstr
&I
,
51 MachineRegisterInfo
&MRI
) const;
52 bool buildUnalignedStore(MachineInstr
&I
, unsigned Opc
,
53 MachineOperand
&BaseAddr
, unsigned Offset
,
54 MachineMemOperand
*MMO
) const;
55 bool buildUnalignedLoad(MachineInstr
&I
, unsigned Opc
, Register Dest
,
56 MachineOperand
&BaseAddr
, unsigned Offset
,
57 Register TiedDest
, MachineMemOperand
*MMO
) const;
59 const MipsTargetMachine
&TM
;
60 const MipsSubtarget
&STI
;
61 const MipsInstrInfo
&TII
;
62 const MipsRegisterInfo
&TRI
;
63 const MipsRegisterBankInfo
&RBI
;
65 #define GET_GLOBALISEL_PREDICATES_DECL
66 #include "MipsGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_PREDICATES_DECL
69 #define GET_GLOBALISEL_TEMPORARIES_DECL
70 #include "MipsGenGlobalISel.inc"
71 #undef GET_GLOBALISEL_TEMPORARIES_DECL
74 } // end anonymous namespace
76 #define GET_GLOBALISEL_IMPL
77 #include "MipsGenGlobalISel.inc"
78 #undef GET_GLOBALISEL_IMPL
80 MipsInstructionSelector::MipsInstructionSelector(
81 const MipsTargetMachine
&TM
, const MipsSubtarget
&STI
,
82 const MipsRegisterBankInfo
&RBI
)
83 : InstructionSelector(), TM(TM
), STI(STI
), TII(*STI
.getInstrInfo()),
84 TRI(*STI
.getRegisterInfo()), RBI(RBI
),
86 #define GET_GLOBALISEL_PREDICATES_INIT
87 #include "MipsGenGlobalISel.inc"
88 #undef GET_GLOBALISEL_PREDICATES_INIT
89 #define GET_GLOBALISEL_TEMPORARIES_INIT
90 #include "MipsGenGlobalISel.inc"
91 #undef GET_GLOBALISEL_TEMPORARIES_INIT
95 bool MipsInstructionSelector::isRegInGprb(Register Reg
,
96 MachineRegisterInfo
&MRI
) const {
97 return RBI
.getRegBank(Reg
, MRI
, TRI
)->getID() == Mips::GPRBRegBankID
;
100 bool MipsInstructionSelector::isRegInFprb(Register Reg
,
101 MachineRegisterInfo
&MRI
) const {
102 return RBI
.getRegBank(Reg
, MRI
, TRI
)->getID() == Mips::FPRBRegBankID
;
105 bool MipsInstructionSelector::selectCopy(MachineInstr
&I
,
106 MachineRegisterInfo
&MRI
) const {
107 Register DstReg
= I
.getOperand(0).getReg();
108 if (Register::isPhysicalRegister(DstReg
))
111 const TargetRegisterClass
*RC
= getRegClassForTypeOnBank(DstReg
, MRI
);
112 if (!RBI
.constrainGenericRegister(DstReg
, *RC
, MRI
)) {
113 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII
.getName(I
.getOpcode())
120 const TargetRegisterClass
*MipsInstructionSelector::getRegClassForTypeOnBank(
121 Register Reg
, MachineRegisterInfo
&MRI
) const {
122 const LLT Ty
= MRI
.getType(Reg
);
123 const unsigned TySize
= Ty
.getSizeInBits();
125 if (isRegInGprb(Reg
, MRI
)) {
126 assert((Ty
.isScalar() || Ty
.isPointer()) && TySize
== 32 &&
127 "Register class not available for LLT, register bank combination");
128 return &Mips::GPR32RegClass
;
131 if (isRegInFprb(Reg
, MRI
)) {
133 assert((TySize
== 32 || TySize
== 64) &&
134 "Register class not available for LLT, register bank combination");
136 return &Mips::FGR32RegClass
;
137 return STI
.isFP64bit() ? &Mips::FGR64RegClass
: &Mips::AFGR64RegClass
;
141 llvm_unreachable("Unsupported register bank.");
144 bool MipsInstructionSelector::materialize32BitImm(Register DestReg
, APInt Imm
,
145 MachineIRBuilder
&B
) const {
146 assert(Imm
.getBitWidth() == 32 && "Unsupported immediate size.");
147 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
148 if (Imm
.getHiBits(16).isNullValue()) {
150 B
.buildInstr(Mips::ORi
, {DestReg
}, {Register(Mips::ZERO
)})
151 .addImm(Imm
.getLoBits(16).getLimitedValue());
152 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
154 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
155 if (Imm
.getLoBits(16).isNullValue()) {
156 MachineInstr
*Inst
= B
.buildInstr(Mips::LUi
, {DestReg
}, {})
157 .addImm(Imm
.getHiBits(16).getLimitedValue());
158 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
160 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
161 if (Imm
.isSignedIntN(16)) {
163 B
.buildInstr(Mips::ADDiu
, {DestReg
}, {Register(Mips::ZERO
)})
164 .addImm(Imm
.getLoBits(16).getLimitedValue());
165 return constrainSelectedInstRegOperands(*Inst
, TII
, TRI
, RBI
);
167 // Values that cannot be materialized with single immediate instruction.
168 Register LUiReg
= B
.getMRI()->createVirtualRegister(&Mips::GPR32RegClass
);
169 MachineInstr
*LUi
= B
.buildInstr(Mips::LUi
, {LUiReg
}, {})
170 .addImm(Imm
.getHiBits(16).getLimitedValue());
171 MachineInstr
*ORi
= B
.buildInstr(Mips::ORi
, {DestReg
}, {LUiReg
})
172 .addImm(Imm
.getLoBits(16).getLimitedValue());
173 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
175 if (!constrainSelectedInstRegOperands(*ORi
, TII
, TRI
, RBI
))
180 /// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
182 MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr
&I
,
183 MachineRegisterInfo
&MRI
) const {
184 const Register ValueReg
= I
.getOperand(0).getReg();
185 const LLT Ty
= MRI
.getType(ValueReg
);
186 const unsigned TySize
= Ty
.getSizeInBits();
187 const unsigned MemSizeInBytes
= (*I
.memoperands_begin())->getSize();
188 unsigned Opc
= I
.getOpcode();
189 const bool isStore
= Opc
== TargetOpcode::G_STORE
;
191 if (isRegInGprb(ValueReg
, MRI
)) {
192 assert(((Ty
.isScalar() && TySize
== 32) ||
193 (Ty
.isPointer() && TySize
== 32 && MemSizeInBytes
== 4)) &&
194 "Unsupported register bank, LLT, MemSizeInBytes combination");
197 switch (MemSizeInBytes
) {
208 // Unspecified extending load is selected into zeroExtending load.
209 switch (MemSizeInBytes
) {
213 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LH
: Mips::LHu
;
215 return Opc
== TargetOpcode::G_SEXTLOAD
? Mips::LB
: Mips::LBu
;
221 if (isRegInFprb(ValueReg
, MRI
)) {
223 assert(((TySize
== 32 && MemSizeInBytes
== 4) ||
224 (TySize
== 64 && MemSizeInBytes
== 8)) &&
225 "Unsupported register bank, LLT, MemSizeInBytes combination");
227 if (MemSizeInBytes
== 4)
228 return isStore
? Mips::SWC1
: Mips::LWC1
;
231 return isStore
? Mips::SDC164
: Mips::LDC164
;
232 return isStore
? Mips::SDC1
: Mips::LDC1
;
236 assert(STI
.hasMSA() && "Vector instructions require target with MSA.");
237 assert((TySize
== 128 && MemSizeInBytes
== 16) &&
238 "Unsupported register bank, LLT, MemSizeInBytes combination");
239 switch (Ty
.getElementType().getSizeInBits()) {
241 return isStore
? Mips::ST_B
: Mips::LD_B
;
243 return isStore
? Mips::ST_H
: Mips::LD_H
;
245 return isStore
? Mips::ST_W
: Mips::LD_W
;
247 return isStore
? Mips::ST_D
: Mips::LD_D
;
257 bool MipsInstructionSelector::buildUnalignedStore(
258 MachineInstr
&I
, unsigned Opc
, MachineOperand
&BaseAddr
, unsigned Offset
,
259 MachineMemOperand
*MMO
) const {
260 MachineInstr
*NewInst
=
261 BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(Opc
))
262 .add(I
.getOperand(0))
266 if (!constrainSelectedInstRegOperands(*NewInst
, TII
, TRI
, RBI
))
271 bool MipsInstructionSelector::buildUnalignedLoad(
272 MachineInstr
&I
, unsigned Opc
, Register Dest
, MachineOperand
&BaseAddr
,
273 unsigned Offset
, Register TiedDest
, MachineMemOperand
*MMO
) const {
274 MachineInstr
*NewInst
=
275 BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(Opc
))
280 .addMemOperand(*I
.memoperands_begin());
281 if (!constrainSelectedInstRegOperands(*NewInst
, TII
, TRI
, RBI
))
286 bool MipsInstructionSelector::select(MachineInstr
&I
) {
288 MachineBasicBlock
&MBB
= *I
.getParent();
289 MachineFunction
&MF
= *MBB
.getParent();
290 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
292 if (!isPreISelGenericOpcode(I
.getOpcode())) {
294 return selectCopy(I
, MRI
);
299 if (I
.getOpcode() == Mips::G_MUL
&&
300 isRegInGprb(I
.getOperand(0).getReg(), MRI
)) {
301 MachineInstr
*Mul
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MUL
))
302 .add(I
.getOperand(0))
303 .add(I
.getOperand(1))
304 .add(I
.getOperand(2));
305 if (!constrainSelectedInstRegOperands(*Mul
, TII
, TRI
, RBI
))
307 Mul
->getOperand(3).setIsDead(true);
308 Mul
->getOperand(4).setIsDead(true);
314 if (selectImpl(I
, *CoverageInfo
))
317 MachineInstr
*MI
= nullptr;
318 using namespace TargetOpcode
;
320 switch (I
.getOpcode()) {
322 Register PseudoMULTuReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
323 MachineInstr
*PseudoMULTu
, *PseudoMove
;
325 PseudoMULTu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMULTu
))
326 .addDef(PseudoMULTuReg
)
327 .add(I
.getOperand(1))
328 .add(I
.getOperand(2));
329 if (!constrainSelectedInstRegOperands(*PseudoMULTu
, TII
, TRI
, RBI
))
332 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoMFHI
))
333 .addDef(I
.getOperand(0).getReg())
334 .addUse(PseudoMULTuReg
);
335 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
342 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
343 .add(I
.getOperand(0))
344 .add(I
.getOperand(1))
345 .add(I
.getOperand(2));
350 I
.setDesc(TII
.get(COPY
));
351 return selectCopy(I
, MRI
);
353 case G_FRAME_INDEX
: {
354 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
355 .add(I
.getOperand(0))
356 .add(I
.getOperand(1))
361 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::BNE
))
362 .add(I
.getOperand(0))
364 .add(I
.getOperand(1));
369 MF
.getJumpTableInfo()->getEntrySize(MF
.getDataLayout());
370 assert(isPowerOf2_32(EntrySize
) &&
371 "Non-power-of-two jump-table entry size not supported.");
373 Register JTIndex
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
374 MachineInstr
*SLL
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SLL
))
376 .addUse(I
.getOperand(2).getReg())
377 .addImm(Log2_32(EntrySize
));
378 if (!constrainSelectedInstRegOperands(*SLL
, TII
, TRI
, RBI
))
381 Register DestAddress
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
382 MachineInstr
*ADDu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
384 .addUse(I
.getOperand(0).getReg())
386 if (!constrainSelectedInstRegOperands(*ADDu
, TII
, TRI
, RBI
))
389 Register Dest
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
391 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
394 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_ABS_LO
)
395 .addMemOperand(MF
.getMachineMemOperand(
396 MachinePointerInfo(), MachineMemOperand::MOLoad
, 4, Align(4)));
397 if (!constrainSelectedInstRegOperands(*LW
, TII
, TRI
, RBI
))
400 if (MF
.getTarget().isPositionIndependent()) {
401 Register DestTmp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
402 LW
->getOperand(0).setReg(DestTmp
);
403 MachineInstr
*ADDu
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDu
))
406 .addUse(MF
.getInfo
<MipsFunctionInfo
>()
407 ->getGlobalBaseRegForGlobalISel(MF
));
408 if (!constrainSelectedInstRegOperands(*ADDu
, TII
, TRI
, RBI
))
412 MachineInstr
*Branch
=
413 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoIndirectBranch
))
415 if (!constrainSelectedInstRegOperands(*Branch
, TII
, TRI
, RBI
))
422 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::PseudoIndirectBranch
))
423 .add(I
.getOperand(0));
427 const Register DestReg
= I
.getOperand(0).getReg();
429 const TargetRegisterClass
*DefRC
= nullptr;
430 if (Register::isPhysicalRegister(DestReg
))
431 DefRC
= TRI
.getRegClass(DestReg
);
433 DefRC
= getRegClassForTypeOnBank(DestReg
, MRI
);
435 I
.setDesc(TII
.get(TargetOpcode::PHI
));
436 return RBI
.constrainGenericRegister(DestReg
, *DefRC
, MRI
);
442 auto MMO
= *I
.memoperands_begin();
443 MachineOperand BaseAddr
= I
.getOperand(1);
444 int64_t SignedOffset
= 0;
445 // Try to fold load/store + G_PTR_ADD + G_CONSTANT
446 // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
447 // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
448 // %LoadResult/%StoreSrc = load/store %Addr(p0)
450 // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
452 MachineInstr
*Addr
= MRI
.getVRegDef(I
.getOperand(1).getReg());
453 if (Addr
->getOpcode() == G_PTR_ADD
) {
454 MachineInstr
*Offset
= MRI
.getVRegDef(Addr
->getOperand(2).getReg());
455 if (Offset
->getOpcode() == G_CONSTANT
) {
456 APInt OffsetValue
= Offset
->getOperand(1).getCImm()->getValue();
457 if (OffsetValue
.isSignedIntN(16)) {
458 BaseAddr
= Addr
->getOperand(1);
459 SignedOffset
= OffsetValue
.getSExtValue();
464 // Unaligned memory access
465 if (MMO
->getAlign() < MMO
->getSize() &&
466 !STI
.systemSupportsUnalignedAccess()) {
467 if (MMO
->getSize() != 4 || !isRegInGprb(I
.getOperand(0).getReg(), MRI
))
470 if (I
.getOpcode() == G_STORE
) {
471 if (!buildUnalignedStore(I
, Mips::SWL
, BaseAddr
, SignedOffset
+ 3, MMO
))
473 if (!buildUnalignedStore(I
, Mips::SWR
, BaseAddr
, SignedOffset
, MMO
))
479 if (I
.getOpcode() == G_LOAD
) {
480 Register ImplDef
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
481 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::IMPLICIT_DEF
))
483 Register Tmp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
484 if (!buildUnalignedLoad(I
, Mips::LWL
, Tmp
, BaseAddr
, SignedOffset
+ 3,
487 if (!buildUnalignedLoad(I
, Mips::LWR
, I
.getOperand(0).getReg(),
488 BaseAddr
, SignedOffset
, Tmp
, MMO
))
497 const unsigned NewOpc
= selectLoadStoreOpCode(I
, MRI
);
498 if (NewOpc
== I
.getOpcode())
501 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(NewOpc
))
502 .add(I
.getOperand(0))
504 .addImm(SignedOffset
)
512 Register HILOReg
= MRI
.createVirtualRegister(&Mips::ACC64RegClass
);
513 bool IsSigned
= I
.getOpcode() == G_SREM
|| I
.getOpcode() == G_SDIV
;
514 bool IsDiv
= I
.getOpcode() == G_UDIV
|| I
.getOpcode() == G_SDIV
;
516 MachineInstr
*PseudoDIV
, *PseudoMove
;
517 PseudoDIV
= BuildMI(MBB
, I
, I
.getDebugLoc(),
518 TII
.get(IsSigned
? Mips::PseudoSDIV
: Mips::PseudoUDIV
))
520 .add(I
.getOperand(1))
521 .add(I
.getOperand(2));
522 if (!constrainSelectedInstRegOperands(*PseudoDIV
, TII
, TRI
, RBI
))
525 PseudoMove
= BuildMI(MBB
, I
, I
.getDebugLoc(),
526 TII
.get(IsDiv
? Mips::PseudoMFLO
: Mips::PseudoMFHI
))
527 .addDef(I
.getOperand(0).getReg())
529 if (!constrainSelectedInstRegOperands(*PseudoMove
, TII
, TRI
, RBI
))
536 // Handle operands with pointer type.
537 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MOVN_I_I
))
538 .add(I
.getOperand(0))
539 .add(I
.getOperand(2))
540 .add(I
.getOperand(1))
541 .add(I
.getOperand(3));
544 case G_UNMERGE_VALUES
: {
545 if (I
.getNumOperands() != 3)
547 Register Src
= I
.getOperand(2).getReg();
548 Register Lo
= I
.getOperand(0).getReg();
549 Register Hi
= I
.getOperand(1).getReg();
550 if (!isRegInFprb(Src
, MRI
) ||
551 !(isRegInGprb(Lo
, MRI
) && isRegInGprb(Hi
, MRI
)))
555 STI
.isFP64bit() ? Mips::ExtractElementF64_64
: Mips::ExtractElementF64
;
557 MachineInstr
*ExtractLo
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
561 if (!constrainSelectedInstRegOperands(*ExtractLo
, TII
, TRI
, RBI
))
564 MachineInstr
*ExtractHi
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
568 if (!constrainSelectedInstRegOperands(*ExtractHi
, TII
, TRI
, RBI
))
574 case G_IMPLICIT_DEF
: {
575 Register Dst
= I
.getOperand(0).getReg();
576 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::IMPLICIT_DEF
))
579 // Set class based on register bank, there can be fpr and gpr implicit def.
580 MRI
.setRegClass(Dst
, getRegClassForTypeOnBank(Dst
, MRI
));
584 MachineIRBuilder
B(I
);
585 if (!materialize32BitImm(I
.getOperand(0).getReg(),
586 I
.getOperand(1).getCImm()->getValue(), B
))
593 const APFloat
&FPimm
= I
.getOperand(1).getFPImm()->getValueAPF();
594 APInt APImm
= FPimm
.bitcastToAPInt();
595 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
598 Register GPRReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
599 MachineIRBuilder
B(I
);
600 if (!materialize32BitImm(GPRReg
, APImm
, B
))
603 MachineInstrBuilder MTC1
=
604 B
.buildInstr(Mips::MTC1
, {I
.getOperand(0).getReg()}, {GPRReg
});
605 if (!MTC1
.constrainAllUses(TII
, TRI
, RBI
))
609 Register GPRRegHigh
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
610 Register GPRRegLow
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
611 MachineIRBuilder
B(I
);
612 if (!materialize32BitImm(GPRRegHigh
, APImm
.getHiBits(32).trunc(32), B
))
614 if (!materialize32BitImm(GPRRegLow
, APImm
.getLoBits(32).trunc(32), B
))
617 MachineInstrBuilder PairF64
= B
.buildInstr(
618 STI
.isFP64bit() ? Mips::BuildPairF64_64
: Mips::BuildPairF64
,
619 {I
.getOperand(0).getReg()}, {GPRRegLow
, GPRRegHigh
});
620 if (!PairF64
.constrainAllUses(TII
, TRI
, RBI
))
628 unsigned Size
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
629 unsigned FABSOpcode
=
630 Size
== 32 ? Mips::FABS_S
631 : STI
.isFP64bit() ? Mips::FABS_D64
: Mips::FABS_D32
;
632 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FABSOpcode
))
633 .add(I
.getOperand(0))
634 .add(I
.getOperand(1));
638 unsigned FromSize
= MRI
.getType(I
.getOperand(1).getReg()).getSizeInBits();
639 unsigned ToSize
= MRI
.getType(I
.getOperand(0).getReg()).getSizeInBits();
641 assert((ToSize
== 32) && "Unsupported integer size for G_FPTOSI");
642 assert((FromSize
== 32 || FromSize
== 64) &&
643 "Unsupported floating point size for G_FPTOSI");
647 Opcode
= Mips::TRUNC_W_S
;
649 Opcode
= STI
.isFP64bit() ? Mips::TRUNC_W_D64
: Mips::TRUNC_W_D32
;
650 Register ResultInFPR
= MRI
.createVirtualRegister(&Mips::FGR32RegClass
);
651 MachineInstr
*Trunc
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Opcode
))
653 .addUse(I
.getOperand(1).getReg());
654 if (!constrainSelectedInstRegOperands(*Trunc
, TII
, TRI
, RBI
))
657 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::MFC1
))
658 .addDef(I
.getOperand(0).getReg())
659 .addUse(ResultInFPR
);
660 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
666 case G_GLOBAL_VALUE
: {
667 const llvm::GlobalValue
*GVal
= I
.getOperand(1).getGlobal();
668 if (MF
.getTarget().isPositionIndependent()) {
669 MachineInstr
*LWGOT
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
670 .addDef(I
.getOperand(0).getReg())
671 .addReg(MF
.getInfo
<MipsFunctionInfo
>()
672 ->getGlobalBaseRegForGlobalISel(MF
))
673 .addGlobalAddress(GVal
);
674 // Global Values that don't have local linkage are handled differently
675 // when they are part of call sequence. MipsCallLowering::lowerCall
676 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
677 // MO_GOT_CALL flag when Callee doesn't have local linkage.
678 if (I
.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL
)
679 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL
);
681 LWGOT
->getOperand(2).setTargetFlags(MipsII::MO_GOT
);
682 LWGOT
->addMemOperand(
683 MF
, MF
.getMachineMemOperand(MachinePointerInfo::getGOT(MF
),
684 MachineMemOperand::MOLoad
, 4, Align(4)));
685 if (!constrainSelectedInstRegOperands(*LWGOT
, TII
, TRI
, RBI
))
688 if (GVal
->hasLocalLinkage()) {
689 Register LWGOTDef
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
690 LWGOT
->getOperand(0).setReg(LWGOTDef
);
692 MachineInstr
*ADDiu
=
693 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
694 .addDef(I
.getOperand(0).getReg())
696 .addGlobalAddress(GVal
);
697 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
698 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
702 Register LUiReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
704 MachineInstr
*LUi
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LUi
))
706 .addGlobalAddress(GVal
);
707 LUi
->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI
);
708 if (!constrainSelectedInstRegOperands(*LUi
, TII
, TRI
, RBI
))
711 MachineInstr
*ADDiu
=
712 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
713 .addDef(I
.getOperand(0).getReg())
715 .addGlobalAddress(GVal
);
716 ADDiu
->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO
);
717 if (!constrainSelectedInstRegOperands(*ADDiu
, TII
, TRI
, RBI
))
724 if (MF
.getTarget().isPositionIndependent()) {
725 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LW
))
726 .addDef(I
.getOperand(0).getReg())
727 .addReg(MF
.getInfo
<MipsFunctionInfo
>()
728 ->getGlobalBaseRegForGlobalISel(MF
))
729 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_GOT
)
730 .addMemOperand(MF
.getMachineMemOperand(
731 MachinePointerInfo::getGOT(MF
), MachineMemOperand::MOLoad
, 4,
735 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LUi
))
736 .addDef(I
.getOperand(0).getReg())
737 .addJumpTableIndex(I
.getOperand(1).getIndex(), MipsII::MO_ABS_HI
);
744 Register Def
, LHS
, RHS
;
745 Instr(unsigned Opcode
, Register Def
, Register LHS
, Register RHS
)
746 : Opcode(Opcode
), Def(Def
), LHS(LHS
), RHS(RHS
){};
748 bool hasImm() const {
749 if (Opcode
== Mips::SLTiu
|| Opcode
== Mips::XORi
)
755 SmallVector
<struct Instr
, 2> Instructions
;
756 Register ICMPReg
= I
.getOperand(0).getReg();
757 Register Temp
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
758 Register LHS
= I
.getOperand(2).getReg();
759 Register RHS
= I
.getOperand(3).getReg();
760 CmpInst::Predicate Cond
=
761 static_cast<CmpInst::Predicate
>(I
.getOperand(1).getPredicate());
764 case CmpInst::ICMP_EQ
: // LHS == RHS -> (LHS ^ RHS) < 1
765 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
766 Instructions
.emplace_back(Mips::SLTiu
, ICMPReg
, Temp
, 1);
768 case CmpInst::ICMP_NE
: // LHS != RHS -> 0 < (LHS ^ RHS)
769 Instructions
.emplace_back(Mips::XOR
, Temp
, LHS
, RHS
);
770 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, Mips::ZERO
, Temp
);
772 case CmpInst::ICMP_UGT
: // LHS > RHS -> RHS < LHS
773 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, RHS
, LHS
);
775 case CmpInst::ICMP_UGE
: // LHS >= RHS -> !(LHS < RHS)
776 Instructions
.emplace_back(Mips::SLTu
, Temp
, LHS
, RHS
);
777 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
779 case CmpInst::ICMP_ULT
: // LHS < RHS -> LHS < RHS
780 Instructions
.emplace_back(Mips::SLTu
, ICMPReg
, LHS
, RHS
);
782 case CmpInst::ICMP_ULE
: // LHS <= RHS -> !(RHS < LHS)
783 Instructions
.emplace_back(Mips::SLTu
, Temp
, RHS
, LHS
);
784 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
786 case CmpInst::ICMP_SGT
: // LHS > RHS -> RHS < LHS
787 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, RHS
, LHS
);
789 case CmpInst::ICMP_SGE
: // LHS >= RHS -> !(LHS < RHS)
790 Instructions
.emplace_back(Mips::SLT
, Temp
, LHS
, RHS
);
791 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
793 case CmpInst::ICMP_SLT
: // LHS < RHS -> LHS < RHS
794 Instructions
.emplace_back(Mips::SLT
, ICMPReg
, LHS
, RHS
);
796 case CmpInst::ICMP_SLE
: // LHS <= RHS -> !(RHS < LHS)
797 Instructions
.emplace_back(Mips::SLT
, Temp
, RHS
, LHS
);
798 Instructions
.emplace_back(Mips::XORi
, ICMPReg
, Temp
, 1);
804 MachineIRBuilder
B(I
);
805 for (const struct Instr
&Instruction
: Instructions
) {
806 MachineInstrBuilder MIB
= B
.buildInstr(
807 Instruction
.Opcode
, {Instruction
.Def
}, {Instruction
.LHS
});
809 if (Instruction
.hasImm())
810 MIB
.addImm(Instruction
.RHS
);
812 MIB
.addUse(Instruction
.RHS
);
814 if (!MIB
.constrainAllUses(TII
, TRI
, RBI
))
822 unsigned MipsFCMPCondCode
;
823 bool isLogicallyNegated
;
824 switch (CmpInst::Predicate Cond
= static_cast<CmpInst::Predicate
>(
825 I
.getOperand(1).getPredicate())) {
826 case CmpInst::FCMP_UNO
: // Unordered
827 case CmpInst::FCMP_ORD
: // Ordered (OR)
828 MipsFCMPCondCode
= Mips::FCOND_UN
;
829 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UNO
;
831 case CmpInst::FCMP_OEQ
: // Equal
832 case CmpInst::FCMP_UNE
: // Not Equal (NEQ)
833 MipsFCMPCondCode
= Mips::FCOND_OEQ
;
834 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OEQ
;
836 case CmpInst::FCMP_UEQ
: // Unordered or Equal
837 case CmpInst::FCMP_ONE
: // Ordered or Greater Than or Less Than (OGL)
838 MipsFCMPCondCode
= Mips::FCOND_UEQ
;
839 isLogicallyNegated
= Cond
!= CmpInst::FCMP_UEQ
;
841 case CmpInst::FCMP_OLT
: // Ordered or Less Than
842 case CmpInst::FCMP_UGE
: // Unordered or Greater Than or Equal (UGE)
843 MipsFCMPCondCode
= Mips::FCOND_OLT
;
844 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLT
;
846 case CmpInst::FCMP_ULT
: // Unordered or Less Than
847 case CmpInst::FCMP_OGE
: // Ordered or Greater Than or Equal (OGE)
848 MipsFCMPCondCode
= Mips::FCOND_ULT
;
849 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULT
;
851 case CmpInst::FCMP_OLE
: // Ordered or Less Than or Equal
852 case CmpInst::FCMP_UGT
: // Unordered or Greater Than (UGT)
853 MipsFCMPCondCode
= Mips::FCOND_OLE
;
854 isLogicallyNegated
= Cond
!= CmpInst::FCMP_OLE
;
856 case CmpInst::FCMP_ULE
: // Unordered or Less Than or Equal
857 case CmpInst::FCMP_OGT
: // Ordered or Greater Than (OGT)
858 MipsFCMPCondCode
= Mips::FCOND_ULE
;
859 isLogicallyNegated
= Cond
!= CmpInst::FCMP_ULE
;
865 // Default compare result in gpr register will be `true`.
866 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
867 // using MOVF_I. When orignal predicate (Cond) is logically negated
868 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
869 unsigned MoveOpcode
= isLogicallyNegated
? Mips::MOVT_I
: Mips::MOVF_I
;
871 Register TrueInReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
872 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::ADDiu
))
877 unsigned Size
= MRI
.getType(I
.getOperand(2).getReg()).getSizeInBits();
878 unsigned FCMPOpcode
=
879 Size
== 32 ? Mips::FCMP_S32
880 : STI
.isFP64bit() ? Mips::FCMP_D64
: Mips::FCMP_D32
;
881 MachineInstr
*FCMP
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(FCMPOpcode
))
882 .addUse(I
.getOperand(2).getReg())
883 .addUse(I
.getOperand(3).getReg())
884 .addImm(MipsFCMPCondCode
);
885 if (!constrainSelectedInstRegOperands(*FCMP
, TII
, TRI
, RBI
))
888 MachineInstr
*Move
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(MoveOpcode
))
889 .addDef(I
.getOperand(0).getReg())
893 if (!constrainSelectedInstRegOperands(*Move
, TII
, TRI
, RBI
))
900 MI
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SYNC
)).addImm(0);
904 MipsFunctionInfo
*FuncInfo
= MF
.getInfo
<MipsFunctionInfo
>();
905 int FI
= FuncInfo
->getVarArgsFrameIndex();
907 Register LeaReg
= MRI
.createVirtualRegister(&Mips::GPR32RegClass
);
908 MachineInstr
*LEA_ADDiu
=
909 BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::LEA_ADDiu
))
913 if (!constrainSelectedInstRegOperands(*LEA_ADDiu
, TII
, TRI
, RBI
))
916 MachineInstr
*Store
= BuildMI(MBB
, I
, I
.getDebugLoc(), TII
.get(Mips::SW
))
918 .addUse(I
.getOperand(0).getReg())
920 if (!constrainSelectedInstRegOperands(*Store
, TII
, TRI
, RBI
))
931 return constrainSelectedInstRegOperands(*MI
, TII
, TRI
, RBI
);
935 InstructionSelector
*createMipsInstructionSelector(const MipsTargetMachine
&TM
,
936 MipsSubtarget
&Subtarget
,
937 MipsRegisterBankInfo
&RBI
) {
938 return new MipsInstructionSelector(TM
, Subtarget
, RBI
);
940 } // end namespace llvm