//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"
using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts; it may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI, MachineRegisterInfo &MRI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &MI, MachineIRBuilder &MIB,
                                      MachineRegisterInfo &MRI) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;
  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name of
  // Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
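
// ComplexPattern helper for shift amounts. RISC-V shift instructions read
// only the low log2(XLEN) bits of the amount register, so an explicit mask,
// zero-extension, or add/sub of a multiple of the shift width applied to the
// amount is redundant; e.g. on RV64, (sll x, (and y, 63)) can select as
// SLL x, y.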
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;
  MachineRegisterInfo &MRI = MF->getRegInfo();

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI.getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");

  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(ShAmtReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
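
// ComplexPattern helper for the Zba shift-and-add instructions: shNadd
// computes (rs1 << N) + rs2 for N in {1, 2, 3}. The patterns below fold a
// shift/mask combination on the shifted operand into a single SRLI or SRLIW
// feeding the SHXADD.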
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}
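
// ComplexPattern helper for the Zba shNadd.uw instructions, which compute
// (zext32(rs1) << N) + rs2; the implicit zero-extension of rs1 lets the AND
// mask matched below be absorbed.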
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt for the X amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}
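
// ComplexPattern helper for reg+simm12 addressing: folds frame indexes and
// base pointers with a 12-bit signed constant offset into the (base, imm)
// operand pair used by loads and stores; anything else is rendered as
// (reg, 0).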
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
/// CC must be an ICMP Predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, MachineRegisterInfo &MRI,
                                 RISCVCC::CondCode &CC, Register &LHS,
                                 Register &RHS) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but changing the
    // direction of the CC and swapping LHS and RHS are.
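    // For example, (icmp sgt a, b) becomes (icmp slt b, a), which then maps
    // onto BLT with the operands exchanged.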
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB, MRI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI.getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI, MRI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
    return selectCopy(MI, MRI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    if (Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X : RISCV::FMV_W_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, MRI, GV->isDSOLocal(),
                      GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), MRI, CC, LHS, RHS);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because it uses FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB, MRI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB, MRI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB, MRI);
  default:
    return false;
  }
}
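
// On RV32, an s64 value on the FPR bank is assembled from / split into two
// s32 GPR halves: G_MERGE_VALUES and G_UNMERGE_VALUES map onto the
// BuildPairF64Pseudo and SplitF64Pseudo instructions used below.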
bool RISCVInstructionSelector::selectMergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build a F64 Pair from operands
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
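
// Pointers are plain XLen-bit integers on RISC-V, so before any selection
// attempt, rewrite pointer-typed G_PTR_ADD and G_PTRMASK into integer G_ADD
// and G_AND; replacePtrWithInt() converts the pointer input with a
// G_PTRTOINT that is immediately selected.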
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI.setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI.setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  // TODO: Non-GPR register classes.
  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
                                          MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return false;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
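
// Materialize an arbitrary XLen-bit constant into DstReg using the sequence
// computed by RISCVMatInt (typically LUI+ADDI(W) chains, possibly with
// intermediate shifts), threading each step through a fresh virtual register.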
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  MachineRegisterInfo &MRI = *MIB.getMRI();

  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB,
                                          MachineRegisterInfo &MRI,
                                          bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
:
1032 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1033 // relocation needs to reference a label that points to the auipc
1034 // instruction itself, not the global. This cannot be done inside the
1035 // instruction selector.
1037 // An extern weak symbol may be undefined, i.e. have value 0, which may
1038 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1039 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1040 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1041 MachineFunction
&MF
= *MI
.getParent()->getParent();
1042 MachineMemOperand
*MemOp
= MF
.getMachineMemOperand(
1043 MachinePointerInfo::getGOT(MF
),
1044 MachineMemOperand::MOLoad
| MachineMemOperand::MODereferenceable
|
1045 MachineMemOperand::MOInvariant
,
1046 DefTy
, Align(DefTy
.getSizeInBits() / 8));
1048 auto Result
= MIB
.buildInstr(RISCV::PseudoLGA
, {DefReg
}, {})
1050 .addMemOperand(MemOp
);
1052 if (!constrainSelectedInstRegOperands(*Result
, TII
, TRI
, RBI
))
1055 MI
.eraseFromParent();
1059 // Generate a sequence for accessing addresses within any 2GiB range
1060 // within the address space. This generates the pattern (PseudoLLA sym),
1061 // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1062 MI
.setDesc(TII
.get(RISCV::PseudoLLA
));
1063 return constrainSelectedInstRegOperands(MI
, TII
, TRI
, RBI
);

bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  if (!STI.is64Bit())
    return false;

  const MachineOperand &Size = MI.getOperand(2);
  // Only Size == 32 (i.e. shift by 32 bits) is acceptable at this point.
  if (!Size.isImm() || Size.getImm() != 32)
    return false;

  const MachineOperand &Src = MI.getOperand(1);
  const MachineOperand &Dst = MI.getOperand(0);
  // addiw rd, rs, 0 (i.e. sext.w rd, rs)
  MachineInstr *NewMI =
      MIB.buildInstr(RISCV::ADDIW, {Dst.getReg()}, {Src.getReg()}).addImm(0U);

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), MRI, CC, LHS, RHS);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  NeedInvert = false;
  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB,
                                               MachineRegisterInfo &MRI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI.getType(LHS).getSizeInBits();
  assert((Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "Unexpected opcode");
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(MI).getIntrinsicID();

  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::trap:
    MIB.buildInstr(RISCV::UNIMP, {}, {});
    break;
  case Intrinsic::debugtrap:
    MIB.buildInstr(RISCV::EBREAK, {}, {});
    break;
  }

  MI.eraseFromParent();
  return true;
}
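
// Lower G_FENCE. Under Ztso, loads and stores are already totally ordered, so
// only a sequentially-consistent cross-thread fence needs a real FENCE
// instruction; everything else maps onto FENCE pred/succ bits per Table A.6,
// e.g. acquire becomes "fence r, rw" and release becomes "fence rw, w".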
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }

  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               RISCVSubtarget &Subtarget,
                               RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm